git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'wireless-next/master'
author    Thierry Reding <treding@nvidia.com>
          Thu, 24 Oct 2013 12:35:39 +0000 (14:35 +0200)
committer Thierry Reding <treding@nvidia.com>
          Thu, 24 Oct 2013 12:35:39 +0000 (14:35 +0200)
Conflicts:
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/iwlwifi/pcie/drv.c

3157 files changed:
CREDITS
Documentation/ABI/stable/sysfs-bus-usb
Documentation/ABI/testing/sysfs-class-net-batman-adv
Documentation/ABI/testing/sysfs-class-net-mesh
Documentation/ABI/testing/sysfs-class-powercap [new file with mode: 0644]
Documentation/ABI/testing/sysfs-devices-power
Documentation/ABI/testing/sysfs-power
Documentation/DMA-API-HOWTO.txt
Documentation/DMA-API.txt
Documentation/PCI/pci.txt
Documentation/acpi/dsdt-override.txt
Documentation/arm64/tagged-pointers.txt
Documentation/block/00-INDEX
Documentation/block/cmdline-partition.txt
Documentation/connector/ucon.c
Documentation/devices.txt
Documentation/devicetree/bindings/arm/cci.txt
Documentation/devicetree/bindings/dma/atmel-dma.txt
Documentation/devicetree/bindings/i2c/i2c-rcar.txt [new file with mode: 0644]
Documentation/devicetree/bindings/memory.txt [deleted file]
Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt [moved from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt with 93% similarity]
Documentation/devicetree/bindings/mmc/tmio_mmc.txt
Documentation/devicetree/bindings/net/cpsw-phy-sel.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt [moved from Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt with 100% similarity]
Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt [new file with mode: 0644]
Documentation/filesystems/Locking
Documentation/filesystems/caching/netfs-api.txt
Documentation/filesystems/vfs.txt
Documentation/hwmon/lm25066
Documentation/hwmon/ltc2978
Documentation/kernel-parameters.txt
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
Documentation/power/opp.txt
Documentation/power/powercap/powercap.txt [new file with mode: 0644]
Documentation/power/runtime_pm.txt
Documentation/ptp/testptp.c
Documentation/s390/s390dbf.txt
Documentation/scheduler/sched-arch.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/include/uapi/asm/socket.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/uaccess.h
arch/arc/kernel/kprobes.c
arch/arc/kernel/ptrace.c
arch/arc/kernel/signal.c
arch/arc/kernel/time.c
arch/arc/kernel/unaligned.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi [new file with mode: 0644]
arch/arm/boot/dts/am335x-bone.dts
arch/arm/boot/dts/am335x-boneblack.dts [new file with mode: 0644]
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/armada-370-netgear-rn102.dts
arch/arm/boot/dts/armada-xp.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/atlas6.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx6q-pinfunc.h
arch/arm/boot/dts/kirkwood.dtsi
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-sdp.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/prima2.dtsi
arch/arm/boot/dts/r8a73a4.dtsi
arch/arm/boot/dts/r8a7778.dtsi
arch/arm/boot/dts/r8a7779.dtsi
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/sh73a0.dtsi
arch/arm/boot/install.sh
arch/arm/common/Makefile
arch/arm/common/bL_switcher.c [new file with mode: 0644]
arch/arm/common/bL_switcher_dummy_if.c [new file with mode: 0644]
arch/arm/common/edma.c
arch/arm/common/mcpm_entry.c
arch/arm/common/mcpm_head.S
arch/arm/common/sharpsl_param.c
arch/arm/common/timer-sp.c
arch/arm/configs/h3600_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/crypto/.gitignore [new file with mode: 0644]
arch/arm/crypto/Makefile
arch/arm/crypto/aes-armv4.S
arch/arm/crypto/aes_glue.c
arch/arm/crypto/aes_glue.h [new file with mode: 0644]
arch/arm/crypto/aesbs-core.S_shipped [new file with mode: 0644]
arch/arm/crypto/aesbs-glue.c [new file with mode: 0644]
arch/arm/crypto/bsaes-armv7.pl [new file with mode: 0644]
arch/arm/include/asm/Kbuild
arch/arm/include/asm/atomic.h
arch/arm/include/asm/bL_switcher.h [new file with mode: 0644]
arch/arm/include/asm/cmpxchg.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/hardirq.h
arch/arm/include/asm/jump_label.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/spinlock.h
arch/arm/include/asm/spinlock_types.h
arch/arm/include/asm/syscall.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/unified.h
arch/arm/include/debug/efm32.S [new file with mode: 0644]
arch/arm/include/uapi/asm/Kbuild
arch/arm/include/uapi/asm/perf_regs.h [new file with mode: 0644]
arch/arm/kernel/Makefile
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/head.S
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_regs.c [new file with mode: 0644]
arch/arm/kernel/setup.c
arch/arm/kernel/sleep.S
arch/arm/kernel/smp.c
arch/arm/kernel/smp_tlb.c
arch/arm/kernel/suspend.c
arch/arm/kvm/reset.c
arch/arm/lib/bitops.h
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-at91/Makefile
arch/arm/mach-at91/at91rm9200.c
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9261.c
arch/arm/mach-at91/at91sam9263.c
arch/arm/mach-at91/at91sam926x_time.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_reset.S
arch/arm/mach-at91/at91sam9rl.c
arch/arm/mach-at91/at91x40_time.c
arch/arm/mach-at91/board-sam9260ek.c
arch/arm/mach-at91/board-sam9263ek.c
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm.h
arch/arm/mach-at91/setup.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/board-da830-evm.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm644x-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/board-sffsdr.c
arch/arm/mach-davinci/include/mach/serial.h
arch/arm/mach-exynos/common.c
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/cpuidle.c
arch/arm/mach-exynos/mach-exynos4-dt.c
arch/arm/mach-exynos/mach-exynos5-dt.c
arch/arm/mach-highbank/Kconfig
arch/arm/mach-imx/clk-fixup-mux.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mach-pca100.c
arch/arm/mach-imx/mach-pcm037.c
arch/arm/mach-imx/mach-pcm038.c
arch/arm/mach-imx/mach-pcm043.c
arch/arm/mach-imx/mach-vpr200.c
arch/arm/mach-imx/system.c
arch/arm/mach-integrator/pci_v3.h
arch/arm/mach-kirkwood/lacie_v2-common.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-mvebu/system-controller.c
arch/arm/mach-omap1/board-osk.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/gpmc-onenand.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/mux34xx.c
arch/arm/mach-omap2/omap-pm.h
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/opp.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-pxa/Kconfig
arch/arm/mach-pxa/stargate2.c
arch/arm/mach-s3c24xx/mach-mini2440.c
arch/arm/mach-sa1100/assabet.c
arch/arm/mach-sa1100/collie.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/generic.h
arch/arm/mach-sa1100/include/mach/gpio.h [deleted file]
arch/arm/mach-sa1100/include/mach/h3xxx.h
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-shmobile/clock-r8a73a4.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-tegra/Kconfig
arch/arm/mach-u300/Kconfig
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/cache-l2x0.c
arch/arm/mach-vexpress/tc2_pm.c
arch/arm/mach-zynq/common.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/idmap.c
arch/arm/mm/init.c
arch/arm/mm/mm.h
arch/arm/mm/mmap.c
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm64/Kconfig.debug
arch/arm64/configs/defconfig
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/process.c
arch/arm64/kernel/setup.c
arch/arm64/mm/fault.c
arch/arm64/mm/tlb.S
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/cputime.h [deleted file]
arch/avr32/include/asm/delay.h [deleted file]
arch/avr32/include/asm/device.h [deleted file]
arch/avr32/include/asm/div64.h [deleted file]
arch/avr32/include/asm/emergency-restart.h [deleted file]
arch/avr32/include/asm/futex.h [deleted file]
arch/avr32/include/asm/irq_regs.h [deleted file]
arch/avr32/include/asm/local.h [deleted file]
arch/avr32/include/asm/local64.h [deleted file]
arch/avr32/include/asm/percpu.h [deleted file]
arch/avr32/include/asm/scatterlist.h [deleted file]
arch/avr32/include/asm/sections.h [deleted file]
arch/avr32/include/asm/topology.h [deleted file]
arch/avr32/include/asm/xor.h [deleted file]
arch/avr32/include/uapi/asm/socket.h
arch/avr32/kernel/process.c
arch/avr32/kernel/time.c
arch/blackfin/Kconfig
arch/c6x/Kconfig
arch/cris/Kconfig
arch/cris/include/uapi/asm/socket.h
arch/frv/include/uapi/asm/socket.h
arch/h8300/Kconfig [deleted file]
arch/h8300/Kconfig.cpu [deleted file]
arch/h8300/Kconfig.debug [deleted file]
arch/h8300/Kconfig.ide [deleted file]
arch/h8300/Makefile [deleted file]
arch/h8300/README [deleted file]
arch/h8300/boot/Makefile [deleted file]
arch/h8300/boot/compressed/Makefile [deleted file]
arch/h8300/boot/compressed/head.S [deleted file]
arch/h8300/boot/compressed/misc.c [deleted file]
arch/h8300/boot/compressed/vmlinux.lds [deleted file]
arch/h8300/boot/compressed/vmlinux.scr [deleted file]
arch/h8300/defconfig [deleted file]
arch/h8300/include/asm/Kbuild [deleted file]
arch/h8300/include/asm/asm-offsets.h [deleted file]
arch/h8300/include/asm/atomic.h [deleted file]
arch/h8300/include/asm/barrier.h [deleted file]
arch/h8300/include/asm/bitops.h [deleted file]
arch/h8300/include/asm/bootinfo.h [deleted file]
arch/h8300/include/asm/bug.h [deleted file]
arch/h8300/include/asm/bugs.h [deleted file]
arch/h8300/include/asm/cache.h [deleted file]
arch/h8300/include/asm/cachectl.h [deleted file]
arch/h8300/include/asm/cacheflush.h [deleted file]
arch/h8300/include/asm/checksum.h [deleted file]
arch/h8300/include/asm/cmpxchg.h [deleted file]
arch/h8300/include/asm/cputime.h [deleted file]
arch/h8300/include/asm/current.h [deleted file]
arch/h8300/include/asm/dbg.h [deleted file]
arch/h8300/include/asm/delay.h [deleted file]
arch/h8300/include/asm/device.h [deleted file]
arch/h8300/include/asm/div64.h [deleted file]
arch/h8300/include/asm/dma.h [deleted file]
arch/h8300/include/asm/elf.h [deleted file]
arch/h8300/include/asm/emergency-restart.h [deleted file]
arch/h8300/include/asm/fb.h [deleted file]
arch/h8300/include/asm/flat.h [deleted file]
arch/h8300/include/asm/fpu.h [deleted file]
arch/h8300/include/asm/ftrace.h [deleted file]
arch/h8300/include/asm/futex.h [deleted file]
arch/h8300/include/asm/gpio-internal.h [deleted file]
arch/h8300/include/asm/hardirq.h [deleted file]
arch/h8300/include/asm/hw_irq.h [deleted file]
arch/h8300/include/asm/io.h [deleted file]
arch/h8300/include/asm/irq.h [deleted file]
arch/h8300/include/asm/irq_regs.h [deleted file]
arch/h8300/include/asm/irqflags.h [deleted file]
arch/h8300/include/asm/kdebug.h [deleted file]
arch/h8300/include/asm/kmap_types.h [deleted file]
arch/h8300/include/asm/local.h [deleted file]
arch/h8300/include/asm/local64.h [deleted file]
arch/h8300/include/asm/mc146818rtc.h [deleted file]
arch/h8300/include/asm/mmu_context.h [deleted file]
arch/h8300/include/asm/mutex.h [deleted file]
arch/h8300/include/asm/page.h [deleted file]
arch/h8300/include/asm/page_offset.h [deleted file]
arch/h8300/include/asm/param.h [deleted file]
arch/h8300/include/asm/pci.h [deleted file]
arch/h8300/include/asm/percpu.h [deleted file]
arch/h8300/include/asm/pgalloc.h [deleted file]
arch/h8300/include/asm/pgtable.h [deleted file]
arch/h8300/include/asm/processor.h [deleted file]
arch/h8300/include/asm/ptrace.h [deleted file]
arch/h8300/include/asm/regs267x.h [deleted file]
arch/h8300/include/asm/regs306x.h [deleted file]
arch/h8300/include/asm/scatterlist.h [deleted file]
arch/h8300/include/asm/sections.h [deleted file]
arch/h8300/include/asm/segment.h [deleted file]
arch/h8300/include/asm/sh_bios.h [deleted file]
arch/h8300/include/asm/shm.h [deleted file]
arch/h8300/include/asm/shmparam.h [deleted file]
arch/h8300/include/asm/signal.h [deleted file]
arch/h8300/include/asm/smp.h [deleted file]
arch/h8300/include/asm/spinlock.h [deleted file]
arch/h8300/include/asm/string.h [deleted file]
arch/h8300/include/asm/switch_to.h [deleted file]
arch/h8300/include/asm/target_time.h [deleted file]
arch/h8300/include/asm/termios.h [deleted file]
arch/h8300/include/asm/thread_info.h [deleted file]
arch/h8300/include/asm/timer.h [deleted file]
arch/h8300/include/asm/timex.h [deleted file]
arch/h8300/include/asm/tlb.h [deleted file]
arch/h8300/include/asm/tlbflush.h [deleted file]
arch/h8300/include/asm/topology.h [deleted file]
arch/h8300/include/asm/traps.h [deleted file]
arch/h8300/include/asm/types.h [deleted file]
arch/h8300/include/asm/uaccess.h [deleted file]
arch/h8300/include/asm/ucontext.h [deleted file]
arch/h8300/include/asm/unaligned.h [deleted file]
arch/h8300/include/asm/unistd.h [deleted file]
arch/h8300/include/asm/user.h [deleted file]
arch/h8300/include/asm/virtconvert.h [deleted file]
arch/h8300/include/uapi/asm/Kbuild [deleted file]
arch/h8300/include/uapi/asm/auxvec.h [deleted file]
arch/h8300/include/uapi/asm/bitsperlong.h [deleted file]
arch/h8300/include/uapi/asm/byteorder.h [deleted file]
arch/h8300/include/uapi/asm/errno.h [deleted file]
arch/h8300/include/uapi/asm/fcntl.h [deleted file]
arch/h8300/include/uapi/asm/ioctl.h [deleted file]
arch/h8300/include/uapi/asm/ioctls.h [deleted file]
arch/h8300/include/uapi/asm/ipcbuf.h [deleted file]
arch/h8300/include/uapi/asm/kvm_para.h [deleted file]
arch/h8300/include/uapi/asm/mman.h [deleted file]
arch/h8300/include/uapi/asm/msgbuf.h [deleted file]
arch/h8300/include/uapi/asm/param.h [deleted file]
arch/h8300/include/uapi/asm/poll.h [deleted file]
arch/h8300/include/uapi/asm/posix_types.h [deleted file]
arch/h8300/include/uapi/asm/ptrace.h [deleted file]
arch/h8300/include/uapi/asm/resource.h [deleted file]
arch/h8300/include/uapi/asm/sembuf.h [deleted file]
arch/h8300/include/uapi/asm/setup.h [deleted file]
arch/h8300/include/uapi/asm/shmbuf.h [deleted file]
arch/h8300/include/uapi/asm/sigcontext.h [deleted file]
arch/h8300/include/uapi/asm/siginfo.h [deleted file]
arch/h8300/include/uapi/asm/signal.h [deleted file]
arch/h8300/include/uapi/asm/socket.h [deleted file]
arch/h8300/include/uapi/asm/sockios.h [deleted file]
arch/h8300/include/uapi/asm/stat.h [deleted file]
arch/h8300/include/uapi/asm/statfs.h [deleted file]
arch/h8300/include/uapi/asm/swab.h [deleted file]
arch/h8300/include/uapi/asm/termbits.h [deleted file]
arch/h8300/include/uapi/asm/termios.h [deleted file]
arch/h8300/include/uapi/asm/types.h [deleted file]
arch/h8300/include/uapi/asm/unistd.h [deleted file]
arch/h8300/kernel/Makefile [deleted file]
arch/h8300/kernel/asm-offsets.c [deleted file]
arch/h8300/kernel/entry.S [deleted file]
arch/h8300/kernel/gpio.c [deleted file]
arch/h8300/kernel/h8300_ksyms.c [deleted file]
arch/h8300/kernel/irq.c [deleted file]
arch/h8300/kernel/module.c [deleted file]
arch/h8300/kernel/process.c [deleted file]
arch/h8300/kernel/ptrace.c [deleted file]
arch/h8300/kernel/setup.c [deleted file]
arch/h8300/kernel/signal.c [deleted file]
arch/h8300/kernel/sys_h8300.c [deleted file]
arch/h8300/kernel/syscalls.S [deleted file]
arch/h8300/kernel/time.c [deleted file]
arch/h8300/kernel/timer/Makefile [deleted file]
arch/h8300/kernel/timer/itu.c [deleted file]
arch/h8300/kernel/timer/timer16.c [deleted file]
arch/h8300/kernel/timer/timer8.c [deleted file]
arch/h8300/kernel/timer/tpu.c [deleted file]
arch/h8300/kernel/traps.c [deleted file]
arch/h8300/kernel/vmlinux.lds.S [deleted file]
arch/h8300/lib/Makefile [deleted file]
arch/h8300/lib/abs.S [deleted file]
arch/h8300/lib/ashrdi3.c [deleted file]
arch/h8300/lib/checksum.c [deleted file]
arch/h8300/lib/memcpy.S [deleted file]
arch/h8300/lib/memset.S [deleted file]
arch/h8300/lib/romfs.S [deleted file]
arch/h8300/mm/Makefile [deleted file]
arch/h8300/mm/fault.c [deleted file]
arch/h8300/mm/init.c [deleted file]
arch/h8300/mm/kmap.c [deleted file]
arch/h8300/mm/memory.c [deleted file]
arch/h8300/platform/h8300h/Makefile [deleted file]
arch/h8300/platform/h8300h/aki3068net/Makefile [deleted file]
arch/h8300/platform/h8300h/aki3068net/crt0_ram.S [deleted file]
arch/h8300/platform/h8300h/generic/Makefile [deleted file]
arch/h8300/platform/h8300h/generic/crt0_ram.S [deleted file]
arch/h8300/platform/h8300h/generic/crt0_rom.S [deleted file]
arch/h8300/platform/h8300h/h8max/Makefile [deleted file]
arch/h8300/platform/h8300h/h8max/crt0_ram.S [deleted file]
arch/h8300/platform/h8300h/irq.c [deleted file]
arch/h8300/platform/h8300h/ptrace_h8300h.c [deleted file]
arch/h8300/platform/h8s/Makefile [deleted file]
arch/h8300/platform/h8s/edosk2674/Makefile [deleted file]
arch/h8300/platform/h8s/edosk2674/crt0_ram.S [deleted file]
arch/h8300/platform/h8s/edosk2674/crt0_rom.S [deleted file]
arch/h8300/platform/h8s/generic/Makefile [deleted file]
arch/h8300/platform/h8s/generic/crt0_ram.S [deleted file]
arch/h8300/platform/h8s/generic/crt0_rom.S [deleted file]
arch/h8300/platform/h8s/irq.c [deleted file]
arch/h8300/platform/h8s/ptrace_h8s.c [deleted file]
arch/ia64/Kconfig
arch/ia64/include/uapi/asm/socket.h
arch/ia64/kernel/acpi.c
arch/m32r/include/uapi/asm/socket.h
arch/m68k/Kconfig
arch/m68k/include/asm/floppy.h
arch/m68k/include/asm/sun3xflop.h
arch/m68k/include/asm/uaccess.h
arch/m68k/platform/68000/timers.c
arch/m68k/platform/68360/config.c
arch/m68k/platform/coldfire/pit.c
arch/m68k/platform/coldfire/sltimers.c
arch/m68k/platform/coldfire/timers.c
arch/microblaze/Kconfig
arch/mips/Kbuild.platforms
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/alchemy/board-mtx1.c
arch/mips/alchemy/devboards/db1235.c
arch/mips/ath79/dev-common.c
arch/mips/bcm47xx/Makefile
arch/mips/bcm47xx/board.c [new file with mode: 0644]
arch/mips/bcm47xx/nvram.c
arch/mips/bcm47xx/prom.c
arch/mips/bcm47xx/setup.c
arch/mips/bcm47xx/time.c
arch/mips/boot/compressed/Makefile
arch/mips/boot/compressed/decompress.c
arch/mips/boot/compressed/ld.script
arch/mips/cavium-octeon/setup.c
arch/mips/cobalt/Makefile
arch/mips/cobalt/console.c [deleted file]
arch/mips/cobalt/setup.c
arch/mips/configs/powertv_defconfig [deleted file]
arch/mips/dec/int-handler.S
arch/mips/dec/ioasic-irq.c
arch/mips/dec/prom/call_o32.S
arch/mips/dec/prom/init.c
arch/mips/dec/prom/memory.c
arch/mips/dec/setup.c
arch/mips/include/asm/cacheops.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/dec/ioasic.h
arch/mips/include/asm/dec/ioasic_addrs.h
arch/mips/include/asm/dec/kn01.h
arch/mips/include/asm/dec/kn02ca.h
arch/mips/include/asm/dec/prom.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/jump_label.h
arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h [deleted file]
arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
arch/mips/include/asm/mach-dec/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-generic/dma-coherence.h
arch/mips/include/asm/mach-ip27/dma-coherence.h
arch/mips/include/asm/mach-ip32/dma-coherence.h
arch/mips/include/asm/mach-jazz/dma-coherence.h
arch/mips/include/asm/mach-loongson/dma-coherence.h
arch/mips/include/asm/mach-powertv/asic.h [deleted file]
arch/mips/include/asm/mach-powertv/asic_reg_map.h [deleted file]
arch/mips/include/asm/mach-powertv/asic_regs.h [deleted file]
arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h [deleted file]
arch/mips/include/asm/mach-powertv/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-powertv/interrupts.h [deleted file]
arch/mips/include/asm/mach-powertv/ioremap.h [deleted file]
arch/mips/include/asm/mach-powertv/irq.h [deleted file]
arch/mips/include/asm/mach-powertv/powertv-clock.h [deleted file]
arch/mips/include/asm/mach-powertv/war.h [deleted file]
arch/mips/include/asm/mips-boards/piix4.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/setup.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/syscall.h [new file with mode: 0644]
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/time.h
arch/mips/include/asm/unistd.h
arch/mips/include/uapi/asm/siginfo.h
arch/mips/include/uapi/asm/socket.h
arch/mips/kernel/Makefile
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/csrc-powertv.c [deleted file]
arch/mips/kernel/early_printk_8250.c [new file with mode: 0644]
arch/mips/kernel/ftrace.c
arch/mips/kernel/genex.S
arch/mips/kernel/irq_cpu.c
arch/mips/kernel/module.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/ptrace.c
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/lantiq/irq.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/mm/c-r4k.c
arch/mips/mm/dma-default.c
arch/mips/mm/tlb-funcs.S
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-int.c
arch/mips/netlogic/common/smp.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci.c
arch/mips/powertv/Kconfig [deleted file]
arch/mips/powertv/Makefile [deleted file]
arch/mips/powertv/Platform [deleted file]
arch/mips/powertv/asic/Makefile [deleted file]
arch/mips/powertv/asic/asic-calliope.c [deleted file]
arch/mips/powertv/asic/asic-cronus.c [deleted file]
arch/mips/powertv/asic/asic-gaia.c [deleted file]
arch/mips/powertv/asic/asic-zeus.c [deleted file]
arch/mips/powertv/asic/asic_devices.c [deleted file]
arch/mips/powertv/asic/asic_int.c [deleted file]
arch/mips/powertv/asic/irq_asic.c [deleted file]
arch/mips/powertv/asic/prealloc-calliope.c [deleted file]
arch/mips/powertv/asic/prealloc-cronus.c [deleted file]
arch/mips/powertv/asic/prealloc-cronuslite.c [deleted file]
arch/mips/powertv/asic/prealloc-gaia.c [deleted file]
arch/mips/powertv/asic/prealloc-zeus.c [deleted file]
arch/mips/powertv/asic/prealloc.h [deleted file]
arch/mips/powertv/init.c [deleted file]
arch/mips/powertv/init.h [deleted file]
arch/mips/powertv/ioremap.c [deleted file]
arch/mips/powertv/memory.c [deleted file]
arch/mips/powertv/pci/Makefile [deleted file]
arch/mips/powertv/pci/fixup-powertv.c [deleted file]
arch/mips/powertv/pci/powertv-pci.h [deleted file]
arch/mips/powertv/powertv-clock.h [deleted file]
arch/mips/powertv/powertv-usb.c [deleted file]
arch/mips/powertv/powertv_setup.c [deleted file]
arch/mips/powertv/reset.c [deleted file]
arch/mips/powertv/reset.h [deleted file]
arch/mips/powertv/time.c [deleted file]
arch/mips/ralink/clk.c
arch/mips/ralink/mt7620.c
arch/mips/ralink/of.c
arch/mips/ralink/rt305x.c
arch/mn10300/include/uapi/asm/socket.h
arch/openrisc/include/asm/prom.h
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/configs/712_defconfig
arch/parisc/configs/a500_defconfig
arch/parisc/configs/b180_defconfig
arch/parisc/configs/c3000_defconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/configs/default_defconfig
arch/parisc/configs/generic-32bit_defconfig [new file with mode: 0644]
arch/parisc/configs/generic-64bit_defconfig [new file with mode: 0644]
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/socket.h [new file with mode: 0644]
arch/parisc/include/asm/thread_info.h
arch/parisc/include/asm/traps.h
arch/parisc/include/asm/uaccess.h
arch/parisc/include/uapi/asm/socket.h
arch/parisc/install.sh
arch/parisc/kernel/Makefile
arch/parisc/kernel/audit.c [new file with mode: 0644]
arch/parisc/kernel/compat_audit.c [new file with mode: 0644]
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/smp.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/traps.c
arch/parisc/lib/lusercopy.S
arch/parisc/lib/memcpy.c
arch/parisc/mm/fault.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/boot/Makefile
arch/powerpc/boot/epapr-wrapper.c [new file with mode: 0644]
arch/powerpc/boot/epapr.c
arch/powerpc/boot/of.c
arch/powerpc/boot/wrapper
arch/powerpc/include/asm/archrandom.h [new file with mode: 0644]
arch/powerpc/include/asm/checksum.h
arch/powerpc/include/asm/hvsi.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/jump_label.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/pgtable-ppc64.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/scom.h
arch/powerpc/include/asm/sfp-machine.h
arch/powerpc/include/asm/string.h
arch/powerpc/include/asm/word-at-a-time.h
arch/powerpc/include/uapi/asm/byteorder.h
arch/powerpc/include/uapi/asm/socket.h
arch/powerpc/kernel/align.c
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/ptrace32.c
arch/powerpc/kernel/rtas_pci.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/tm.S
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso32/vdso32.lds.S
arch/powerpc/kernel/vdso64/vdso64.lds.S
arch/powerpc/kernel/vecemu.c
arch/powerpc/kernel/vector.S
arch/powerpc/kernel/vio.c
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/lib/Makefile
arch/powerpc/lib/checksum_64.S
arch/powerpc/lib/copyuser_power7.S
arch/powerpc/lib/memcpy_power7.S
arch/powerpc/lib/sstep.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mem.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/perf/power8-pmu.c
arch/powerpc/platforms/512x/mpc512x_shared.c
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/powerpc/platforms/8xx/tqm8xx_setup.c
arch/powerpc/platforms/powernv/Kconfig
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/opal-nvram.c
arch/powerpc/platforms/powernv/opal-rtc.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal-xscom.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci-p5ioc2.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/rng.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/rng.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/wsp/scom_smp.c
arch/powerpc/platforms/wsp/scom_wsp.c
arch/powerpc/platforms/wsp/wsp.c
arch/powerpc/sysdev/Kconfig
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/scom.c
arch/powerpc/sysdev/xics/ics-opal.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/configs/default_defconfig [new file with mode: 0644]
arch/s390/configs/gcov_defconfig [new file with mode: 0644]
arch/s390/configs/performance_defconfig [new file with mode: 0644]
arch/s390/configs/zfcpdump_defconfig [new file with mode: 0644]
arch/s390/crypto/aes_s390.c
arch/s390/defconfig
arch/s390/include/asm/atomic.h
arch/s390/include/asm/bitops.h
arch/s390/include/asm/compat.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/dis.h [new file with mode: 0644]
arch/s390/include/asm/fcx.h
arch/s390/include/asm/ipl.h
arch/s390/include/asm/jump_label.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/mutex.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci_debug.h
arch/s390/include/asm/pci_insn.h
arch/s390/include/asm/percpu.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/smp.h
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/switch_to.h
arch/s390/include/asm/timex.h
arch/s390/include/asm/uaccess.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sigcontext.h
arch/s390/include/uapi/asm/socket.h
arch/s390/kernel/Makefile
arch/s390/kernel/bitmap.c [deleted file]
arch/s390/kernel/cache.c
arch/s390/kernel/compat_linux.c
arch/s390/kernel/compat_linux.h
arch/s390/kernel/compat_signal.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/ftrace.c
arch/s390/kernel/head.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/runtime_instr.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vtime.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/trace.h
arch/s390/lib/Makefile
arch/s390/lib/delay.c
arch/s390/lib/find.c [new file with mode: 0644]
arch/s390/lib/uaccess_mvcos.c
arch/s390/lib/uaccess_pt.c
arch/s390/lib/uaccess_std.c [deleted file]
arch/s390/math-emu/math.c
arch/s390/mm/cmm.c
arch/s390/mm/fault.c
arch/s390/mm/gup.c
arch/s390/mm/pageattr.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_event.c
arch/score/Kconfig
arch/score/Makefile
arch/score/include/asm/checksum.h
arch/score/include/asm/io.h
arch/score/include/asm/pgalloc.h
arch/score/kernel/entry.S
arch/score/kernel/process.c
arch/sh/Kconfig
arch/sh/include/asm/hw_breakpoint.h
arch/sh/include/cpu-common/cpu/ubc.h [new file with mode: 0644]
arch/sh/include/cpu-sh2a/cpu/ubc.h [new file with mode: 0644]
arch/sh/kernel/cpu/sh2a/Makefile
arch/sh/kernel/cpu/sh2a/ubc.c [new file with mode: 0644]
arch/sh/kernel/hw_breakpoint.c
arch/sparc/Kconfig
arch/sparc/include/asm/floppy_64.h
arch/sparc/include/asm/jump_label.h
arch/sparc/include/uapi/asm/socket.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/ds.c
arch/sparc/kernel/ldc.c
arch/sparc/net/bpf_jit_comp.c
arch/tile/include/asm/atomic.h
arch/tile/include/asm/atomic_32.h
arch/tile/include/asm/cmpxchg.h
arch/tile/include/asm/percpu.h
arch/tile/kernel/hardwall.c
arch/tile/kernel/intvec_32.S
arch/tile/kernel/intvec_64.S
arch/tile/kernel/stack.c
arch/tile/lib/atomic_32.c
arch/unicore32/Kconfig
arch/x86/Kconfig
arch/x86/include/asm/acpi.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/jump_label.h
arch/x86/include/asm/mpspec.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/mutex_64.h
arch/x86/include/asm/xen/page.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/jump_label.c
arch/x86/kernel/kvm.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/sysfb_simplefb.c
arch/x86/kernel/topology.c
arch/x86/kvm/vmx.c
arch/x86/lib/msr-smp.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/fixup.c
arch/x86/pci/mmconfig-shared.c
arch/x86/platform/efi/efi.c
arch/x86/platform/olpc/olpc-xo15-sci.c
arch/x86/xen/p2m.c
arch/x86/xen/smp.c
arch/x86/xen/spinlock.c
arch/xtensa/include/uapi/asm/socket.h
block/Kconfig
block/Makefile
block/blk-cgroup.c
block/blk-core.c
block/blk-exec.c
block/blk-settings.c
block/cfq-iosched.c
block/deadline-iosched.c
block/elevator.c
block/genhd.c
block/partitions/Kconfig
block/partitions/cmdline.c
block/partitions/efi.c
crypto/Kconfig
crypto/async_tx/async_tx.c
drivers/Kconfig
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_ipmi.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_memhotplug.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/apei/apei-base.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/cm_sbs.c [deleted file]
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/fan.c
drivers/acpi/internal.h
drivers/acpi/numa.c
drivers/acpi/osl.c
drivers/acpi/pci_root.c
drivers/acpi/power.c
drivers/acpi/proc.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/amba/bus.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_imx.c
drivers/ata/ahci_platform.c
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-acpi.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_isapnp.c
drivers/ata/pata_ixp4xx_cf.c
drivers/ata/pata_octeon_cf.c
drivers/ata/sata_promise.c
drivers/atm/firestream.h
drivers/base/core.c
drivers/base/cpu.c
drivers/base/memory.c
drivers/base/power/main.c
drivers/base/power/opp.c
drivers/base/power/runtime.c
drivers/bcma/driver_pci.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/block/loop.c
drivers/block/nvme-core.c
drivers/bus/arm-cci.c
drivers/bus/mvebu-mbus.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/powernv-rng.c [new file with mode: 0644]
drivers/char/hw_random/pseries-rng.c
drivers/char/random.c
drivers/char/raw.c
drivers/char/tpm/xen-tpmfront.c
drivers/clocksource/Kconfig
drivers/clocksource/clksrc-of.c
drivers/clocksource/em_sti.c
drivers/clocksource/exynos_mct.c
drivers/connector/cn_proc.c
drivers/connector/connector.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.powerpc
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/arm_big_little_dt.c
drivers/cpufreq/at32ap-cpufreq.c
drivers/cpufreq/blackfin-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq-nforce2.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cris-artpec3-cpufreq.c
drivers/cpufreq/cris-etraxfs-cpufreq.c
drivers/cpufreq/davinci-cpufreq.c
drivers/cpufreq/dbx500-cpufreq.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/exynos4210-cpufreq.c
drivers/cpufreq/exynos4x12-cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/gx-suspmod.c
drivers/cpufreq/highbank-cpufreq.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/integrator-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/longrun.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/maple-cpufreq.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/pmac64-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/pxa3xx-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/sa1100-cpufreq.c
drivers/cpufreq/sa1110-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/sh-cpufreq.c
drivers/cpufreq/sparc-us2e-cpufreq.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/cpufreq/spear-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/tegra-cpufreq.c
drivers/cpufreq/unicore2-cpufreq.c
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-at91.c [moved from arch/arm/mach-at91/cpuidle.c with 79% similarity]
drivers/cpuidle/cpuidle-ux500.c
drivers/cpuidle/cpuidle-zynq.c
drivers/crypto/ixp4xx_crypto.c
drivers/devfreq/devfreq.c
drivers/devfreq/exynos/exynos4_bus.c
drivers/devfreq/exynos/exynos5_bus.c
drivers/dma/Kconfig
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/coh901318.c
drivers/dma/cppi41.c
drivers/dma/dma-jz4740.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/dw/platform.c
drivers/dma/edma.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma_v3.c
drivers/dma/iop-adma.c
drivers/dma/k3dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/mv_xor.c
drivers/dma/mxs-dma.c
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/sa11x0-dma.c
drivers/dma/sh/rcar-hpbdma.c
drivers/dma/sh/shdma-base.c
drivers/dma/sh/shdmac.c
drivers/dma/ste_dma40.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/txx9dmac.c
drivers/firmware/dcdbas.c
drivers/firmware/google/gsmi.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-sa1100.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/btc_dpm.c
drivers/gpu/drm/radeon/btc_dpm.h
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-elo.c
drivers/hid/hid-holtek-mouse.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lenovo-tpkbd.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg2ff.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/hid-sony.c
drivers/hid/hid-wiimote-core.c
drivers/hid/hid-wiimote-modules.c
drivers/hid/hid-wiimote.h
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/uhid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hv/connection.c
drivers/hv/hv_kvp.c
drivers/hv/hv_snapshot.c
drivers/hv/hv_util.c
drivers/hwmon/abituguru.c
drivers/hwmon/abituguru3.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/adcxx.c
drivers/hwmon/adm1026.c
drivers/hwmon/adt7462.c
drivers/hwmon/applesmc.c
drivers/hwmon/asc7621.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/atxp1.c
drivers/hwmon/ds1621.c
drivers/hwmon/emc1403.c
drivers/hwmon/f71882fg.c
drivers/hwmon/f75375s.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/hwmon.c
drivers/hwmon/ina209.c
drivers/hwmon/ina2xx.c
drivers/hwmon/jc42.c
drivers/hwmon/lm70.c
drivers/hwmon/lm73.c
drivers/hwmon/lm95234.c
drivers/hwmon/ltc4245.c
drivers/hwmon/ltc4261.c
drivers/hwmon/max16065.c
drivers/hwmon/max6642.c
drivers/hwmon/max6650.c
drivers/hwmon/max6697.c
drivers/hwmon/mc13783-adc.c
drivers/hwmon/nct6775.c
drivers/hwmon/pmbus/lm25066.c
drivers/hwmon/pmbus/ltc2978.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/tmp401.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/i2c/busses/i2c-bfin-twi.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-stu300.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-dev.c
drivers/i2c/i2c-smbus.c
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
drivers/i2c/muxes/i2c-mux-gpio.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/ide/Kconfig
drivers/ide/Makefile
drivers/ide/ide-h8300.c [deleted file]
drivers/idle/intel_idle.c
drivers/iio/accel/bma180.c
drivers/iio/adc/at91_adc.c
drivers/iio/amplifiers/ad8366.c
drivers/iio/buffer_cb.c
drivers/iio/dac/mcp4725.c
drivers/iio/frequency/adf4350.c
drivers/iio/iio_core.h
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/temperature/tmp006.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/amso1100/c2_ae.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/input.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/cm109.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/iommu/Kconfig
drivers/iommu/arm-smmu.c
drivers/irqchip/irq-gic.c
drivers/isdn/hardware/eicon/divasmain.c
drivers/isdn/hardware/eicon/um_idi.c
drivers/isdn/sc/init.c
drivers/mailbox/mailbox-omap2.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/btree.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/dm-io.c
drivers/md/dm-mpath.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-stats.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-frontends/tda10071.c
drivers/media/i2c/ad9389b.c
drivers/media/i2c/adv7511.c
drivers/media/i2c/adv7842.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/i2c/ths8200.c
drivers/media/pci/saa7134/saa7134-video.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/marvell-ccic/mcam-core.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/isp.h
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/sh_vou.c
drivers/media/platform/soc_camera/mx3_camera.c
drivers/media/tuners/e4000.c
drivers/media/usb/gspca/conex.c
drivers/media/usb/gspca/cpia1.c
drivers/media/usb/gspca/gspca.c
drivers/media/usb/gspca/gspca.h
drivers/media/usb/gspca/jeilinj.c
drivers/media/usb/gspca/jl2005bcd.c
drivers/media/usb/gspca/m5602/m5602_mt9m111.c
drivers/media/usb/gspca/mars.c
drivers/media/usb/gspca/mr97310a.c
drivers/media/usb/gspca/nw80x.c
drivers/media/usb/gspca/ov519.c
drivers/media/usb/gspca/ov534.c
drivers/media/usb/gspca/pac207.c
drivers/media/usb/gspca/pac7311.c
drivers/media/usb/gspca/se401.c
drivers/media/usb/gspca/sn9c20x.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/gspca/sonixj.c
drivers/media/usb/gspca/spca1528.c
drivers/media/usb/gspca/spca500.c
drivers/media/usb/gspca/sq905c.c
drivers/media/usb/gspca/sq930x.c
drivers/media/usb/gspca/stk014.c
drivers/media/usb/gspca/stk1135.c
drivers/media/usb/gspca/stv06xx/stv06xx.c
drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
drivers/media/usb/gspca/sunplus.c
drivers/media/usb/gspca/topro.c
drivers/media/usb/gspca/tv8532.c
drivers/media/usb/gspca/vicam.c
drivers/media/usb/gspca/w996Xcf.c
drivers/media/usb/gspca/xirlink_cit.c
drivers/media/usb/gspca/zc3xx.c
drivers/media/usb/stkwebcam/stk-webcam.c
drivers/media/usb/uvc/uvc_driver.c
drivers/media/v4l2-core/tuner-core.c
drivers/media/v4l2-core/v4l2-common.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-dma-contig.c
drivers/media/v4l2-core/videobuf2-dma-sg.c
drivers/misc/eeprom/at24.c
drivers/misc/mei/amthif.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.h
drivers/misc/mei/hbm.c
drivers/misc/mei/init.c
drivers/misc/mei/main.c
drivers/misc/mei/mei_dev.h
drivers/mmc/card/queue.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mtd/devices/m25p80.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/ubi/attach.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/wl.c
drivers/net/Space.c
drivers/net/bonding/Makefile
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c [new file with mode: 0644]
drivers/net/bonding/bond_options.c [new file with mode: 0644]
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/mcp251x.c
drivers/net/can/mscan/mscan.h
drivers/net/can/pch_can.c
drivers/net/can/sja1000/ems_pci.c
drivers/net/can/sja1000/kvaser_pci.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing.h
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/8390.h
drivers/net/ethernet/8390/Kconfig
drivers/net/ethernet/8390/Makefile
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/8390/ne-h8300.c [deleted file]
drivers/net/ethernet/8390/ne2k-pci.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/adi/bfin_mac.h
drivers/net/ethernet/amd/7990.h
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/atarilance.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/amd/lance.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c.h
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1e/atl1e.h
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl2.h
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb/common.h
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb/pm3393.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/regs.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dec/tulip/xircom_cb.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/fujitsu/Kconfig
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/debug.h
drivers/net/ethernet/ibm/emac/rgmii.h
drivers/net/ethernet/ibm/emac/tah.h
drivers/net/ethernet/ibm/emac/zmii.h
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/intel/ixgb/ixgb.h
drivers/net/ethernet/intel/ixgb/ixgb_hw.h
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_regs.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/io.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mdio_10g.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/phy.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smc9194.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunqe.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/cpsw-phy-sel.c [new file with mode: 0644]
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h
drivers/net/ethernet/ti/cpts.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/toshiba/ps3_gelic_net.h
drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/toshiba/spider_net.h
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fddi/skfp/fplustm.c
drivers/net/fddi/skfp/h/smc.h
drivers/net/fddi/skfp/skfddi.c
drivers/net/hamradio/baycom_ser_fdx.c
drivers/net/hamradio/baycom_ser_hdx.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/donauboe.c
drivers/net/irda/sh_irda.c
drivers/net/irda/sh_sir.c
drivers/net/irda/sir-dev.h
drivers/net/macvlan.c
drivers/net/phy/at803x.c
drivers/net/phy/marvell.c
drivers/net/phy/micrel.c
drivers/net/plip/plip.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/catc.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/dm9601.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wan/farsync.c
drivers/net/wan/hostess_sv11.c
drivers/net/wan/sealevel.c
drivers/net/wan/wanxl.c
drivers/net/wan/x25_asy.h
drivers/net/wan/z85230.h
drivers/net/wimax/i2400m/i2400m-usb.h
drivers/net/wimax/i2400m/i2400m.h
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath6kl/common.h
drivers/net/wireless/ath/ath6kl/debug.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/atmel.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
drivers/net/wireless/brcm80211/brcmsmac/antsel.h
drivers/net/wireless/brcm80211/brcmsmac/channel.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/main.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
drivers/net/wireless/brcm80211/brcmsmac/pmu.h
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/brcm80211/brcmsmac/rate.h
drivers/net/wireless/brcm80211/brcmsmac/stf.h
drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
drivers/net/wireless/brcm80211/include/brcmu_d11.h
drivers/net/wireless/brcm80211/include/brcmu_utils.h
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/cw1200/fwio.c
drivers/net/wireless/cw1200/hwbus.h
drivers/net/wireless/cw1200/hwio.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw.h
drivers/net/wireless/iwlegacy/3945.h
drivers/net/wireless/iwlegacy/4965.h
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/rs.h
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_aggr.h
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/orinoco/orinoco.h
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/oid_mgt.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rtlwifi/cam.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.h
drivers/net/wireless/rtlwifi/rtl8192de/phy.h
drivers/net/wireless/rtlwifi/rtl8192de/rf.h
drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/of/Kconfig
drivers/of/Makefile
drivers/of/base.c
drivers/of/fdt.c
drivers/of/of_reserved_mem.c [deleted file]
drivers/of/platform.c
drivers/parport/Kconfig
drivers/parport/parport_pc.c
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-exynos.c
drivers/pci/host/pci-imx6.c [new file with mode: 0644]
drivers/pci/host/pci-tegra.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-designware.h
drivers/pci/hotplug/acpi_pcihp.c
drivers/pci/hotplug/acpiphp.h
drivers/pci/hotplug/acpiphp_core.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/acpiphp_ibm.c
drivers/pci/hotplug/s390_pci_hpc.c
drivers/pci/hotplug/shpchp.h
drivers/pci/pci-acpi.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-exynos.c
drivers/pinctrl/pinctrl-palmas.c
drivers/pinctrl/pinctrl-tegra114.c
drivers/platform/x86/Kconfig
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel-rst.c
drivers/platform/x86/intel-smartconnect.c
drivers/platform/x86/intel_menlow.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/topstar-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/platform/x86/wmi.c
drivers/pnp/pnpacpi/core.c
drivers/powercap/Kconfig [new file with mode: 0644]
drivers/powercap/Makefile [new file with mode: 0644]
drivers/powercap/intel_rapl.c [new file with mode: 0644]
drivers/powercap/powercap_sys.c [new file with mode: 0644]
drivers/regulator/da9063-regulator.c
drivers/regulator/palmas-regulator.c
drivers/regulator/ti-abb-regulator.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/scm_blk.h
drivers/s390/char/monwriter.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_cmd.c
drivers/s390/char/tty3270.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/zcore.c
drivers/s390/cio/airq.c
drivers/s390/cio/cio.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/eadm_sch.h
drivers/s390/cio/qdio_debug.h
drivers/s390/cio/qdio_main.c
drivers/s390/crypto/zcrypt_debug.h
drivers/s390/net/claw.h
drivers/s390/net/ctcm_dbug.c
drivers/s390/net/lcs.h
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core_main.c
drivers/s390/scsi/zfcp_dbf.h
drivers/scsi/BusLogic.c
drivers/scsi/bfa/bfad.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/spi/spi-atmel.c
drivers/spi/spi-clps711x.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-mpc512x-psc.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi.c
drivers/staging/comedi/Kconfig
drivers/staging/comedi/drivers/ni_65xx.c
drivers/staging/dgap/dgap_driver.c
drivers/staging/dgnc/dgnc_driver.c
drivers/staging/dwc2/platform.c
drivers/staging/et131x/et131x.c
drivers/staging/iio/Kconfig
drivers/staging/iio/light/isl29018.c
drivers/staging/iio/magnetometer/hmc5843.c
drivers/staging/iio/meter/ade7854-spi.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/staging/line6/toneport.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
drivers/staging/lustre/lustre/Kconfig
drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
drivers/staging/lustre/lustre/libcfs/workitem.c
drivers/staging/lustre/lustre/obdecho/echo_client.c
drivers/staging/lustre/lustre/ptlrpc/pinger.c
drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
drivers/staging/lustre/lustre/ptlrpc/service.c
drivers/staging/media/dt3155v4l/dt3155v4l.c
drivers/staging/media/msi3101/Kconfig
drivers/staging/media/msi3101/sdr-msi3101.c
drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
drivers/staging/octeon-usb/cvmx-usb.c
drivers/staging/rtl8188eu/core/rtw_ieee80211.c
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
drivers/staging/rtl8188eu/core/rtw_mp.c
drivers/staging/rtl8188eu/core/rtw_wlan_util.c
drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
drivers/staging/rtl8188eu/include/odm.h
drivers/staging/rtl8188eu/include/rtl8188e_hal.h
drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/rtl8192u/r819xU_cmdpkt.c
drivers/staging/vt6656/card.c
drivers/staging/vt6656/iwctl.c
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/rxtx.c
drivers/staging/xillybus/xillybus_core.c
drivers/staging/zram/zram_drv.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/thermal/Kconfig
drivers/thermal/intel_powerclamp.c
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/samsung/exynos_tmu.h
drivers/thermal/samsung/exynos_tmu_data.c
drivers/thermal/samsung/exynos_tmu_data.h
drivers/thermal/thermal_hwmon.c
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/thermal/x86_pkg_temp_thermal.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/hvc/hvsi_lib.c
drivers/tty/n_tty.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/imx.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/vt8500_serial.c
drivers/tty/tty_ioctl.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/chipidea/ci_hdrc_pci.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/host.c
drivers/usb/chipidea/udc.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/cdc2.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_ecm.c
drivers/usb/gadget/f_eem.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/fotg210-udc.c
drivers/usb/gadget/fusb300_udc.c
drivers/usb/gadget/lpc32xx_udc.c
drivers/usb/gadget/multi.c
drivers/usb/gadget/mv_u3d_core.c
drivers/usb/gadget/pxa25x_udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/storage_common.c
drivers/usb/host/bcma-hcd.c
drivers/usb/host/ehci-atmel.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-grlib.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-mv.c
drivers/usb/host/ehci-octeon.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-platform.c
drivers/usb/host/ehci-pmcmsp.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/host/ehci-ps3.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-s5p.c
drivers/usb/host/ehci-sead3.c
drivers/usb/host/ehci-sh.c
drivers/usb/host/ehci-spear.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ehci-tilegx.c
drivers/usb/host/ehci-w90x900.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/imx21-hcd.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-exynos.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-octeon.c
drivers/usb/host/ohci-omap3.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/ohci-q.c
drivers/usb/host/ohci-sa1111.c
drivers/usb/host/ohci-spear.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/ssb-hcd.c
drivers/usb/host/uhci-pci.c
drivers/usb/host/uhci-platform.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/Kconfig
drivers/usb/musb/am35x.c
drivers/usb/musb/da8xx.c
drivers/usb/musb/davinci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_virthub.c
drivers/usb/musb/tusb6010.c
drivers/usb/phy/phy-gpio-vbus-usb.c
drivers/usb/phy/phy-omap-usb3.c
drivers/usb/serial/Kconfig
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/video/amba-clcd.c
drivers/video/backlight/backlight.c
drivers/video/mmp/hw/mmp_ctrl.c
drivers/video/mxsfb.c
drivers/video/neofb.c
drivers/video/of_display_timing.c
drivers/video/omap2/displays-new/Kconfig
drivers/video/omap2/displays-new/connector-analog-tv.c
drivers/video/omap2/displays-new/connector-dvi.c
drivers/video/omap2/displays-new/connector-hdmi.c
drivers/video/omap2/dss/dispc.c
drivers/video/s3fb.c
drivers/w1/w1.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/hpwdt.c
drivers/watchdog/kempld_wdt.c
drivers/watchdog/sunxi_wdt.c
drivers/watchdog/ts72xx_wdt.c
drivers/xen/balloon.c
fs/9p/cache.c
fs/9p/cache.h
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/Makefile
fs/adfs/file.c
fs/affs/file.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/vlocation.c
fs/afs/volume.c
fs/afs/write.c
fs/aio.c
fs/bad_inode.c
fs/befs/linuxvfs.c
fs/bfs/file.c
fs/binfmt_elf.c
fs/bio-integrity.c
fs/bio.c
fs/block_dev.c
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/buffer.c
fs/cachefiles/interface.c
fs/cachefiles/namei.c
fs/cachefiles/xattr.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/super.c
fs/ceph/super.h
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/link.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smbfsctl.h
fs/cifs/transport.c
fs/dcache.c
fs/direct-io.c
fs/dlm/lockspace.c
fs/ecryptfs/dentry.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/exofs/file.c
fs/ext2/file.c
fs/ext2/inode.c
fs/ext3/file.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fat/file.c
fs/fat/inode.c
fs/fscache/cookie.c
fs/fscache/fsdef.c
fs/fscache/netfs.c
fs/fscache/object.c
fs/fscache/page.c
fs/fuse/cuse.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/aops.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/gfs2/sys.c
fs/gfs2/util.c
fs/gfs2/util.h
fs/gfs2/xattr.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/hostfs/hostfs_kern.c
fs/hpfs/file.c
fs/internal.h
fs/iov-iter.c [new file with mode: 0644]
fs/jffs2/file.c
fs/jfs/file.c
fs/jfs/inode.c
fs/jfs/jfs_inode.c
fs/libfs.c
fs/logfs/dev_mtd.c
fs/logfs/file.c
fs/logfs/super.c
fs/minix/Kconfig
fs/minix/file.c
fs/namei.c
fs/namespace.c
fs/ncpfs/dir.c
fs/ncpfs/file.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/fscache.c
fs/nfs/fscache.h
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/proc.c
fs/nfs/unlink.c
fs/nfs/write.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsfh.c
fs/nfsd/nfsfh.h
fs/nfsd/vfs.c
fs/nilfs2/file.c
fs/nilfs2/inode.c
fs/nilfs2/page.c
fs/nilfs2/segment.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/dcache.c
fs/ocfs2/file.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/super.c
fs/omfs/file.c
fs/proc/inode.c
fs/proc/self.c
fs/proc/task_mmu.c
fs/ramfs/file-mmu.c
fs/ramfs/file-nommu.c
fs/read_write.c
fs/reiserfs/file.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/romfs/mmap-nommu.c
fs/statfs.c
fs/super.c
fs/sysv/file.c
fs/sysv/super.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/gc.c
fs/ubifs/journal.c
fs/ubifs/xattr.c
fs/udf/file.c
fs/udf/ialloc.c
fs/udf/inode.c
fs/udf/super.c
fs/udf/udf_sb.h
fs/ufs/file.c
fs/xfs/Makefile
fs/xfs/xfs_acl.c
fs/xfs/xfs_ag.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_alloc.h
fs/xfs/xfs_alloc_btree.c
fs/xfs/xfs_alloc_btree.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_leaf.h
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_attr_remote.h
fs/xfs/xfs_bit.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap_btree.c
fs/xfs/xfs_bmap_btree.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_btree.c
fs/xfs/xfs_btree.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_da_btree.h
fs/xfs/xfs_da_format.h [moved from fs/xfs/xfs_dir2_format.h with 63% similarity]
fs/xfs/xfs_dir2.c
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_dir2_data.c
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_dir2_sf.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_dquot.h
fs/xfs/xfs_dquot_buf.c [new file with mode: 0644]
fs/xfs/xfs_dquot_item.c
fs/xfs/xfs_error.c
fs/xfs/xfs_export.c
fs/xfs/xfs_extent_busy.c
fs/xfs/xfs_extent_busy.h
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_file.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_format.h
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_ialloc.h
fs/xfs/xfs_ialloc_btree.c
fs/xfs/xfs_ialloc_btree.h
fs/xfs/xfs_icache.c
fs/xfs/xfs_icreate_item.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_buf.c
fs/xfs/xfs_inode_buf.h
fs/xfs/xfs_inode_fork.c
fs/xfs/xfs_inode_fork.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_ioctl32.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_iops.c
fs/xfs/xfs_iops.h
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_format.h
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_log_rlimit.c
fs/xfs/xfs_message.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_bhv.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quota.h
fs/xfs/xfs_quota_defs.h
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_rtalloc.h
fs/xfs/xfs_rtbitmap.c [new file with mode: 0644]
fs/xfs/xfs_sb.c
fs/xfs/xfs_sb.h
fs/xfs/xfs_shared.h [new file with mode: 0644]
fs/xfs/xfs_super.c
fs/xfs/xfs_symlink.c
fs/xfs/xfs_symlink.h
fs/xfs/xfs_symlink_remote.c
fs/xfs/xfs_trace.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_buf.c
fs/xfs/xfs_trans_dquot.c
fs/xfs/xfs_trans_extfree.c
fs/xfs/xfs_trans_inode.c
fs/xfs/xfs_trans_priv.h
fs/xfs/xfs_trans_resv.c
fs/xfs/xfs_vnode.h
fs/xfs/xfs_xattr.c
include/acpi/acexcep.h
include/acpi/acpi_bus.h
include/acpi/acpixf.h
include/acpi/actypes.h
include/acpi/platform/aclinux.h
include/acpi/processor.h
include/asm-generic/hugetlb.h
include/asm-generic/vtime.h
include/drm/drmP.h
include/dt-bindings/pinctrl/omap.h
include/linux/acpi.h
include/linux/aio.h
include/linux/amba/bus.h
include/linux/backlight.h
include/linux/balloon_compaction.h
include/linux/bcma/bcma_driver_pci.h
include/linux/bio.h
include/linux/bitops.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/compiler-gcc4.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/dcache.h
include/linux/devfreq.h
include/linux/device-mapper.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/etherdevice.h
include/linux/fcdevice.h
include/linux/fddidevice.h
include/linux/filter.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/fscache.h
include/linux/hashtable.h
include/linux/hippidevice.h
include/linux/hwmon-vid.h
include/linux/hwmon.h
include/linux/hyperv.h
include/linux/i2c.h
include/linux/inetdevice.h
include/linux/intel-iommu.h
include/linux/ipv6.h
include/linux/irqchip/arm-gic.h
include/linux/jump_label.h
include/linux/jump_label_ratelimit.h
include/linux/kernel.h
include/linux/lockref.h
include/linux/memcontrol.h
include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
include/linux/miscdevice.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/mutex.h
include/linux/net.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_comment.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_timeout.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/nf_conntrack_h323.h
include/linux/netfilter/nf_conntrack_proto_gre.h
include/linux/netfilter/nf_conntrack_sip.h
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_acct.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_bridge.h
include/linux/netfilter_ipv4.h
include/linux/netfilter_ipv6.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/nfs_xdr.h
include/linux/of_irq.h
include/linux/of_reserved_mem.h [deleted file]
include/linux/opp.h [deleted file]
include/linux/page-flags.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/platform_data/at24.h [moved from include/linux/i2c/at24.h with 97% similarity]
include/linux/pm_opp.h [new file with mode: 0644]
include/linux/powercap.h [new file with mode: 0644]
include/linux/random.h
include/linux/regulator/driver.h
include/linux/sched.h
include/linux/serial_sci.h
include/linux/skbuff.h
include/linux/smp.h
include/linux/ssb/ssb_driver_gige.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/sched.h
include/linux/sunrpc/xprt.h
include/linux/timex.h
include/linux/usb/usb_phy_gen_xceiv.h
include/linux/usb/usbnet.h
include/linux/usb_usual.h
include/linux/vgaarb.h
include/linux/yam.h
include/media/v4l2-common.h
include/media/videobuf2-core.h
include/media/videobuf2-dma-sg.h
include/net/addrconf.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/rfcomm.h
include/net/caif/caif_hsi.h
include/net/cipso_ipv4.h
include/net/compat.h
include/net/dcbevent.h
include/net/dn.h
include/net/dn_dev.h
include/net/dn_fib.h
include/net/dn_neigh.h
include/net/dn_nsp.h
include/net/dn_route.h
include/net/dst.h
include/net/esp.h
include/net/fib_rules.h
include/net/flow.h
include/net/flow_keys.h
include/net/garp.h
include/net/gen_stats.h
include/net/genetlink.h
include/net/gre.h
include/net/icmp.h
include/net/inet6_connection_sock.h
include/net/inet6_hashtables.h
include/net/inet_common.h
include/net/inet_connection_sock.h
include/net/inet_frag.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_checksum.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/ipx.h
include/net/irda/ircomm_tty.h
include/net/irda/irda.h
include/net/irda/irda_device.h
include/net/irda/irlap_event.h
include/net/irda/irlap_frame.h
include/net/iw_handler.h
include/net/lapb.h
include/net/llc.h
include/net/llc_c_ac.h
include/net/llc_c_ev.h
include/net/llc_conn.h
include/net/llc_if.h
include/net/llc_pdu.h
include/net/llc_s_ac.h
include/net/llc_s_ev.h
include/net/llc_sap.h
include/net/mac802154.h
include/net/mrp.h
include/net/ndisc.h
include/net/net_namespace.h
include/net/netevent.h
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
include/net/netfilter/ipv4/nf_defrag_ipv4.h
include/net/netfilter/ipv6/nf_defrag_ipv6.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_acct.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_extend.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_conntrack_l3proto.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_conntrack_seqadj.h
include/net/netfilter/nf_conntrack_synproxy.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netfilter/nf_conntrack_timestamp.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netfilter/nf_nat_helper.h
include/net/netfilter/nf_nat_l3proto.h
include/net/netfilter/nf_nat_l4proto.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h [new file with mode: 0644]
include/net/netfilter/nf_tables_core.h [new file with mode: 0644]
include/net/netfilter/nf_tables_ipv4.h [new file with mode: 0644]
include/net/netfilter/nf_tables_ipv6.h [new file with mode: 0644]
include/net/netfilter/xt_rateest.h
include/net/netlink.h
include/net/netns/ipv4.h
include/net/netns/nftables.h [new file with mode: 0644]
include/net/netrom.h
include/net/p8022.h
include/net/ping.h
include/net/protocol.h
include/net/psnap.h
include/net/raw.h
include/net/rawv6.h
include/net/request_sock.h
include/net/rose.h
include/net/route.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/scm.h
include/net/sctp/sctp.h
include/net/secure_seq.h
include/net/sock.h
include/net/stp.h
include/net/tcp.h
include/net/tcp_memcontrol.h
include/net/udp.h
include/net/udplite.h
include/net/wext.h
include/net/wimax.h
include/net/x25.h
include/net/xfrm.h
include/sound/rcar_snd.h
include/trace/events/block.h
include/trace/events/btrfs.h
include/trace/events/power_cpu_migrate.h [new file with mode: 0644]
include/uapi/asm-generic/socket.h
include/uapi/drm/drm_mode.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/audit.h
include/uapi/linux/can/bcm.h
include/uapi/linux/can/error.h
include/uapi/linux/can/gw.h
include/uapi/linux/can/netlink.h
include/uapi/linux/can/raw.h
include/uapi/linux/elf-em.h
include/uapi/linux/if_bonding.h
include/uapi/linux/if_link.h
include/uapi/linux/loop.h
include/uapi/linux/netfilter/Kbuild
include/uapi/linux/netfilter/ipset/ip_set.h
include/uapi/linux/netfilter/nf_conntrack_common.h
include/uapi/linux/netfilter/nf_tables.h [new file with mode: 0644]
include/uapi/linux/netfilter/nf_tables_compat.h [new file with mode: 0644]
include/uapi/linux/netfilter/nfnetlink.h
include/uapi/linux/netfilter/nfnetlink_cttimeout.h
include/uapi/linux/pci_regs.h
include/uapi/linux/perf_event.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/tc_act/tc_defact.h [moved from include/linux/tc_act/tc_defact.h with 75% similarity]
include/uapi/rdma/ib_user_verbs.h
include/xen/interface/io/netif.h
init/Kconfig
init/main.c
ipc/msg.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/audit.c
kernel/cgroup.c
kernel/context_tracking.c
kernel/events/core.c
kernel/jump_label.c
kernel/kmod.c
kernel/params.c
kernel/pid.c
kernel/power/Kconfig
kernel/power/qos.c
kernel/power/snapshot.c
kernel/power/user.c
kernel/reboot.c
kernel/sched/fair.c
kernel/softirq.c
kernel/watchdog.c
lib/hexdump.c
lib/kobject.c
lib/lockref.c
lib/percpu-refcount.c
mm/Kconfig
mm/bounce.c
mm/compaction.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/hwpoison-inject.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/mlock.c
mm/mprotect.c
mm/mremap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_io.c
mm/shmem.c
mm/slab_common.c
mm/swapfile.c
mm/vmscan.c
mm/zswap.c
net/802/mrp.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_netlink.c
net/ax25/af_ax25.c
net/batman-adv/Makefile
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c [new file with mode: 0644]
net/batman-adv/fragmentation.h [new file with mode: 0644]
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c [deleted file]
net/batman-adv/unicast.h [deleted file]
net/batman-adv/vis.c [deleted file]
net/batman-adv/vis.h [deleted file]
net/bluetooth/hidp/hidp.h
net/bridge/br_fdb.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_private_stp.h
net/bridge/br_stp_if.c
net/bridge/br_vlan.c
net/bridge/netfilter/Kconfig
net/bridge/netfilter/Makefile
net/bridge/netfilter/ebt_among.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/nf_tables_bridge.c [new file with mode: 0644]
net/can/af_can.h
net/ceph/auth_none.h
net/ceph/auth_x.h
net/ceph/crypto.h
net/compat.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/netprio_cgroup.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/utils.c
net/dccp/ackvec.h
net/dccp/ccid.h
net/dccp/ccids/lib/loss_interval.h
net/dccp/ccids/lib/packet_history.h
net/dccp/ccids/lib/tfrc.h
net/dccp/dccp.h
net/dccp/feat.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/ipv6.h
net/dccp/minisocks.c
net/dccp/output.c
net/dccp/proto.c
net/decnet/netfilter/dn_rtmsg.c
net/ethernet/eth.c
net/ieee802154/6lowpan.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/gre_demux.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_tables_arp.c [new file with mode: 0644]
net/ipv4/netfilter/nf_tables_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nft_chain_nat_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nft_chain_route_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nft_reject_ipv4.c [new file with mode: 0644]
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.h
net/ipv4/udp.c
net/ipv4/udp_impl.h
net/ipv4/udp_offload.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/inet6_connection_sock.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c [new file with mode: 0644]
net/ipv6/ipcomp6.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_tables_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nft_chain_nat_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nft_chain_route_ipv6.c [new file with mode: 0644]
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipv6/udp_offload.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/irda/irnet/irnet.h
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/lapb/lapb_timer.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mpls/mpls_gso.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/Kconfig
net/netfilter/ipset/Makefile
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_getport.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_est.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_internals.h
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_sip.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c [new file with mode: 0644]
net/netfilter/nf_tables_core.c [new file with mode: 0644]
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nft_bitwise.c [new file with mode: 0644]
net/netfilter/nft_byteorder.c [new file with mode: 0644]
net/netfilter/nft_cmp.c [new file with mode: 0644]
net/netfilter/nft_compat.c [new file with mode: 0644]
net/netfilter/nft_counter.c [new file with mode: 0644]
net/netfilter/nft_ct.c [new file with mode: 0644]
net/netfilter/nft_expr_template.c [new file with mode: 0644]
net/netfilter/nft_exthdr.c [new file with mode: 0644]
net/netfilter/nft_hash.c [new file with mode: 0644]
net/netfilter/nft_immediate.c [new file with mode: 0644]
net/netfilter/nft_limit.c [new file with mode: 0644]
net/netfilter/nft_log.c [new file with mode: 0644]
net/netfilter/nft_lookup.c [new file with mode: 0644]
net/netfilter/nft_meta.c [new file with mode: 0644]
net/netfilter/nft_meta_target.c [new file with mode: 0644]
net/netfilter/nft_nat.c [new file with mode: 0644]
net/netfilter/nft_payload.c [new file with mode: 0644]
net/netfilter/nft_rbtree.c [new file with mode: 0644]
net/netfilter/x_tables.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_set.c
net/netfilter/xt_socket.c
net/netlabel/netlabel_kapi.c
net/openvswitch/vport-vxlan.c
net/rds/connection.c
net/rds/rds.h
net/rxrpc/ar-internal.h
net/sched/act_police.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/em_ipset.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_fq.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_tbf.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/socket.c
net/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/sysctl_net.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.h
net/tipc/eth_media.c
net/tipc/ib_media.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/unix/af_unix.c
net/unix/diag.c
net/wimax/wimax-internal.h
net/wireless/core.c
net/wireless/core.h
net/wireless/ibss.c
net/wireless/nl80211.c
net/wireless/radiotap.c
net/wireless/sysfs.h
net/xfrm/xfrm_hash.h
net/xfrm/xfrm_ipcomp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/checkpatch.pl
scripts/coccinelle/api/devm_request_and_ioremap.cocci [deleted file]
scripts/show_delta
scripts/tags.sh
security/apparmor/apparmorfs.c
security/apparmor/crypto.c
security/apparmor/include/policy.h
security/apparmor/policy.c
security/lsm_audit.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/avc.h
sound/arm/pxa2xx-pcm.c
sound/core/compress_offload.c
sound/pci/ac97/ac97_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdsp.c
sound/ppc/keywest.c
sound/soc/atmel/atmel-pcm.c
sound/soc/blackfin/bf5xx-ac97-pcm.c
sound/soc/blackfin/bf5xx-i2s-pcm.c
sound/soc/blackfin/bf6xx-i2s.c
sound/soc/codecs/88pm860x-codec.c
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/max98095.c
sound/soc/codecs/pcm1681.c
sound/soc/codecs/pcm1792a.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/davinci/davinci-pcm.c
sound/soc/fsl/fsl_dma.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-mc13783.c
sound/soc/fsl/imx-pcm-fiq.c
sound/soc/fsl/imx-sgtl5000.c
sound/soc/fsl/imx-ssi.c
sound/soc/fsl/imx-ssi.h
sound/soc/fsl/imx-wm8962.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/jz4740/jz4740-pcm.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/nuc900/nuc900-pcm.c
sound/soc/omap/Kconfig
sound/soc/omap/omap-pcm.c
sound/soc/pxa/pxa2xx-pcm.c
sound/soc/s6000/s6000-pcm.c
sound/soc/samsung/dma.c
sound/soc/samsung/idma.c
sound/soc/sh/rcar/rsnd.h
sound/soc/soc-core.c
sound/usb/usx2y/us122l.c
sound/usb/usx2y/usbusx2yaudio.c
sound/usb/usx2y/usx2yhwdeppcm.c
tools/lib/lk/debugfs.c
tools/perf/Makefile
tools/perf/arch/arm/Makefile
tools/perf/arch/arm/include/perf_regs.h [new file with mode: 0644]
tools/perf/arch/arm/util/unwind.c [new file with mode: 0644]
tools/perf/arch/x86/util/tsc.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/config/feature-tests.mak
tools/perf/util/annotate.c
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/machine.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/symbol-elf.c
tools/perf/util/trace-event-parse.c
tools/perf/util/unwind.c
tools/power/x86/turbostat/turbostat.c
tools/testing/ktest/examples/crosstests.conf
tools/testing/selftests/timers/posix_timers.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e6c4404c80ade376feff7b6df406a..0640e16504832e43c2d3b9bb82b6b5fe3f5bea48 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
diff --git a/Documentation/ABI/stable/sysfs-bus-usb b/Documentation/ABI/stable/sysfs-bus-usb
index 2be603c52a240fa55f1a630d4f3567cc9c134d19..a6b68572474062bf04329a434cb27bbf825d19b8 100644 (file)
@@ -37,8 +37,8 @@ Description:
                that the USB device has been connected to the machine.  This
                file is read-only.
 Users:
-               PowerTOP <power@bughost.org>
-               http://www.lesswatts.org/projects/powertop/
+               PowerTOP <powertop@lists.01.org>
+               https://01.org/powertop/
 
 What:          /sys/bus/usb/device/.../power/active_duration
 Date:          January 2008
@@ -57,8 +57,8 @@ Description:
                will give an integer percentage.  Note that this does not
                account for counter wrap.
 Users:
-               PowerTOP <power@bughost.org>
-               http://www.lesswatts.org/projects/powertop/
+               PowerTOP <powertop@lists.01.org>
+               https://01.org/powertop/
 
 What:          /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
 Date:          January 2008
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/testing/sysfs-class-net-batman-adv
index bdc00707c751b955d997aa7ce0c51bd8b0222ca5..7f34a95bb9634c9d8d1d9505d31b68c36bc58f76 100644 (file)
@@ -1,13 +1,13 @@
 
 What:           /sys/class/net/<iface>/batman-adv/iface_status
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Indicates the status of <iface> as it is seen by batman.
 
 What:           /sys/class/net/<iface>/batman-adv/mesh_iface
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 The /sys/class/net/<iface>/batman-adv/mesh_iface file
                 displays the batman mesh interface this <iface>
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index bdcd8b4e38f2dd404acdc36bcd123f72ff5d2ed3..0baa657b18c4719048bf7048affea6fba8f79221 100644 (file)
@@ -1,22 +1,23 @@
 
 What:           /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Indicates whether the batman protocol messages of the
                 mesh <mesh_iface> shall be aggregated or not.
 
-What:           /sys/class/net/<mesh_iface>/mesh/ap_isolation
+What:           /sys/class/net/<mesh_iface>/mesh/<vlan_subdir>/ap_isolation
 Date:           May 2011
-Contact:        Antonio Quartulli <ordex@autistici.org>
+Contact:        Antonio Quartulli <antonio@meshcoding.com>
 Description:
                 Indicates whether the data traffic going from a
                 wireless client to another wireless client will be
-                silently dropped.
+                silently dropped. <vlan_subdir> is empty when referring
+               to the untagged lan.
 
 What:           /sys/class/net/<mesh_iface>/mesh/bonding
 Date:           June 2010
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact:        Simon Wunderlich <sw@simonwunderlich.de>
 Description:
                 Indicates whether the data traffic going through the
                 mesh will be sent using multiple interfaces at the
@@ -24,7 +25,7 @@ Description:
 
 What:           /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
 Date:           November 2011
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact:        Simon Wunderlich <sw@simonwunderlich.de>
 Description:
                 Indicates whether the bridge loop avoidance feature
                 is enabled. This feature detects and avoids loops
@@ -41,21 +42,21 @@ Description:
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the bandwidth which is propagated by this
                 node if gw_mode was set to 'server'.
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_mode
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the state of the gateway features. Can be
                 either 'off', 'client' or 'server'.
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_sel_class
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the selection criteria this node will use
                 to choose a gateway if gw_mode was set to 'client'.
@@ -77,25 +78,14 @@ Description:
 
 What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the interval in milliseconds in which batman
                 sends its protocol messages.
 
 What:           /sys/class/net/<mesh_iface>/mesh/routing_algo
 Date:           Dec 2011
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the routing protocol this mesh instance
                 uses to find the optimal paths through the mesh.
-
-What:           /sys/class/net/<mesh_iface>/mesh/vis_mode
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Each batman node only maintains information about its
-                own local neighborhood, therefore generating graphs
-                showing the topology of the entire mesh is not easily
-                feasible without having a central instance to collect
-                the local topologies from all nodes. This file allows
-                to activate the collecting (server) mode.
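The batman-adv tunables documented above (gw_mode, gw_sel_class, orig_interval, routing_algo and the rest) are ordinary sysfs attributes, so they can be read and changed like small text files. A minimal sketch follows, assuming a batman-adv mesh interface named bat0 is already up; the interface name and the example value are illustrative, not part of this commit:

    # read_mesh_tunables.py - minimal sketch, assumes a batman-adv interface "bat0"
    import os

    MESH_DIR = "/sys/class/net/bat0/mesh"   # hypothetical interface name

    def read_attr(name):
        # each tunable is a small text file under the mesh/ directory
        with open(os.path.join(MESH_DIR, name)) as f:
            return f.read().strip()

    def write_attr(name, value):
        # writes take effect immediately; no daemon restart is needed
        with open(os.path.join(MESH_DIR, name), "w") as f:
            f.write(str(value))

    if __name__ == "__main__":
        for attr in ("gw_mode", "gw_sel_class", "orig_interval", "routing_algo"):
            print(attr, "=", read_attr(attr))
        # example: send originator messages every 2000 ms
        write_attr("orig_interval", 2000)
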
diff --git a/Documentation/ABI/testing/sysfs-class-powercap b/Documentation/ABI/testing/sysfs-class-powercap
new file mode 100644 (file)
index 0000000..db3b3ff
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-powercap
@@ -0,0 +1,152 @@
+What:          /sys/class/powercap/
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               The powercap/ class sub directory belongs to the power cap
+               subsystem. Refer to
+               Documentation/power/powercap/powercap.txt for details.
+
+What:          /sys/class/powercap/<control type>
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               A <control type> is a unique name under /sys/class/powercap.
+               Here <control type> determines how the power is going to be
+               controlled. A <control type> can contain multiple power zones.
+
+What:          /sys/class/powercap/<control type>/enabled
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               This allows to enable/disable power capping for a "control type".
+               This status affects every power zone using this "control type".
+
+What:          /sys/class/powercap/<control type>/<power zone>
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               A power zone is a single or a collection of devices, which can
+               be independently monitored and controlled. A power zone sysfs
+               entry is qualified with the name of the <control type>.
+               E.g. intel-rapl:0:1:1.
+
+What:          /sys/class/powercap/<control type>/<power zone>/<child power zone>
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Power zones may be organized in a hierarchy in which child
+               power zones provide monitoring and control for a subset of
+               devices under the parent. For example, if there is a parent
+               power zone for a whole CPU package, each CPU core in it can
+               be a child power zone.
+
+What:          /sys/class/powercap/.../<power zone>/name
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Specifies the name of this power zone.
+
+What:          /sys/class/powercap/.../<power zone>/energy_uj
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Current energy counter in micro-joules. Write "0" to reset.
+               If the counter can not be reset, then this attribute is
+               read-only.
+
+What:          /sys/class/powercap/.../<power zone>/max_energy_range_uj
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Range of the above energy counter in micro-joules.
+
+
+What:          /sys/class/powercap/.../<power zone>/power_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Current power in micro-watts.
+
+What:          /sys/class/powercap/.../<power zone>/max_power_range_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Range of the above power value in micro-watts.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_name
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Each power zone can define one or more constraints. Each
+               constraint can have an optional name. Here "X" can have values
+               from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_power_limit_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Power limit in micro-watts should be applicable for
+               the time window specified by "constraint_X_time_window_us".
+               Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_time_window_us
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Time window in micro seconds. This is used along with
+               constraint_X_power_limit_uw to define a power constraint.
+               Here "X" can have values from 0 to max integer.
+
+
+What:          /sys/class/powercap/<control type>/.../constraint_X_max_power_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Maximum allowed power in micro watts for this constraint.
+               Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/<control type>/.../constraint_X_min_power_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Minimum allowed power in micro watts for this constraint.
+               Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_max_time_window_us
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Maximum allowed time window in micro seconds for this
+               constraint. Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_min_time_window_us
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Minimum allowed time window in micro seconds for this
+               constraint. Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/enabled
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               This allows to enable/disable power capping at power zone level.
+               This applies to current power zone and its children.
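The powercap ABI added above lays control types and power zones out as a sysfs tree, with the per-zone counters and constraints exposed as attribute files. Below is a hedged sketch that walks that tree and prints each zone's energy counter and first power limit, assuming at least one control type (for example intel-rapl) is registered; the zone names it prints are whatever the platform provides:

    # powercap_walk.py - sketch based on the ABI text above; assumes the powercap
    # class is present under /sys/class/powercap (reading may require root)
    import glob
    import os

    def read(path):
        with open(path) as f:
            return f.read().strip()

    if __name__ == "__main__":
        # every power zone (parents and children alike) shows up under the class dir
        for zone in sorted(glob.glob("/sys/class/powercap/*")):
            energy = os.path.join(zone, "energy_uj")
            if not os.path.isdir(zone) or not os.path.exists(energy):
                continue  # e.g. the bare <control type> entry carries no counter
            name = read(os.path.join(zone, "name"))
            line = "%s: name=%s energy_uj=%s" % (os.path.basename(zone), name, read(energy))
            limit = os.path.join(zone, "constraint_0_power_limit_uw")
            if os.path.exists(limit):
                line += " constraint_0_power_limit_uw=" + read(limit)
            print(line)
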
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 9d43e76708413bdf6b3d25a0b1179714b06680ba..efe449bdf811db7a1c9f74f38a0371d2c0dd692e 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/devices/.../power/
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power directory contains attributes
                allowing the user space to check and modify some power
@@ -8,7 +8,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/wakeup attribute allows the user
                space to check if the device is enabled to wake up the system
@@ -34,7 +34,7 @@ Description:
 
 What:          /sys/devices/.../power/control
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/control attribute allows the user
                space to control the run-time power management of the device.
@@ -53,7 +53,7 @@ Description:
 
 What:          /sys/devices/.../power/async
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../async attribute allows the user space to
                enable or disable the device's suspend and resume callbacks to
@@ -79,7 +79,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_count
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_count attribute contains the number
                of signaled wakeup events associated with the device.  This
@@ -88,7 +88,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_active_count
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_active_count attribute contains the
                number of times the processing of wakeup events associated with
@@ -98,7 +98,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_abort_count
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_abort_count attribute contains the
                number of times the processing of a wakeup event associated with
@@ -109,7 +109,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_expire_count
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_expire_count attribute contains the
                number of times a wakeup event associated with the device has
@@ -119,7 +119,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_active
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_active attribute contains either 1,
                or 0, depending on whether or not a wakeup event associated with
@@ -129,7 +129,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_total_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_total_time_ms attribute contains
                the total time of processing wakeup events associated with the
@@ -139,7 +139,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_max_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_max_time_ms attribute contains
                the maximum time of processing a single wakeup event associated
@@ -149,7 +149,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_last_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_last_time_ms attribute contains
                the value of the monotonic clock corresponding to the time of
@@ -160,7 +160,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_prevent_sleep_time_ms
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
                contains the total time the device has been preventing
@@ -189,7 +189,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_latency_us
 Date:          March 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_resume_latency_us attribute
                contains the PM QoS resume latency limit for the given device,
@@ -207,7 +207,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_no_power_off
 Date:          September 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_no_power_off attribute
                is used for manipulating the PM QoS "no power off" flag.  If
@@ -222,7 +222,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_remote_wakeup
 Date:          September 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_remote_wakeup attribute
                is used for manipulating the PM QoS "remote wakeup required"
index 217772615d0288f996e05cc128b98c9a40d95042..205a7387844106804de496ec9b3a8c372b725977 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/power/
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power directory will contain files that will
                provide a unified interface to the power management
@@ -8,7 +8,7 @@ Description:
 
 What:          /sys/power/state
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/state file controls the system power state.
                Reading from this file returns what states are supported,
@@ -22,7 +22,7 @@ Description:
 
 What:          /sys/power/disk
 Date:          September 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/disk file controls the operating mode of the
                suspend-to-disk mechanism.  Reading from this file returns
@@ -67,7 +67,7 @@ Description:
 
 What:          /sys/power/image_size
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/image_size file controls the size of the image
                created by the suspend-to-disk mechanism.  It can be written a
@@ -84,7 +84,7 @@ Description:
 
 What:          /sys/power/pm_trace
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/pm_trace file controls the code which saves the
                last PM event point in the RTC across reboots, so that you can
@@ -133,7 +133,7 @@ Description:
 
 What:          /sys/power/pm_async
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/pm_async file controls the switch allowing the
                user space to enable or disable asynchronous suspend and resume
@@ -146,7 +146,7 @@ Description:
 
 What:          /sys/power/wakeup_count
 Date:          July 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wakeup_count file allows user space to put the
                system into a sleep state while taking into account the
@@ -161,7 +161,7 @@ Description:
 
 What:          /sys/power/reserved_size
 Date:          May 2011
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/reserved_size file allows user space to control
                the amount of memory reserved for allocations made by device
@@ -175,7 +175,7 @@ Description:
 
 What:          /sys/power/autosleep
 Date:          April 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/autosleep file can be written one of the strings
                returned by reads from /sys/power/state.  If that happens, a
@@ -192,7 +192,7 @@ Description:
 
 What:          /sys/power/wake_lock
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wake_lock file allows user space to create
                wakeup source objects and activate them on demand (if one of
@@ -219,7 +219,7 @@ Description:
 
 What:          /sys/power/wake_unlock
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wake_unlock file allows user space to deactivate
                wakeup sources created with the help of /sys/power/wake_lock.
index 14129f149a75432589f3bc925f7776a59354ef62..5e983031cc11be35fba1aab90546db5a7de1eeb3 100644 (file)
@@ -101,14 +101,23 @@ style to do this even if your device holds the default setting,
 because this shows that you did think about these issues wrt. your
 device.
 
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
 
-       int dma_set_mask(struct device *dev, u64 mask);
+       int dma_set_mask_and_coherent(struct device *dev, u64 mask);
 
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
 
-       int dma_set_coherent_mask(struct device *dev, u64 mask);
+       The query for streaming mappings is performed via a call to
+       dma_set_mask():
+
+               int dma_set_mask(struct device *dev, u64 mask);
+
+       The query for consistent allocations is performed via a call
+       to dma_set_coherent_mask():
+
+               int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 Here, dev is a pointer to the device struct of your device, and mask
 is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@ exactly why.
 
 The standard 32-bit addressing device would do something like this:
 
-       if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                printk(KERN_WARNING
                       "mydev: No suitable DMA available.\n");
                goto ignore_this_device;
@@ -171,22 +180,20 @@ the case would look like this:
 
        int using_dac, consistent_using_dac;
 
-       if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
                using_dac = 1;
                consistent_using_dac = 1;
-               dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
-       } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+       } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                using_dac = 0;
                consistent_using_dac = 0;
-               dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
        } else {
                printk(KERN_WARNING
                       "mydev: No suitable DMA available.\n");
                goto ignore_this_device;
        }
 
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask will always be able to be set to the same or a
+smaller mask than the streaming mask. However, for the rare case that a
 device driver only uses consistent allocations, one would have to
 check the return value from dma_set_coherent_mask().
 
@@ -199,9 +206,9 @@ address you might do something like:
                goto ignore_this_device;
        }
 
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided.  The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided.  The
+kernel will use this information later when you make DMA mappings.
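 
 As one hedged illustration (the buffer and size below are placeholders), a
 later streaming mapping (covered in detail further on in this document)
 then simply passes the same struct device and relies on the saved mask:
 
 	dma_addr_t dma_handle;
 
 	dma_handle = dma_map_single(dev, buffer, size, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma_handle)) {
 		/* reduce current DMA mapping usage, delay and try again */
 	}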
 
 There is a case which we are aware of at this time, which is worth
 mentioning in this documentation.  If your device supports multiple
index 78a6c569d204bc0073e33fe093d34a8137e5eaf4..e865279cec5855818d83eb281fcce06ee0510040 100644 (file)
@@ -141,6 +141,14 @@ won't change the current mask settings.  It is more intended as an
 internal API for use by the platform than an external API for use by
 driver writers.
 
+int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
 int
 dma_set_mask(struct device *dev, u64 mask)
 
index bccf602a87f5c2054b146c826725ac183863a8ad..6f458564d625a3d82601703ceed6a814e9e14585 100644 (file)
@@ -525,8 +525,9 @@ corresponding register block for you.
 6. Other interesting functions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-pci_find_slot()                        Find pci_dev corresponding to given bus and
-                               slot numbers.
+pci_get_domain_bus_and_slot()  Find pci_dev corresponding to given domain,
+                               bus and slot number. If the device is
+                               found, its reference count is increased.
 pci_set_power_state()          Set PCI Power Management state (0=D0 ... 3=D3)
 pci_find_capability()          Find specified capability in device's capability
                                list.
@@ -582,7 +583,8 @@ having sane locking.
 
 pci_find_device()      Superseded by pci_get_device()
 pci_find_subsys()      Superseded by pci_get_subsys()
-pci_find_slot()                Superseded by pci_get_slot()
+pci_find_slot()                Superseded by pci_get_domain_bus_and_slot()
+pci_get_slot()         Superseded by pci_get_domain_bus_and_slot()
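
 As a hedged sketch of converting such a lookup (the domain, bus and slot
 values below are made-up examples), a caller of the old pci_find_slot()
 interface would now do something like:

 	struct pci_dev *pdev;

 	/* domain 0, bus 0, device 3, function 0 (example values only) */
 	pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(3, 0));
 	if (pdev) {
 		/* ... use pdev ... */
 		pci_dev_put(pdev);      /* drop the reference taken by the lookup */
 	}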
 
 
 The alternative is the traditional PCI device driver that walks PCI
index febbb1ba4d2317b984e7217796ac39151f867531..784841caa6e63824ff2503351fb12e682f01e3b3 100644 (file)
@@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
 
 When to use this method is described in detail on the
 Linux/ACPI home page:
-http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+https://01.org/linux-acpi/documentation/overriding-dsdt
index 264e9841563aa6ebb162b545fb3442b85f00c38b..d9995f1f51b3eb9e678a557a471e4d387db49ca8 100644 (file)
@@ -18,17 +18,17 @@ this byte for application use, with the following caveats:
            parameters containing user virtual addresses *must* have
            their top byte cleared before trapping to the kernel.
 
-       (2) Tags are not guaranteed to be preserved when delivering
-           signals. This means that signal handlers in applications
-           making use of tags cannot rely on the tag information for
-           user virtual addresses being maintained for fields inside
-           siginfo_t. One exception to this rule is for signals raised
-           in response to debug exceptions, where the tag information
+       (2) Non-zero tags are not preserved when delivering signals.
+           This means that signal handlers in applications making use
+           of tags cannot rely on the tag information for user virtual
+           addresses being maintained for fields inside siginfo_t.
+           One exception to this rule is for signals raised in response
+           to watchpoint debug exceptions, where the tag information
            will be preserved.
 
        (3) Special care should be taken when using tagged pointers,
            since it is likely that C compilers will not hazard two
-           addresses differing only in the upper bits.
+           virtual addresses differing only in the upper byte.
 
 The architecture prevents the use of a tagged PC, so the upper byte will
 be set to a sign-extension of bit 55 on exception return.
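
 As a purely illustrative sketch (this helper is not part of any kernel or
 C library interface), an application opting in to tagging might construct
 a tagged pointer like this, remembering to clear the tag again before
 passing the address to a system call:

 	#include <stdint.h>

 	/* place an 8-bit tag in bits 63:56 of a userspace pointer */
 	static inline void *tag_pointer(void *p, uint8_t tag)
 	{
 		uintptr_t addr = (uintptr_t)p & ~((uintptr_t)0xff << 56);

 		return (void *)(addr | ((uintptr_t)tag << 56));
 	}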
index d18ecd827c408d0fb42a8d8a7fc669ce88c95fc2..929d9904f74b7eb94bac71e81308b0bf335c3108 100644 (file)
@@ -6,6 +6,8 @@ capability.txt
        - Generic Block Device Capability (/sys/block/<device>/capability)
 cfq-iosched.txt
        - CFQ IO scheduler tunables
+cmdline-partition.txt
+       - how to specify block device partitions on kernel command line
 data-integrity.txt
        - Block data integrity
 deadline-iosched.txt
index 2bbf4cc40c3f7f92a02df9bd42b1bdabc8a93791..525b9f6d7fb49e4c9bbd5f7ea76833dcde640e59 100644 (file)
@@ -1,9 +1,9 @@
-Embedded device command line partition
+Embedded device command line partition parsing
 =====================================================================
 
-Read block device partition table from command line.
-The partition used for fixed block device (eMMC) embedded device.
-It is no MBR, save storage space. Bootloader can be easily accessed
+Support for reading the block device partition table from the command line.
+It is typically used on embedded devices with fixed block storage (eMMC).
+Such a device has no MBR, which saves storage space. The bootloader can be
 easily accessed by the absolute address of its data on the block device,
 and users can easily change the partition layout.
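
 For instance (an illustrative command line only; the exact syntax and the
 available size suffixes are documented in the rest of this file), a simple
 two-partition eMMC layout might be passed as:

 	blkdevparts=mmcblk0:1m(boot),-(rootfs)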
 
index 4848db8c71ff5118244888b620251f49831c92f2..8a4da64e02a8b08f3ad7ed6359ef713d7afd3b54 100644 (file)
@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
        nlh->nlmsg_seq = seq++;
        nlh->nlmsg_pid = getpid();
        nlh->nlmsg_type = NLMSG_DONE;
-       nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+       nlh->nlmsg_len = size;
        nlh->nlmsg_flags = 0;
 
        m = NLMSG_DATA(nlh);
index 23721d3be3e6160e2057b0b11ef002e2828f458f..80b72419ffd8a83dfc8e150d5f5088ec69e4f0d9 100644 (file)
@@ -414,6 +414,7 @@ Your cooperation is appreciated.
                200 = /dev/net/tun      TAP/TUN network device
                201 = /dev/button/gulpb Transmeta GULP-B buttons
                202 = /dev/emd/ctl      Enhanced Metadisk RAID (EMD) control
+               203 = /dev/cuse         Cuse (character device in user-space)
                204 = /dev/video/em8300         EM8300 DVD decoder control
                205 = /dev/video/em8300_mv      EM8300 DVD decoder video
                206 = /dev/video/em8300_ma      EM8300 DVD decoder audio
index 92d36e2aa87791c75af70f23da1da3169c94f96c..f28d82bbbc56b3f3dff7716ede2f200f315d8e70 100644 (file)
@@ -36,14 +36,18 @@ specific to ARM.
 
        - reg
                Usage: required
-               Value type: <prop-encoded-array>
+               Value type: Integer cells. A register entry, expressed as a pair
+                           of cells, containing base and size.
                Definition: A standard property. Specifies base physical
                            address of CCI control registers common to all
                            interfaces.
 
        - ranges:
                Usage: required
-               Value type: <prop-encoded-array>
+               Value type: Integer cells. An array of range entries, expressed
+                           as a tuple of cells, containing child address,
+                           parent address and the size of the region in the
+                           child address space.
                Definition: A standard property. Follow rules in the ePAPR for
                            hierarchical bus addressing. CCI interfaces
                            addresses refer to the parent node addressing
@@ -74,11 +78,49 @@ specific to ARM.
 
                - reg:
                        Usage: required
-                       Value type: <prop-encoded-array>
+                       Value type: Integer cells. A register entry, expressed
+                                   as a pair of cells, containing base and
+                                   size.
                        Definition: the base address and size of the
                                    corresponding interface programming
                                    registers.
 
+       - CCI PMU node
+
+               Parent node must be CCI interconnect node.
+
+               A CCI pmu node must contain the following properties:
+
+               - compatible
+                       Usage: required
+                       Value type: <string>
+                       Definition: must be "arm,cci-400-pmu"
+
+               - reg:
+                       Usage: required
+                       Value type: Integer cells. A register entry, expressed
+                                   as a pair of cells, containing base and
+                                   size.
+                       Definition: the base address and size of the
+                                   corresponding interface programming
+                                   registers.
+
+               - interrupts:
+                       Usage: required
+                       Value type: Integer cells. Array of interrupt specifier
+                                   entries, as defined in
+                                   ../interrupt-controller/interrupts.txt.
+                       Definition: list of counter overflow interrupts, one per
+                                   counter. The interrupts must be specified
+                                   starting with the cycle counter overflow
+                                   interrupt, followed by counter0 overflow
+                                   interrupt, counter1 overflow interrupt,...
+                                   ,counterN overflow interrupt.
+
+                                   The CCI PMU has an interrupt signal for each
+                                   counter. The number of interrupts must be
+                                   equal to the number of counters.
+
 * CCI interconnect bus masters
 
        Description: masters in the device tree connected to a CCI port
@@ -144,7 +186,7 @@ Example:
                #address-cells = <1>;
                #size-cells = <1>;
                reg = <0x0 0x2c090000 0 0x1000>;
-               ranges = <0x0 0x0 0x2c090000 0x6000>;
+               ranges = <0x0 0x0 0x2c090000 0x10000>;
 
                cci_control0: slave-if@1000 {
                        compatible = "arm,cci-400-ctrl-if";
@@ -163,6 +205,16 @@ Example:
                        interface-type = "ace";
                        reg = <0x5000 0x1000>;
                };
+
+               pmu@9000 {
+                        compatible = "arm,cci-400-pmu";
+                        reg = <0x9000 0x5000>;
+                        interrupts = <0 101 4>,
+                                     <0 102 4>,
+                                     <0 103 4>,
+                                     <0 104 4>,
+                                     <0 105 4>;
+               };
        };
 
 This CCI node corresponds to a CCI component whose control registers sits
index e1f343c7a34b7b10ea462a39b5e9a320ef463ba6..f69bcf5a6343bf314b5eef199999c5a676131e74 100644 (file)
@@ -28,7 +28,7 @@ The three cells in order are:
 dependent:
   - bit 7-0: peripheral identifier for the hardware handshaking interface. The
   identifier can be different for tx and rx.
-  - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
+  - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
new file mode 100644 (file)
index 0000000..897cfcd
--- /dev/null
@@ -0,0 +1,23 @@
+I2C for R-Car platforms
+
+Required properties:
+- compatible: Must be one of
+       "renesas,i2c-rcar"
+       "renesas,i2c-r8a7778"
+       "renesas,i2c-r8a7779"
+       "renesas,i2c-r8a7790"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- interrupts: interrupt specifier.
+
+Optional properties:
+- clock-frequency: desired I2C bus clock frequency in Hz. The absence of this
+  property indicates the default frequency of 100 kHz.
+
+Examples :
+
+i2c0: i2c@e6500000 {
+       compatible = "renesas,i2c-r8a7790";
+       reg = <0 0xe6500000 0 0x428>;
+       interrupts = <0 174 0x4>;
+};
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
deleted file mode 100644 (file)
index eb24693..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-*** Memory binding ***
-
-The /memory node provides basic information about the address and size
-of the physical memory. This node is usually filled or updated by the
-bootloader, depending on the actual memory configuration of the given
-hardware.
-
-The memory layout is described by the following node:
-
-/ {
-       #address-cells = <(n)>;
-       #size-cells = <(m)>;
-       memory {
-               device_type = "memory";
-               reg =  <(baseaddr1) (size1)
-                       (baseaddr2) (size2)
-                       ...
-                       (baseaddrN) (sizeN)>;
-       };
-       ...
-};
-
-A memory node follows the typical device tree rules for "reg" property:
-n:             number of cells used to store base address value
-m:             number of cells used to store size value
-baseaddrX:     defines a base address of the defined memory bank
-sizeX:         the size of the defined memory bank
-
-
-More than one memory bank can be defined.
-
-
-*** Reserved memory regions ***
-
-In /memory/reserved-memory node one can create child nodes describing
-particular reserved (excluded from normal use) memory regions. Such
-memory regions are usually designed for the special usage by various
-device drivers. A good example are contiguous memory allocations or
-memory sharing with other operating system on the same hardware board.
-Those special memory regions might depend on the board configuration and
-devices used on the target system.
-
-Parameters for each memory region can be encoded into the device tree
-with the following convention:
-
-[(label):] (name) {
-       compatible = "linux,contiguous-memory-region", "reserved-memory-region";
-       reg = <(address) (size)>;
-       (linux,default-contiguous-region);
-};
-
-compatible:    one or more of:
-       - "linux,contiguous-memory-region" - enables binding of this
-         region to Contiguous Memory Allocator (special region for
-         contiguous memory allocations, shared with movable system
-         memory, Linux kernel-specific).
-       - "reserved-memory-region" - compatibility is defined, given
-         region is assigned for exclusive usage for by the respective
-         devices.
-
-reg:   standard property defining the base address and size of
-       the memory region
-
-linux,default-contiguous-region: property indicating that the region
-       is the default region for all contiguous memory
-       allocations, Linux specific (optional)
-
-It is optional to specify the base address, so if one wants to use
-autoconfiguration of the base address, '0' can be specified as a base
-address in the 'reg' property.
-
-The /memory/reserved-memory node must contain the same #address-cells
-and #size-cells value as the root node.
-
-
-*** Device node's properties ***
-
-Once regions in the /memory/reserved-memory node have been defined, they
-may be referenced by other device nodes. Bindings that wish to reference
-memory regions should explicitly document their use of the following
-property:
-
-memory-region = <&phandle_to_defined_region>;
-
-This property indicates that the device driver should use the memory
-region pointed by the given phandle.
-
-
-*** Example ***
-
-This example defines a memory consisting of 4 memory banks. 3 contiguous
-regions are defined for Linux kernel, one default of all device drivers
-(named contig_mem, placed at 0x72000000, 64MiB), one dedicated to the
-framebuffer device (labelled display_mem, placed at 0x78000000, 8MiB)
-and one for multimedia processing (labelled multimedia_mem, placed at
-0x77000000, 64MiB). 'display_mem' region is then assigned to fb@12300000
-device for DMA memory allocations (Linux kernel drivers will use CMA is
-available or dma-exclusive usage otherwise). 'multimedia_mem' is
-assigned to scaler@12500000 and codec@12600000 devices for contiguous
-memory allocations when CMA driver is enabled.
-
-The reason for creating a separate region for framebuffer device is to
-match the framebuffer base address to the one configured by bootloader,
-so once Linux kernel drivers starts no glitches on the displayed boot
-logo appears. Scaller and codec drivers should share the memory
-allocations.
-
-/ {
-       #address-cells = <1>;
-       #size-cells = <1>;
-
-       /* ... */
-
-       memory {
-               reg =  <0x40000000 0x10000000
-                       0x50000000 0x10000000
-                       0x60000000 0x10000000
-                       0x70000000 0x10000000>;
-
-               reserved-memory {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-
-                       /*
-                        * global autoconfigured region for contiguous allocations
-                        * (used only with Contiguous Memory Allocator)
-                        */
-                       contig_region@0 {
-                               compatible = "linux,contiguous-memory-region";
-                               reg = <0x0 0x4000000>;
-                               linux,default-contiguous-region;
-                       };
-
-                       /*
-                        * special region for framebuffer
-                        */
-                       display_region: region@78000000 {
-                               compatible = "linux,contiguous-memory-region", "reserved-memory-region";
-                               reg = <0x78000000 0x800000>;
-                       };
-
-                       /*
-                        * special region for multimedia processing devices
-                        */
-                       multimedia_region: region@77000000 {
-                               compatible = "linux,contiguous-memory-region";
-                               reg = <0x77000000 0x4000000>;
-                       };
-               };
-       };
-
-       /* ... */
-
-       fb0: fb@12300000 {
-               status = "okay";
-               memory-region = <&display_region>;
-       };
-
-       scaler: scaler@12500000 {
-               status = "okay";
-               memory-region = <&multimedia_region>;
-       };
-
-       codec: codec@12600000 {
-               status = "okay";
-               memory-region = <&multimedia_region>;
-       };
-};
index 6d1c0988cfc7c7a0466d9f0cc18cc7feacdb0d2b..c67b975c89063f51fa20ae563f601c7c6113fe08 100644 (file)
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
index 8a3d91d47b6af2e3e99d6182e25f21007b129f94..c559f3f36309e57f1eccda86e926771047960cbb 100644 (file)
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
similarity index 93%
rename from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
rename to Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f55f55e87f7647d2d8670c04e470b4..066a78b034ca8589e4de4d31183075768496f81a 100644 (file)
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 * compatible: should be
-       - snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+       - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
 * #address-cells: should be 1.
 * #size-cells: should be 0.
 
index df204e18e030181a23dceebb0bff9de64a84d0a3..6a2a1160a70defdbac92be8850152f1c4448cbde 100644 (file)
@@ -9,12 +9,15 @@ compulsory and any optional properties, common to all SD/MMC drivers, as
 described in mmc.txt, can be used. Additionally the following tmio_mmc-specific
 optional bindings can be used.
 
+Required properties:
+- compatible:  "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
+               "renesas,sdhi-sh7372" - SDHI IP on SH7372 SoC
+               "renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
+               "renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
+               "renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
+               "renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
+               "renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
+               "renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
+
 Optional properties:
 - toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
-
-When used with Renesas SDHI hardware, the following compatibility strings
-configure various model-specific properties:
-
-"renesas,sh7372-sdhi": (default) compatible with SH7372
-"renesas,r8a7740-sdhi":        compatible with R8A7740: certain MMC/SD commands have to
-                       wait for the interface to become idle.
diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
new file mode 100644 (file)
index 0000000..7ff57a1
--- /dev/null
@@ -0,0 +1,28 @@
+TI CPSW Phy mode Selection Device Tree Bindings
+-----------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,am3352-cpsw-phy-sel"
+- reg                  : physical base address and size of the cpsw
+                         registers map
+- reg-names            : names of the register map given in "reg" node
+
+Optional properties:
+- rmii-clock-ext       : If present, the driver will configure the RMII
+                         interface to use an external clock
+
+Examples:
+
+       phy_sel: cpsw-phy-sel@44e10650 {
+               compatible = "ti,am3352-cpsw-phy-sel";
+               reg = <0x44e10650 0x4>;
+               reg-names = "gmii-sel";
+       };
+
+(or)
+       phy_sel: cpsw-phy-sel@44e10650 {
+               compatible = "ti,am3352-cpsw-phy-sel";
+               reg = <0x44e10650 0x4>;
+               reg-names = "gmii-sel";
+               rmii-clock-ext;
+       };
index 2c6be0377f55d0963970972c4d2b494e73f4507f..d2ea4605d0789dc8d11ff3e1fd8686a30431a43b 100644 (file)
@@ -86,6 +86,7 @@ General Properties:
 
 Clock Properties:
 
+  - fsl,cksel        Timer reference clock source.
   - fsl,tclk-period  Timer reference clock period in nanoseconds.
   - fsl,tmr-prsc     Prescaler, divides the output clock.
   - fsl,tmr-add      Frequency compensation value.
@@ -97,7 +98,7 @@ Clock Properties:
   clock. You must choose these carefully for the clock to work right.
   Here is how to figure good values:
 
-  TimerOsc     = system clock               MHz
+  TimerOsc     = selected reference clock   MHz
   tclk_period  = desired clock period       nanoseconds
   NominalFreq  = 1000 / tclk_period         MHz
   FreqDivRatio = TimerOsc / NominalFreq     (must be greater than 1.0)
@@ -114,6 +115,20 @@ Clock Properties:
   Pulse Per Second (PPS) signal, since this will be offered to the PPS
   subsystem to synchronize the Linux clock.
 
+  The reference clock source is determined by the value held in the
+  CKSEL bits of the TMR_CTRL register. The "fsl,cksel" property holds
+  the value that is written directly to those bits, so, according to
+  the reference manual, the following clock sources can be used:
+
+  <0> - external high precision timer reference clock (TSEC_TMR_CLK
+        input is used for this purpose);
+  <1> - eTSEC system clock;
+  <2> - eTSEC1 transmit clock;
+  <3> - RTC clock input.
+
+  When this property is not used, the eTSEC system clock serves as the
+  IEEE 1588 timer reference clock.
+
 Example:
 
        ptp_clock@24E00 {
@@ -121,6 +136,7 @@ Example:
                reg = <0x24E00 0xB0>;
                interrupts = <12 0x8 13 0x8>;
                interrupt-parent = < &ipic >;
+               fsl,cksel       = <1>;
                fsl,tclk-period = <10>;
                fsl,tmr-prsc    = <100>;
                fsl,tmr-add     = <0x999999A4>;
index eabcb4b5db6e6711b244ea9a35e7b4ff711c12ef..d5d26d443693ce70db814c9466bbc72d87d6b7fe 100644 (file)
@@ -1,9 +1,9 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
        core, plus an identifier for the specific instance, such
-       as "samsung,exynos5440-pcie".
+       as "samsung,exynos5440-pcie" or "fsl,imx6q-pcie".
 - reg: base addresses and lengths of the pcie controller,
        the phy controller, additional register for the phy controller.
 - interrupts: interrupt values for level interrupt,
@@ -21,6 +21,11 @@ Required properties:
 - num-lanes: number of lanes to use
 - reset-gpio: gpio pin number of power good signal
 
+Optional properties for fsl,imx6q-pcie
+- power-on-gpio: gpio pin number of power-enable signal
+- wake-up-gpio: gpio pin number of incoming wakeup signal
+- disable-gpio: gpio pin number of outgoing rfkill/endpoint disable signal
+
 Example:
 
 SoC specific DT Entry:
diff --git a/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt
new file mode 100644 (file)
index 0000000..6ad1adf
--- /dev/null
@@ -0,0 +1,53 @@
+* Renesas SH-Mobile Serial Communication Interface
+
+Required properties:
+- compatible : Should be "renesas,sci-<port type>-uart", where <port type> may be
+  SCI, SCIF, IRDA, SCIFA or SCIFB.
+- reg : Address and length of the register set for the device
+- interrupts : Should contain the following IRQs: ERI, RXI, TXI and BRI.
+- cell-index : The device id.
+- renesas,scscr : Should contain a bitfield used by the Serial Control Register.
+  b7 = SCSCR_TIE
+  b6 = SCSCR_RIE
+  b5 = SCSCR_TE
+  b4 = SCSCR_RE
+  b3 = SCSCR_REIE
+  b2 = SCSCR_TOIE
+  b1 = SCSCR_CKE1
+  b0 = SCSCR_CKE0
+- renesas,scbrr-algo-id : Algorithm ID for the Bit Rate Register
+  1 = SCBRR_ALGO_1 ((clk + 16 * bps) / (16 * bps) - 1)
+  2 = SCBRR_ALGO_2 ((clk + 16 * bps) / (32 * bps) - 1)
+  3 = SCBRR_ALGO_3 (((clk * 2) + 16 * bps) / (16 * bps) - 1)
+  4 = SCBRR_ALGO_4 (((clk * 2) + 16 * bps) / (32 * bps) - 1)
+  5 = SCBRR_ALGO_5 (((clk * 1000 / 32) / bps) - 1)
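+
+  (As a hypothetical worked example: with a 26 MHz functional clock and a
+  115200 bps rate, SCBRR_ALGO_4 gives ((26000000 * 2) + 16 * 115200) /
+  (32 * 115200) - 1 = 14 - 1 = 13 with integer arithmetic.)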
+
+Optional properties:
+- renesas,autoconf : Set if device is capable of auto configuration
+- renesas,regtype : Override the register layout. In most cases you can rely
+  on auto-probing (omit this property or set to 0) but some legacy devices
+  use a non-default register layout. Possible layouts are
+  0 = SCIx_PROBE_REGTYPE (default)
+  1 = SCIx_SCI_REGTYPE
+  2 = SCIx_IRDA_REGTYPE
+  3 = SCIx_SCIFA_REGTYPE
+  4 = SCIx_SCIFB_REGTYPE
+  5 = SCIx_SH2_SCIF_FIFODATA_REGTYPE
+  6 = SCIx_SH3_SCIF_REGTYPE
+  7 = SCIx_SH4_SCIF_REGTYPE
+  8 = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE
+  9 = SCIx_SH4_SCIF_FIFODATA_REGTYPE
+ 10 = SCIx_SH7705_SCIF_REGTYPE
+
+
+Example:
+       sci@0xe6c50000 {
+               compatible = "renesas,sci-SCIFA-uart";
+               interrupt-parent = <&intca>;
+               reg = <0xe6c50000 0x100>;
+               interrupts = <0x0c20>, <0x0c20>, <0x0c20>, <0x0c20>;
+               cell-index = <1>;
+               renesas,scscr = <0x30>;
+               renesas,scbrr-algo-id = <4>;
+               renesas,autoconf;
+       };
index fe7afe22538149706eab5727c989d91a8530c387..21ef48f0778f25f9415fd856408aeb78747f260c 100644 (file)
@@ -192,8 +192,8 @@ prototypes:
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
                                unsigned long *);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -426,7 +426,9 @@ prototypes:
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
index 11a0a40ce445fa5c2f6fb0276dd63907eb55574d..aed6b94160b1cec74face20a05f9c03f03a057f2 100644 (file)
@@ -29,15 +29,16 @@ This document contains the following sections:
         (6) Index registration
         (7) Data file registration
         (8) Miscellaneous object registration
-        (9) Setting the data file size
+        (9) Setting the data file size
        (10) Page alloc/read/write
        (11) Page uncaching
        (12) Index and data file consistency
-       (13) Miscellaneous cookie operations
-       (14) Cookie unregistration
-       (15) Index invalidation
-       (16) Data file invalidation
-       (17) FS-Cache specific page flags.
+       (13) Cookie enablement
+       (14) Miscellaneous cookie operations
+       (15) Cookie unregistration
+       (16) Index invalidation
+       (17) Data file invalidation
+       (18) FS-Cache specific page flags.
 
 
 =============================
@@ -334,7 +335,8 @@ the path to the file:
        struct fscache_cookie *
        fscache_acquire_cookie(struct fscache_cookie *parent,
                               const struct fscache_object_def *def,
-                              void *netfs_data);
+                              void *netfs_data,
+                              bool enable);
 
 This function creates an index entry in the index represented by parent,
 filling in the index entry by calling the operations pointed to by def.
@@ -350,6 +352,10 @@ object needs to be created somewhere down the hierarchy.  Furthermore, an index
 may be created in several different caches independently at different times.
 This is all handled transparently, and the netfs doesn't see any of it.
 
+A cookie will be created in the disabled state if enabled is false.  A cookie
+must be enabled to do anything with it.  A disabled cookie can be enabled by
+calling fscache_enable_cookie() (see below).
+
 For example, with AFS, a cell would be added to the primary index.  This index
 entry would have a dependent inode containing a volume location index for the
 volume mappings within this cell:
@@ -357,7 +363,7 @@ volume mappings within this cell:
        cell->cache =
                fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                       &afs_cell_cache_index_def,
-                                      cell);
+                                      cell, true);
 
 Then when a volume location was accessed, it would be entered into the cell's
 index and an inode would be allocated that acts as a volume type and hash chain
@@ -366,7 +372,7 @@ combination:
        vlocation->cache =
                fscache_acquire_cookie(cell->cache,
                                       &afs_vlocation_cache_index_def,
-                                      vlocation);
+                                      vlocation, true);
 
 And then a particular flavour of volume (R/O for example) could be added to
 that index, creating another index for vnodes (AFS inode equivalents):
@@ -374,7 +380,7 @@ that index, creating another index for vnodes (AFS inode equivalents):
        volume->cache =
                fscache_acquire_cookie(vlocation->cache,
                                       &afs_volume_cache_index_def,
-                                      volume);
+                                      volume, true);
 
 
 ======================
@@ -388,7 +394,7 @@ the object definition should be something other than index type.
        vnode->cache =
                fscache_acquire_cookie(volume->cache,
                                       &afs_vnode_cache_object_def,
-                                      vnode);
+                                      vnode, true);
 
 
 =================================
@@ -404,7 +410,7 @@ it would be some other type of object such as a data file.
        xattr->cache =
                fscache_acquire_cookie(vnode->cache,
                                       &afs_xattr_cache_object_def,
-                                      xattr);
+                                      xattr, true);
 
 Miscellaneous objects might be used to store extended attributes or directory
 entries for example.
@@ -733,6 +739,47 @@ Note that partial updates may happen automatically at other times, such as when
 data blocks are added to a data file object.
 
 
+=================
+COOKIE ENABLEMENT
+=================
+
+Cookies exist in one of two states: enabled and disabled.  If a cookie is
+disabled, it ignores all attempts to acquire child cookies; check, update or
+invalidate its state; allocate, read or write backing pages - though it is
+still possible to uncache pages and relinquish the cookie.
+
+The initial enablement state is set by fscache_acquire_cookie(), but the cookie
+can be enabled or disabled later.  To disable a cookie, call:
+    
+       void fscache_disable_cookie(struct fscache_cookie *cookie,
+                                   bool invalidate);
+    
+If the cookie is not already disabled, this locks the cookie against other
+enable and disable ops, marks the cookie as being disabled, discards or
+invalidates any backing objects and waits for cessation of activity on any
+associated object before unlocking the cookie.
+
+All possible failures are handled internally.  The caller should consider
+calling fscache_uncache_all_inode_pages() afterwards to make sure all page
+markings are cleared up.
+    
+Cookies can be enabled or reenabled with:
+    
+       void fscache_enable_cookie(struct fscache_cookie *cookie,
+                                  bool (*can_enable)(void *data),
+                                  void *data)
+    
+If the cookie is not already enabled, this locks the cookie against other
+enable and disable ops, invokes can_enable() and, if the cookie is not an index
+cookie, will begin the procedure of acquiring backing objects.
+
+The optional can_enable() function is passed the data argument and returns a
+ruling as to whether or not enablement should actually be permitted to begin.
+
+All possible failures are handled internally.  The cookie will only be marked
+as enabled if provisional backing objects are allocated.
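+
+As a hedged sketch of combining the two calls (the netfs structures and the
+can_enable() test below are invented purely for illustration), a data file
+cookie might be acquired in the disabled state and enabled only once the
+backing inode is known to be usable:
+
+	static bool myfs_can_enable(void *data)
+	{
+		struct myfs_inode *inode = data;   /* hypothetical netfs type */
+
+		return !inode->remotely_deleted;
+	}
+
+	inode->cookie = fscache_acquire_cookie(volume->cache,
+					       &myfs_inode_cache_object_def,
+					       inode, false);
+	...
+	fscache_enable_cookie(inode->cookie, myfs_can_enable, inode);
+
+	/* and on an error or deletion path: */
+	fscache_disable_cookie(inode->cookie, true);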
+
+
 ===============================
 MISCELLANEOUS COOKIE OPERATIONS
 ===============================
@@ -778,7 +825,7 @@ COOKIE UNREGISTRATION
 To get rid of a cookie, this function should be called.
 
        void fscache_relinquish_cookie(struct fscache_cookie *cookie,
-                                      int retire);
+                                      bool retire);
 
 If retire is non-zero, then the object will be marked for recycling, and all
 copies of it will be removed from all active caches in which it is present.
index deb48b5fd88327d8249b19aa8de16aab6855c191..47fa5a3e91858fedf5d6a0009e35f0e6edf920e8 100644 (file)
@@ -573,8 +573,8 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        struct page* (*get_xip_page)(struct address_space *, sector_t,
                        int);
        /* migrate the contents of a page to the specified target */
@@ -790,7 +790,9 @@ struct file_operations {
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -825,10 +827,16 @@ otherwise noted.
 
   aio_read: called by io_submit(2) and other asynchronous I/O operations
 
+  read_iter: aio_read replacement, called by io_submit(2) and other
+       asynchronous I/O operations
+
   write: called by write(2) and related system calls
 
   aio_write: called by io_submit(2) and other asynchronous I/O operations
 
+  write_iter: aio_write replacement, called by io_submit(2) and other
+       asynchronous I/O operations
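+
+  As a rough sketch (hypothetical "myfs" methods, using only the prototypes
+  listed above), the iterator-based methods are wired into file_operations
+  alongside, or instead of, their aio_* counterparts:
+
+	static ssize_t myfs_read_iter(struct kiocb *iocb,
+				      struct iov_iter *iter, loff_t pos);
+	static ssize_t myfs_write_iter(struct kiocb *iocb,
+				       struct iov_iter *iter, loff_t pos);
+
+	const struct file_operations myfs_file_ops = {
+		.read_iter	= myfs_read_iter,
+		.write_iter	= myfs_write_iter,
+		/* other methods as required */
+	};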
+
   iterate: called when the VFS needs to read the directory contents
 
   poll: called by the VFS when a process wants to check if there is
index c1b57d72efc33c69c05b1856c7631930e5ac28bd..b34c3de5c1bcfccf76f64b3442cf3433bc6ff472 100644 (file)
@@ -8,6 +8,11 @@ Supported chips:
     Datasheets:
        http://www.ti.com/lit/gpn/lm25056
        http://www.ti.com/lit/gpn/lm25056a
+  * TI LM25063
+    Prefix: 'lm25063'
+    Addresses scanned: -
+    Datasheet:
+       To be announced
   * National Semiconductor LM25066
     Prefix: 'lm25066'
     Addresses scanned: -
@@ -32,7 +37,7 @@ Description
 -----------
 
 This driver supports hardware monitoring for National Semiconductor / TI LM25056,
-LM25066, LM5064, and LM5064 Power Management, Monitoring, Control, and
+LM25063, LM25066, LM5064, and LM5066 Power Management, Monitoring, Control, and
 Protection ICs.
 
 The driver is a client driver to the core PMBus driver. Please see
@@ -64,8 +69,12 @@ in1_input            Measured input voltage.
 in1_average            Average measured input voltage.
 in1_min                        Minimum input voltage.
 in1_max                        Maximum input voltage.
+in1_crit               Critical high input voltage (LM25063 only).
+in1_lcrit              Critical low input voltage (LM25063 only).
 in1_min_alarm          Input voltage low alarm.
 in1_max_alarm          Input voltage high alarm.
+in1_lcrit_alarm                Input voltage critical low alarm (LM25063 only).
+in1_crit_alarm         Input voltage critical high alarm (LM25063 only).
 
 in2_label              "vmon"
 in2_input              Measured voltage on VAUX pin
@@ -80,12 +89,16 @@ in3_input           Measured output voltage.
 in3_average            Average measured output voltage.
 in3_min                        Minimum output voltage.
 in3_min_alarm          Output voltage low alarm.
+in3_highest            Historical maximum output voltage (LM25063 only).
+in3_lowest             Historical minimum output voltage (LM25063 only).
 
 curr1_label            "iin"
 curr1_input            Measured input current.
 curr1_average          Average measured input current.
 curr1_max              Maximum input current.
+curr1_crit             Critical input current (LM25063 only).
 curr1_max_alarm                Input current high alarm.
+curr1_crit_alarm       Input current critical high alarm (LM25063 only).
 
 power1_label           "pin"
 power1_input           Measured input power.
@@ -95,6 +108,11 @@ power1_alarm                Input power alarm
 power1_input_highest   Historical maximum power.
 power1_reset_history   Write any value to reset maximum power history.
 
+power2_label           "pout". LM25063 only.
+power2_input           Measured output power.
+power2_max             Maximum output power limit.
+power2_crit            Critical output power limit.
+
 temp1_input            Measured temperature.
 temp1_max              Maximum temperature.
 temp1_crit             Critical high temperature.
index dc0d08c61305469305d86f2f3cd04a32fa2e56c6..a0546fc42273bb7fcdad6389c7afcdded4171ec2 100644 (file)
@@ -6,10 +6,15 @@ Supported chips:
     Prefix: 'ltc2974'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc2974
-  * Linear Technology LTC2978
+  * Linear Technology LTC2977
+    Prefix: 'ltc2977'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2977
+  * Linear Technology LTC2978, LTC2978A
     Prefix: 'ltc2978'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc2978
+              http://www.linear.com/product/ltc2978a
   * Linear Technology LTC3880
     Prefix: 'ltc3880'
     Addresses scanned: -
@@ -26,8 +31,9 @@ Description
 -----------
 
 LTC2974 is a quad digital power supply manager. LTC2978 is an octal power supply
-monitor. LTC3880 is a dual output poly-phase step-down DC/DC controller. LTC3883
-is a single phase step-down DC/DC controller.
+monitor. LTC2977 is a pin compatible replacement for LTC2978. LTC3880 is a dual
+output poly-phase step-down DC/DC controller. LTC3883 is a single phase
+step-down DC/DC controller.
 
 
 Usage Notes
@@ -49,21 +55,25 @@ Sysfs attributes
 in1_label              "vin"
 in1_input              Measured input voltage.
 in1_min                        Minimum input voltage.
-in1_max                        Maximum input voltage. LTC2974 and LTC2978 only.
-in1_lcrit              Critical minimum input voltage. LTC2974 and LTC2978
-                       only.
+in1_max                        Maximum input voltage.
+                       LTC2974, LTC2977, and LTC2978 only.
+in1_lcrit              Critical minimum input voltage.
+                       LTC2974, LTC2977, and LTC2978 only.
 in1_crit               Critical maximum input voltage.
 in1_min_alarm          Input voltage low alarm.
-in1_max_alarm          Input voltage high alarm. LTC2974 and LTC2978 only.
-in1_lcrit_alarm                Input voltage critical low alarm. LTC2974 and LTC2978
-                       only.
+in1_max_alarm          Input voltage high alarm.
+                       LTC2974, LTC2977, and LTC2978 only.
+in1_lcrit_alarm                Input voltage critical low alarm.
+                       LTC2974, LTC2977, and LTC2978 only.
 in1_crit_alarm         Input voltage critical high alarm.
-in1_lowest             Lowest input voltage. LTC2974 and LTC2978 only.
+in1_lowest             Lowest input voltage.
+                       LTC2974, LTC2977, and LTC2978 only.
 in1_highest            Highest input voltage.
 in1_reset_history      Reset input voltage history.
 
 in[N]_label            "vout[1-8]".
                        LTC2974: N=2-5
+                       LTC2977: N=2-9
                        LTC2978: N=2-9
                        LTC3880: N=2-3
                        LTC3883: N=2
@@ -83,21 +93,23 @@ in[N]_reset_history Reset output voltage history.
 temp[N]_input          Measured temperature.
                        On LTC2974, temp[1-4] report external temperatures,
                        and temp5 reports the chip temperature.
-                       On LTC2978, only one temperature measurement is
-                       supported and reports the chip temperature.
+                       On LTC2977 and LTC2978, only one temperature measurement
+                       is supported and reports the chip temperature.
                        On LTC3880, temp1 and temp2 report external
                        temperatures, and temp3 reports the chip temperature.
                        On LTC3883, temp1 reports an external temperature,
                        and temp2 reports the chip temperature.
-temp[N]_min            Mimimum temperature. LTC2974 and LTC2978 only.
+temp[N]_min            Minimum temperature. LTC2974, LTC2977, and LTC2978 only.
 temp[N]_max            Maximum temperature.
 temp[N]_lcrit          Critical low temperature.
 temp[N]_crit           Critical high temperature.
-temp[N]_min_alarm      Temperature low alarm. LTC2974 and LTC2978 only.
+temp[N]_min_alarm      Temperature low alarm.
+                       LTC2974, LTC2977, and LTC2978 only.
 temp[N]_max_alarm      Temperature high alarm.
 temp[N]_lcrit_alarm    Temperature critical low alarm.
 temp[N]_crit_alarm     Temperature critical high alarm.
-temp[N]_lowest         Lowest measured temperature. LTC2974 and LTC2978 only.
+temp[N]_lowest         Lowest measured temperature.
+                       LTC2974, LTC2977, and LTC2978 only.
                        Not supported for chip temperature sensor on LTC2974.
 temp[N]_highest                Highest measured temperature. Not supported for chip
                        temperature sensor on LTC2974.
@@ -109,6 +121,7 @@ power1_input                Measured input power.
 
 power[N]_label         "pout[1-4]".
                        LTC2974: N=1-4
+                       LTC2977: Not supported
                        LTC2978: Not supported
                        LTC3880: N=1-2
                        LTC3883: N=2
@@ -123,6 +136,7 @@ curr1_reset_history Reset input current history. LTC3883 only.
 
 curr[N]_label          "iout[1-4]".
                        LTC2974: N=1-4
+                       LTC2977: not supported
                        LTC2978: not supported
                        LTC3880: N=2-3
                        LTC3883: N=2
index 1a036cd972fb0c205109ed66b968c3d771f66418..fcbb736d55feb439c1152be71355d5ab79f8edd2 100644 (file)
@@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Format: <io>,<irq>,<mode>
                        See header of drivers/net/hamradio/baycom_ser_hdx.c.
 
+       blkdevparts=    Manual partition parsing of block device(s) for
+                       embedded devices based on command line input.
+                       See Documentation/block/cmdline-partition.txt
+
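                       For example, a command line of the following form
                       (device name and partition sizes are illustrative
                       only; the full grammar is given in
                       cmdline-partition.txt) creates three named
                       partitions on an eMMC device:

                               blkdevparts=mmcblk0:1m(boot),512k(env),-(rootfs)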
        boot_delay=     Milliseconds to delay each printk during boot.
                        Values larger than 10 seconds (10000) are changed to
                        no delay (0).
@@ -1357,7 +1361,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        pages. In the event, a node is too small to have both
                        kernelcore and Movable pages, kernelcore pages will
                        take priority and other nodes will have a larger number
-                       of kernelcore pages.  The Movable zone is used for the
+                       of Movable pages.  The Movable zone is used for the
                        allocation of pages that may be reclaimed or moved
                        by the page migration subsystem.  This means that
                        HugeTLB pages may not be allocated from this zone.
@@ -3485,6 +3489,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                the unplug protocol
                        never -- do not unplug even if version check succeeds
 
+       xen_nopvspin    [X86,XEN]
+                       Disables the ticketlock slowpath using Xen PV
+                       optimizations.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
index c1d82047a4b151c35983656e5841e261ff22fd7f..89490beb3c0bf6bd0160c891b51e3cf07497f1ca 100644 (file)
@@ -69,8 +69,7 @@ folder:
 # aggregated_ogms        gw_bandwidth           log_level
 # ap_isolation           gw_mode                orig_interval
 # bonding                gw_sel_class           routing_algo
-# bridge_loop_avoidance  hop_penalty            vis_mode
-# fragmentation
+# bridge_loop_avoidance  hop_penalty            fragmentation
 
 
 There is a special folder for debugging information:
@@ -78,7 +77,7 @@ There is a special folder for debugging information:
 # ls /sys/kernel/debug/batman_adv/bat0/
 # bla_backbone_table  log                 transtable_global
 # bla_claim_table     originators         transtable_local
-# gateways            socket              vis_data
+# gateways            socket
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
@@ -127,51 +126,6 @@ ously assigned to interfaces now used by batman advanced, e.g.
 # ifconfig eth0 0.0.0.0
 
 
-VISUALIZATION
--------------
-
-If you want topology visualization, at least one mesh  node  must
-be configured as VIS-server:
-
-# echo "server" > /sys/class/net/bat0/mesh/vis_mode
-
-Each  node  is  either configured as "server" or as "client" (de-
-fault: "client").  Clients send their topology data to the server
-next to them, and server synchronize with other servers. If there
-is no server configured (default) within the  mesh,  no  topology
-information   will  be  transmitted.  With  these  "synchronizing
-servers", there can be 1 or more vis servers sharing the same (or
-at least very similar) data.
-
-When  configured  as  server,  you can get a topology snapshot of
-your mesh:
-
-# cat /sys/kernel/debug/batman_adv/bat0/vis_data
-
-This raw output is intended to be easily parsable and convertable
-with  other tools. Have a look at the batctl README if you want a
-vis output in dot or json format for instance and how those  out-
-puts could then be visualised in an image.
-
-The raw format consists of comma separated values per entry where
-each entry is giving information about a  certain  source  inter-
-face.  Each  entry can/has to have the following values:
--> "mac" - mac address of an originator's source interface
-           (each line begins with it)
--> "TQ mac  value"  -  src mac's link quality towards mac address
-                       of a neighbor originator's interface which
-                       is being used for routing
--> "TT mac" - TT announced by source mac
--> "PRIMARY" - this  is a primary interface
--> "SEC mac" - secondary mac address of source
-               (requires preceding PRIMARY)
-
-The TQ value has a range from 4 to 255 with 255 being  the  best.
-The TT entries are showing which hosts are connected to the mesh
-via bat0 or being bridged into the mesh network.  The PRIMARY/SEC
-values are only applied on primary interfaces
-
-
 LOGGING/DEBUGGING
 -----------------
 
@@ -245,5 +199,5 @@ Mailing-list:   b.a.t.m.a.n@open-mesh.org (optional  subscription
 
 You can also contact the Authors:
 
-Marek  Lindner  <lindner_marek@yahoo.de>
-Simon  Wunderlich  <siwu@hrz.tu-chemnitz.de>
+Marek  Lindner  <mareklindner@neomailbox.ch>
+Simon  Wunderlich  <sw@simonwunderlich.de>
index 9b28e714831ae35fd0664debe4a1091825701024..3856ed2c45a9ffda4918ab3615af8dfc1862890f 100644 (file)
@@ -743,21 +743,16 @@ xmit_hash_policy
                protocol information to generate the hash.
 
                Uses XOR of hardware MAC addresses and IP addresses to
-               generate the hash.  The IPv4 formula is
+               generate the hash.  The formula is
 
-               (((source IP XOR dest IP) AND 0xffff) XOR
-                       ( source MAC XOR destination MAC ))
-                               modulo slave count
+               hash = source MAC XOR destination MAC
+               hash = hash XOR source IP XOR destination IP
+               hash = hash XOR (hash RSHIFT 16)
+               hash = hash XOR (hash RSHIFT 8)
+               And then hash is reduced modulo slave count.
 
-               The IPv6 formula is
-
-               hash = (source ip quad 2 XOR dest IP quad 2) XOR
-                      (source ip quad 3 XOR dest IP quad 3) XOR
-                      (source ip quad 4 XOR dest IP quad 4)
-
-               (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-                       XOR (source MAC XOR destination MAC))
-                               modulo slave count
+               If the protocol is IPv6 then the source and destination
+               addresses are first hashed using ipv6_addr_hash.
 
                This algorithm will place all traffic to a particular
                network peer on the same slave.  For non-IP traffic,
@@ -779,21 +774,16 @@ xmit_hash_policy
                slaves, although a single connection will not span
                multiple slaves.
 
-               The formula for unfragmented IPv4 TCP and UDP packets is
-
-               ((source port XOR dest port) XOR
-                        ((source IP XOR dest IP) AND 0xffff)
-                               modulo slave count
+               The formula for unfragmented TCP and UDP packets is
 
-               The formula for unfragmented IPv6 TCP and UDP packets is
+               hash = source port, destination port (as in the header)
+               hash = hash XOR source IP XOR destination IP
+               hash = hash XOR (hash RSHIFT 16)
+               hash = hash XOR (hash RSHIFT 8)
+               And then hash is reduced modulo slave count.
 
-               hash = (source port XOR dest port) XOR
-                      ((source ip quad 2 XOR dest IP quad 2) XOR
-                       (source ip quad 3 XOR dest IP quad 3) XOR
-                       (source ip quad 4 XOR dest IP quad 4))
-
-               ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-                       modulo slave count
+               If the protocol is IPv6 then the source and destination
+               addresses are first hashed using ipv6_addr_hash.
 
                For fragmented TCP or UDP packets and all other IPv4 and
                IPv6 protocol traffic, the source and destination port
@@ -801,10 +791,6 @@ xmit_hash_policy
                formula is the same as for the layer2 transmit hash
                policy.
 
-               The IPv4 policy is intended to mimic the behavior of
-               certain switches, notably Cisco switches with PFC2 as
-               well as some Foundry and IBM products.
-
                This algorithm is not fully 802.3ad compliant.  A
                single TCP or UDP conversation containing both
                fragmented and unfragmented packets will see packets
@@ -815,6 +801,26 @@ xmit_hash_policy
                conversations.  Other implementations of 802.3ad may
                or may not tolerate this noncompliance.
 
+       encap2+3
+
+               This policy uses the same formula as layer2+3 but it
+               relies on skb_flow_dissect to obtain the header fields
+               which might result in the use of inner headers if an
+               encapsulation protocol is used. For example this will
+               improve the performance for tunnel users because the
+               packets will be distributed according to the encapsulated
+               flows.
+
+       encap3+4
+
+               This policy uses the same formula as layer3+4 but it
+               relies on skb_flow_dissect to obtain the header fields
+               which might result in the use of inner headers if an
+               encapsulation protocol is used. For example this will
+               improve the performance for tunnel users because the
+               packets will be distributed according to the encapsulated
+               flows.
+
        The default value is layer2.  This option was added in bonding
        version 2.6.3.  In earlier versions of bonding, this parameter
        does not exist, and the layer2 policy is the only policy.  The
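The layer2+3 and layer3+4 formulas above map almost directly onto code. A rough
C sketch of the layer2+3 step for the IPv4 case is shown below; it follows the
documented steps only and is not the driver's actual implementation (how the
MAC addresses are folded into mac_xor is glossed over here):

    #include <stdint.h>

    /* layer2+3: mix MACs and IPv4 addresses, then fold and reduce.
     * slave_cnt must be non-zero.
     */
    static uint32_t bond_l23_hash(uint32_t mac_xor, uint32_t saddr,
                                  uint32_t daddr, unsigned int slave_cnt)
    {
            uint32_t hash = mac_xor;

            hash ^= saddr ^ daddr;   /* hash XOR source IP XOR dest IP */
            hash ^= hash >> 16;      /* hash XOR (hash RSHIFT 16) */
            hash ^= hash >> 8;       /* hash XOR (hash RSHIFT 8) */
            return hash % slave_cnt; /* reduced modulo slave count */
    }

The policy itself is selected at run time through the xmit_hash_policy option,
e.g. as a module parameter or via the bonding sysfs interface.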
index 425c51d56aefb8b991b3d97b5d3f01564f9baaf6..b8a907dc01697890747ead021b836b4c60c24ae0 100644 (file)
@@ -42,7 +42,7 @@ We can represent these as three OPPs as the following {Hz, uV} tuples:
 
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
-is located in include/linux/opp.h. OPP library can be enabled by enabling
+is located in include/linux/pm_opp.h. OPP library can be enabled by enabling
 CONFIG_PM_OPP from power management menuconfig menu. OPP library depends on
 CONFIG_PM as certain SoCs such as Texas Instrument's OMAP framework allows to
 optionally boot at a certain OPP without needing cpufreq.
@@ -71,14 +71,14 @@ operations until that OPP could be re-enabled if possible.
 
 OPP library facilitates this concept in it's implementation. The following
 operational functions operate only on available opps:
-opp_find_freq_{ceil, floor}, opp_get_voltage, opp_get_freq, opp_get_opp_count
-and opp_init_cpufreq_table
+opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
+and dev_pm_opp_init_cpufreq_table
 
-opp_find_freq_exact is meant to be used to find the opp pointer which can then
-be used for opp_enable/disable functions to make an opp available as required.
+dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
+be used for dev_pm_opp_enable/disable functions to make an opp available as required.
 
 WARNING: Users of OPP library should refresh their availability count using
-get_opp_count if opp_enable/disable functions are invoked for a device, the
+get_opp_count if dev_pm_opp_enable/disable functions are invoked for a device, the
 exact mechanism to trigger these or the notification mechanism to other
 dependent subsystems such as cpufreq are left to the discretion of the SoC
 specific framework which uses the OPP library. Similar care needs to be taken
@@ -96,24 +96,24 @@ using RCU read locks. The opp_find_freq_{exact,ceil,floor},
 opp_get_{voltage, freq, opp_count} fall into this category.
 
 opp_{add,enable,disable} are updaters which use mutex and implement it's own
-RCU locking mechanisms. opp_init_cpufreq_table acts as an updater and uses
+RCU locking mechanisms. dev_pm_opp_init_cpufreq_table acts as an updater and uses
 mutex to implement RCU updater strategy. These functions should *NOT* be called
 under RCU locks and other contexts that prevent blocking functions in RCU or
 mutex operations from working.
 
 2. Initial OPP List Registration
 ================================
-The SoC implementation calls opp_add function iteratively to add OPPs per
+The SoC implementation calls dev_pm_opp_add function iteratively to add OPPs per
 device. It is expected that the SoC framework will register the OPP entries
 optimally- typical numbers range to be less than 5. The list generated by
 registering the OPPs is maintained by OPP library throughout the device
 operation. The SoC framework can subsequently control the availability of the
-OPPs dynamically using the opp_enable / disable functions.
+OPPs dynamically using the dev_pm_opp_enable / disable functions.
 
-opp_add - Add a new OPP for a specific domain represented by the device pointer.
+dev_pm_opp_add - Add a new OPP for a specific domain represented by the device pointer.
        The OPP is defined using the frequency and voltage. Once added, the OPP
        is assumed to be available and control of it's availability can be done
-       with the opp_enable/disable functions. OPP library internally stores
+       with the dev_pm_opp_enable/disable functions. OPP library internally stores
        and manages this information in the opp struct. This function may be
        used by SoC framework to define a optimal list as per the demands of
        SoC usage environment.
@@ -124,7 +124,7 @@ opp_add - Add a new OPP for a specific domain represented by the device pointer.
         soc_pm_init()
         {
                /* Do things */
-               r = opp_add(mpu_dev, 1000000, 900000);
+               r = dev_pm_opp_add(mpu_dev, 1000000, 900000);
                if (!r) {
                        pr_err("%s: unable to register mpu opp(%d)\n", r);
                        goto no_cpufreq;
@@ -143,44 +143,44 @@ functions return the matching pointer representing the opp if a match is
 found, else returns error. These errors are expected to be handled by standard
 error checks such as IS_ERR() and appropriate actions taken by the caller.
 
-opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
+dev_pm_opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
        availability. This function is especially useful to enable an OPP which
        is not available by default.
        Example: In a case when SoC framework detects a situation where a
        higher frequency could be made available, it can use this function to
-       find the OPP prior to call the opp_enable to actually make it available.
+       find the OPP prior to call the dev_pm_opp_enable to actually make it available.
         rcu_read_lock();
-        opp = opp_find_freq_exact(dev, 1000000000, false);
+        opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
         rcu_read_unlock();
         /* dont operate on the pointer.. just do a sanity check.. */
         if (IS_ERR(opp)) {
                pr_err("frequency not disabled!\n");
                /* trigger appropriate actions.. */
         } else {
-               opp_enable(dev,1000000000);
+               dev_pm_opp_enable(dev,1000000000);
         }
 
        NOTE: This is the only search function that operates on OPPs which are
        not available.
 
-opp_find_freq_floor - Search for an available OPP which is *at most* the
+dev_pm_opp_find_freq_floor - Search for an available OPP which is *at most* the
        provided frequency. This function is useful while searching for a lesser
        match OR operating on OPP information in the order of decreasing
        frequency.
        Example: To find the highest opp for a device:
         freq = ULONG_MAX;
         rcu_read_lock();
-        opp_find_freq_floor(dev, &freq);
+        dev_pm_opp_find_freq_floor(dev, &freq);
         rcu_read_unlock();
 
-opp_find_freq_ceil - Search for an available OPP which is *at least* the
+dev_pm_opp_find_freq_ceil - Search for an available OPP which is *at least* the
        provided frequency. This function is useful while searching for a
        higher match OR operating on OPP information in the order of increasing
        frequency.
        Example 1: To find the lowest opp for a device:
         freq = 0;
         rcu_read_lock();
-        opp_find_freq_ceil(dev, &freq);
+        dev_pm_opp_find_freq_ceil(dev, &freq);
         rcu_read_unlock();
        Example 2: A simplified implementation of a SoC cpufreq_driver->target:
         soc_cpufreq_target(..)
@@ -188,7 +188,7 @@ opp_find_freq_ceil - Search for an available OPP which is *at least* the
                /* Do stuff like policy checks etc. */
                /* Find the best frequency match for the req */
                rcu_read_lock();
-               opp = opp_find_freq_ceil(dev, &freq);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                rcu_read_unlock();
                if (!IS_ERR(opp))
                        soc_switch_to_freq_voltage(freq);
@@ -208,34 +208,34 @@ as thermal considerations (e.g. don't use OPPx until the temperature drops).
 
 WARNING: Do not use these functions in interrupt context.
 
-opp_enable - Make a OPP available for operation.
+dev_pm_opp_enable - Make a OPP available for operation.
        Example: Lets say that 1GHz OPP is to be made available only if the
        SoC temperature is lower than a certain threshold. The SoC framework
        implementation might choose to do something as follows:
         if (cur_temp < temp_low_thresh) {
                /* Enable 1GHz if it was disabled */
                rcu_read_lock();
-               opp = opp_find_freq_exact(dev, 1000000000, false);
+               opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
                rcu_read_unlock();
                /* just error check */
                if (!IS_ERR(opp))
-                       ret = opp_enable(dev, 1000000000);
+                       ret = dev_pm_opp_enable(dev, 1000000000);
                else
                        goto try_something_else;
         }
 
-opp_disable - Make an OPP to be not available for operation
+dev_pm_opp_disable - Make an OPP to be not available for operation
        Example: Lets say that 1GHz OPP is to be disabled if the temperature
        exceeds a threshold value. The SoC framework implementation might
        choose to do something as follows:
         if (cur_temp > temp_high_thresh) {
                /* Disable 1GHz if it was enabled */
                rcu_read_lock();
-               opp = opp_find_freq_exact(dev, 1000000000, true);
+               opp = dev_pm_opp_find_freq_exact(dev, 1000000000, true);
                rcu_read_unlock();
                /* just error check */
                if (!IS_ERR(opp))
-                       ret = opp_disable(dev, 1000000000);
+                       ret = dev_pm_opp_disable(dev, 1000000000);
                else
                        goto try_something_else;
         }
@@ -247,7 +247,7 @@ information from the OPP structure is necessary. Once an OPP pointer is
 retrieved using the search functions, the following functions can be used by SoC
 framework to retrieve the information represented inside the OPP layer.
 
-opp_get_voltage - Retrieve the voltage represented by the opp pointer.
+dev_pm_opp_get_voltage - Retrieve the voltage represented by the opp pointer.
        Example: At a cpufreq transition to a different frequency, SoC
        framework requires to set the voltage represented by the OPP using
        the regulator framework to the Power Management chip providing the
@@ -256,15 +256,15 @@ opp_get_voltage - Retrieve the voltage represented by the opp pointer.
         {
                /* do things */
                rcu_read_lock();
-               opp = opp_find_freq_ceil(dev, &freq);
-               v = opp_get_voltage(opp);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+               v = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                if (v)
                        regulator_set_voltage(.., v);
                /* do other things */
         }
 
-opp_get_freq - Retrieve the freq represented by the opp pointer.
+dev_pm_opp_get_freq - Retrieve the freq represented by the opp pointer.
        Example: Lets say the SoC framework uses a couple of helper functions
        we could pass opp pointers instead of doing additional parameters to
        handle quiet a bit of data parameters.
@@ -273,8 +273,8 @@ opp_get_freq - Retrieve the freq represented by the opp pointer.
                /* do things.. */
                 max_freq = ULONG_MAX;
                 rcu_read_lock();
-                max_opp = opp_find_freq_floor(dev,&max_freq);
-                requested_opp = opp_find_freq_ceil(dev,&freq);
+                max_opp = dev_pm_opp_find_freq_floor(dev,&max_freq);
+                requested_opp = dev_pm_opp_find_freq_ceil(dev,&freq);
                 if (!IS_ERR(max_opp) && !IS_ERR(requested_opp))
                        r = soc_test_validity(max_opp, requested_opp);
                 rcu_read_unlock();
@@ -282,25 +282,25 @@ opp_get_freq - Retrieve the freq represented by the opp pointer.
         }
         soc_test_validity(..)
         {
-                if(opp_get_voltage(max_opp) < opp_get_voltage(requested_opp))
+                if(dev_pm_opp_get_voltage(max_opp) < dev_pm_opp_get_voltage(requested_opp))
                         return -EINVAL;
-                if(opp_get_freq(max_opp) < opp_get_freq(requested_opp))
+                if(dev_pm_opp_get_freq(max_opp) < dev_pm_opp_get_freq(requested_opp))
                         return -EINVAL;
                /* do things.. */
         }
 
-opp_get_opp_count - Retrieve the number of available opps for a device
+dev_pm_opp_get_opp_count - Retrieve the number of available opps for a device
        Example: Lets say a co-processor in the SoC needs to know the available
        frequencies in a table, the main processor can notify as following:
         soc_notify_coproc_available_frequencies()
         {
                /* Do things */
                rcu_read_lock();
-               num_available = opp_get_opp_count(dev);
+               num_available = dev_pm_opp_get_opp_count(dev);
                speeds = kzalloc(sizeof(u32) * num_available, GFP_KERNEL);
                /* populate the table in increasing order */
                freq = 0;
-               while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
+               while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
                        speeds[i] = freq;
                        freq++;
                        i++;
@@ -313,7 +313,7 @@ opp_get_opp_count - Retrieve the number of available opps for a device
 
 6. Cpufreq Table Generation
 ===========================
-opp_init_cpufreq_table - cpufreq framework typically is initialized with
+dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
        cpufreq_frequency_table_cpuinfo which is provided with the list of
        frequencies that are available for operation. This function provides
        a ready to use conversion routine to translate the OPP layer's internal
@@ -326,7 +326,7 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
         soc_pm_init()
         {
                /* Do things */
-               r = opp_init_cpufreq_table(dev, &freq_table);
+               r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
                if (!r)
                        cpufreq_frequency_table_cpuinfo(policy, freq_table);
                /* Do other things */
@@ -336,7 +336,7 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
        addition to CONFIG_PM as power management feature is required to
        dynamically scale voltage and frequency in a system.
 
-opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table
 
 7. Data Structures
 ==================
@@ -358,16 +358,16 @@ accessed by various functions as described above. However, the structures
 representing the actual OPPs and domains are internal to the OPP library itself
 to allow for suitable abstraction reusable across systems.
 
-struct opp - The internal data structure of OPP library which is used to
+struct dev_pm_opp - The internal data structure of OPP library which is used to
        represent an OPP. In addition to the freq, voltage, availability
        information, it also contains internal book keeping information required
        for the OPP library to operate on.  Pointer to this structure is
        provided back to the users such as SoC framework to be used as a
        identifier for OPP in the interactions with OPP layer.
 
-       WARNING: The struct opp pointer should not be parsed or modified by the
-       users. The defaults of for an instance is populated by opp_add, but the
-       availability of the OPP can be modified by opp_enable/disable functions.
+       WARNING: The struct dev_pm_opp pointer should not be parsed or modified by the
+       users. The defaults of for an instance is populated by dev_pm_opp_add, but the
+       availability of the OPP can be modified by dev_pm_opp_enable/disable functions.
 
 struct device - This is used to identify a domain to the OPP layer. The
        nature of the device and it's implementation is left to the user of
@@ -377,19 +377,19 @@ Overall, in a simplistic view, the data structure operations is represented as
 following:
 
 Initialization / modification:
-            +-----+        /- opp_enable
-opp_add --> | opp | <-------
-  |         +-----+        \- opp_disable
+            +-----+        /- dev_pm_opp_enable
+dev_pm_opp_add --> | opp | <-------
+  |         +-----+        \- dev_pm_opp_disable
   \-------> domain_info(device)
 
 Search functions:
-             /-- opp_find_freq_ceil  ---\   +-----+
-domain_info<---- opp_find_freq_exact -----> | opp |
-             \-- opp_find_freq_floor ---/   +-----+
+             /-- dev_pm_opp_find_freq_ceil  ---\   +-----+
+domain_info<---- dev_pm_opp_find_freq_exact -----> | opp |
+             \-- dev_pm_opp_find_freq_floor ---/   +-----+
 
 Retrieval functions:
-+-----+     /- opp_get_voltage
++-----+     /- dev_pm_opp_get_voltage
 | opp | <---
-+-----+     \- opp_get_freq
++-----+     \- dev_pm_opp_get_freq
 
-domain_info <- opp_get_opp_count
+domain_info <- dev_pm_opp_get_opp_count
diff --git a/Documentation/power/powercap/powercap.txt b/Documentation/power/powercap/powercap.txt
new file mode 100644 (file)
index 0000000..1e6ef16
--- /dev/null
@@ -0,0 +1,236 @@
+Power Capping Framework
+==================================
+
+The power capping framework provides a consistent interface between the kernel
+and the user space that allows power capping drivers to expose the settings to
+user space in a uniform way.
+
+Terminology
+=========================
+The framework exposes power capping devices to user space via sysfs in the
+form of a tree of objects. The objects at the root level of the tree represent
+'control types', which correspond to different methods of power capping.  For
+example, the intel-rapl control type represents the Intel "Running Average
+Power Limit" (RAPL) technology, whereas the 'idle-injection' control type
+corresponds to the use of idle injection for controlling power.
+
+Power zones represent different parts of the system, which can be controlled and
+monitored using the power capping method determined by the control type the
+given zone belongs to. They each contain attributes for monitoring power, as
+well as controls represented in the form of power constraints.  If the parts of
+the system represented by different power zones are hierarchical (that is, one
+bigger part consists of multiple smaller parts that each have their own power
+controls), those power zones may also be organized in a hierarchy with one
+parent power zone containing multiple subzones and so on to reflect the power
+control topology of the system.  In that case, it is possible to apply power
+capping to a set of devices together using the parent power zone and if more
+fine grained control is required, it can be applied through the subzones.
+
+
+Example sysfs interface tree:
+
+/sys/devices/virtual/powercap
+└── intel-rapl
+    ├── intel-rapl:0
+    │   ├── constraint_0_name
+    │   ├── constraint_0_power_limit_uw
+    │   ├── constraint_0_time_window_us
+    │   ├── constraint_1_name
+    │   ├── constraint_1_power_limit_uw
+    │   ├── constraint_1_time_window_us
+    │   ├── device -> ../../intel-rapl
+    │   ├── energy_uj
+    │   ├── intel-rapl:0:0
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:0
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── intel-rapl:0:1
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:0
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── max_energy_range_uj
+    │   ├── max_power_range_uw
+    │   ├── name
+    │   ├── enabled
+    │   ├── power
+    │   │   ├── async
+    │   │   []
+    │   ├── subsystem -> ../../../../../class/power_cap
+    │   ├── enabled
+    │   └── uevent
+    ├── intel-rapl:1
+    │   ├── constraint_0_name
+    │   ├── constraint_0_power_limit_uw
+    │   ├── constraint_0_time_window_us
+    │   ├── constraint_1_name
+    │   ├── constraint_1_power_limit_uw
+    │   ├── constraint_1_time_window_us
+    │   ├── device -> ../../intel-rapl
+    │   ├── energy_uj
+    │   ├── intel-rapl:1:0
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:1
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── intel-rapl:1:1
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:1
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── max_energy_range_uj
+    │   ├── max_power_range_uw
+    │   ├── name
+    │   ├── enabled
+    │   ├── power
+    │   │   ├── async
+    │   │   []
+    │   ├── subsystem -> ../../../../../class/power_cap
+    │   └── uevent
+    ├── power
+    │   ├── async
+    │   []
+    ├── subsystem -> ../../../../class/power_cap
+    ├── enabled
+    └── uevent
+
+The above example illustrates a case in which the Intel RAPL technology,
+available in Intel® 64 and IA-32 Processor Architectures, is used. There is one
+control type called intel-rapl which contains two power zones, intel-rapl:0 and
+intel-rapl:1, representing CPU packages.  Each of these power zones contains
+two subzones, intel-rapl:j:0 and intel-rapl:j:1 (j = 0, 1), representing the
+"core" and the "uncore" parts of the given CPU package, respectively.  All of
+the zones and subzones contain energy monitoring attributes (energy_uj,
+max_energy_range_uj) and constraint attributes (constraint_*) allowing controls
+to be applied (the constraints in the 'package' power zones apply to the whole
+CPU packages and the subzone constraints only apply to the respective parts of
+the given package individually). Since Intel RAPL does not provide an
+instantaneous power value, there is no power_uw attribute.
+
+In addition to that, each power zone contains a name attribute, allowing the
+part of the system represented by that zone to be identified.
+For example:
+
+cat /sys/class/power_cap/intel-rapl/intel-rapl:0/name
+package-0
+
+The Intel RAPL technology allows two constraints, short term and long term,
+with two different time windows to be applied to each power zone.  Thus for
+each zone there are 2 attributes representing the constraint names, 2 power
+limits and 2 attributes representing the sizes of the time windows. The
+constraint_j_* attributes correspond to the jth constraint (j = 0, 1).
+
+For example:
+       constraint_0_name
+       constraint_0_power_limit_uw
+       constraint_0_time_window_us
+       constraint_1_name
+       constraint_1_power_limit_uw
+       constraint_1_time_window_us
+
+Power Zone Attributes
+=================================
+Monitoring attributes
+----------------------
+
+energy_uj (rw): Current energy counter in micro joules. Write "0" to reset.
+If the counter can not be reset, then this attribute is read only.
+
+max_energy_range_uj (ro): Range of the above energy counter in micro-joules.
+
+power_uw (ro): Current power in micro watts.
+
+max_power_range_uw (ro): Range of the above power value in micro-watts.
+
+name (ro): Name of this power zone.
+
+It is possible that some domains have both power ranges and energy counter ranges;
+however, only one is mandatory.
+
+Constraints
+----------------
+constraint_X_power_limit_uw (rw): Power limit in micro watts, which should be
+applicable for the time window specified by "constraint_X_time_window_us".
+
+constraint_X_time_window_us (rw): Time window in micro seconds.
+
+constraint_X_name (ro): An optional name of the constraint.
+
+constraint_X_max_power_uw(ro): Maximum allowed power in micro watts.
+
+constraint_X_min_power_uw(ro): Minimum allowed power in micro watts.
+
+constraint_X_max_time_window_us(ro): Maximum allowed time window in micro seconds.
+
+constraint_X_min_time_window_us(ro): Minimum allowed time window in micro seconds.
+
+All fields other than power_limit_uw and time_window_us are optional.
+
+Common zone and control type attributes
+----------------------------------------
+enabled (rw): Enable/Disable controls at zone level or for all zones using
+a control type.
+
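All of the attributes above are regular sysfs files, so zones can be monitored
and constrained from user space. A minimal sketch, assuming the intel-rapl
package zone from the example tree exists on the running system (the path and
the 45 W limit are illustrative values only):

    #include <stdio.h>

    #define ZONE "/sys/class/power_cap/intel-rapl/intel-rapl:0/"

    int main(void)
    {
            unsigned long long energy_uj;
            FILE *f = fopen(ZONE "energy_uj", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%llu", &energy_uj) == 1)
                    printf("energy: %llu uJ\n", energy_uj);
            fclose(f);

            /* Apply a 45 W limit to constraint 0 of the package zone. */
            f = fopen(ZONE "constraint_0_power_limit_uw", "w");
            if (!f)
                    return 1;
            fprintf(f, "45000000");
            fclose(f);
            return 0;
    }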
+Power Cap Client Driver Interface
+==================================
+The API summary:
+
+Call powercap_register_control_type() to register control type object.
+Call powercap_register_zone() to register a power zone (under a given
+control type), either as a top-level power zone or as a subzone of another
+power zone registered earlier.
+The number of constraints in a power zone and the corresponding callbacks have
+to be defined prior to calling powercap_register_zone() to register that zone.
+
+To free a power zone, call powercap_unregister_zone().
+To free a control type object, call powercap_unregister_control_type().
+Detailed API can be generated using kernel-doc on include/linux/powercap.h.
index 71d8fe4e75d3e5301e25037a28d91600d633dba7..0f54333b0ff2990ce090f88087e17417a894b46c 100644 (file)
@@ -145,11 +145,13 @@ The action performed by the idle callback is totally dependent on the subsystem
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case.  If there is no idle callback, or if the callback returns
-0, then the PM core will attempt to carry out a runtime suspend of the device;
-in essence, it will call pm_runtime_suspend() directly.  To prevent this (for
-example, if the callback routine has started a delayed suspend), the routine
-should return a non-zero value.  Negative error return codes are ignored by the
-PM core.
+0, then the PM core will attempt to carry out a runtime suspend of the device,
+also respecting devices configured for autosuspend.  In essence this means a
+call to pm_runtime_autosuspend() (do note that drivers need to update the
+device last busy mark, pm_runtime_mark_last_busy(), to control the delay under
+this circumstance).  To prevent this (for example, if the callback routine has
+started a delayed suspend), the routine must return a non-zero value.  Negative
+error return codes are ignored by the PM core.
 
 The helper functions provided by the PM core, described in Section 4, guarantee
 that the following constraints are met with respect to runtime PM callbacks for
@@ -308,7 +310,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
     - execute the subsystem-level idle callback for the device; returns an
       error code on failure, where -EINPROGRESS means that ->runtime_idle() is
       already being executed; if there is no callback or the callback returns 0
-      then run pm_runtime_suspend(dev) and return its result
+      then run pm_runtime_autosuspend(dev) and return its result
 
   int pm_runtime_suspend(struct device *dev);
     - execute the subsystem-level suspend callback for the device; returns 0 on
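A driver that wants the autosuspend delay described above to be honoured has to
keep the last-busy mark up to date. A hedged sketch of the usual idiom (the
2000 ms delay and the callback names are placeholders, not an existing driver):

    #include <linux/pm_runtime.h>

    /* At probe time: opt in to autosuspend with an example 2 s delay. */
    static void my_driver_setup_pm(struct device *dev)
    {
            pm_runtime_set_autosuspend_delay(dev, 2000);
            pm_runtime_use_autosuspend(dev);
    }

    /* After each I/O completes: refresh the last-busy mark so that the
     * PM core (or ->runtime_idle() returning 0) delays the suspend.
     */
    static void my_driver_io_done(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
    }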
index f59ded06610813f058d30ae468dbcf3922fc34b7..a74d0a84d329a2909186256d9ad5fdb5f5327a2e 100644 (file)
@@ -100,6 +100,11 @@ static long ppb_to_scaled_ppm(int ppb)
        return (long) (ppb * 65.536);
 }
 
+static int64_t pctns(struct ptp_clock_time *t)
+{
+       return t->sec * 1000000000LL + t->nsec;
+}
+
 static void usage(char *progname)
 {
        fprintf(stderr,
@@ -112,6 +117,8 @@ static void usage(char *progname)
                " -f val     adjust the ptp clock frequency by 'val' ppb\n"
                " -g         get the ptp clock time\n"
                " -h         prints this message\n"
+               " -k val     measure the time offset between system and phc clock\n"
+               "            for 'val' times (Maximum 25)\n"
                " -p val     enable output with a period of 'val' nanoseconds\n"
                " -P val     enable or disable (val=1|0) the system clock PPS\n"
                " -s         set the ptp clock time from the system time\n"
@@ -133,8 +140,12 @@ int main(int argc, char *argv[])
        struct itimerspec timeout;
        struct sigevent sigevent;
 
+       struct ptp_clock_time *pct;
+       struct ptp_sys_offset *sysoff;
+
+
        char *progname;
-       int c, cnt, fd;
+       int i, c, cnt, fd;
 
        char *device = DEVICE;
        clockid_t clkid;
@@ -144,14 +155,19 @@ int main(int argc, char *argv[])
        int extts = 0;
        int gettime = 0;
        int oneshot = 0;
+       int pct_offset = 0;
+       int n_samples = 0;
        int periodic = 0;
        int perout = -1;
        int pps = -1;
        int settime = 0;
 
+       int64_t t1, t2, tp;
+       int64_t interval, offset;
+
        progname = strrchr(argv[0], '/');
        progname = progname ? 1+progname : argv[0];
-       while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
+       while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
                switch (c) {
                case 'a':
                        oneshot = atoi(optarg);
@@ -174,6 +190,10 @@ int main(int argc, char *argv[])
                case 'g':
                        gettime = 1;
                        break;
+               case 'k':
+                       pct_offset = 1;
+                       n_samples = atoi(optarg);
+                       break;
                case 'p':
                        perout = atoi(optarg);
                        break;
@@ -376,6 +396,47 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (pct_offset) {
+               if (n_samples <= 0 || n_samples > 25) {
+                       puts("n_samples should be between 1 and 25");
+                       usage(progname);
+                       return -1;
+               }
+
+               sysoff = calloc(1, sizeof(*sysoff));
+               if (!sysoff) {
+                       perror("calloc");
+                       return -1;
+               }
+               sysoff->n_samples = n_samples;
+
+               if (ioctl(fd, PTP_SYS_OFFSET, sysoff))
+                       perror("PTP_SYS_OFFSET");
+               else
+                       puts("system and phc clock time offset request okay");
+
+               pct = &sysoff->ts[0];
+               for (i = 0; i < sysoff->n_samples; i++) {
+                       t1 = pctns(pct+2*i);
+                       tp = pctns(pct+2*i+1);
+                       t2 = pctns(pct+2*i+2);
+                       interval = t2 - t1;
+                       offset = (t2 + t1) / 2 - tp;
+
+                       printf("system time: %ld.%ld\n",
+                               (pct+2*i)->sec, (pct+2*i)->nsec);
+                       printf("phc    time: %ld.%ld\n",
+                               (pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+                       printf("system time: %ld.%ld\n",
+                               (pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+                       printf("system/phc clock time offset is %ld ns\n"
+                               "system     clock time delay  is %ld ns\n",
+                               offset, interval);
+               }
+
+               free(sysoff);
+       }
+
        close(fd);
        return 0;
 }
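With the new -k option in place, the offset measurement can be exercised from
the command line, for instance (the PTP device node depends on the system):

    ./testptp -d /dev/ptp0 -k 10

This requests ten system/PHC samples via PTP_SYS_OFFSET and prints the computed
offset and delay for each one.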
index fcaf0b4efba21cfe51ed1aa4a6d9303aa614dea5..3da163383c931258641c69ce01f4f3812c97aafd 100644 (file)
@@ -157,6 +157,16 @@ Return Value:  none
 
 Description:   Sets new actual debug level if new_level is valid. 
 
+---------------------------------------------------------------------------
+bool debug_level_enabled (debug_info_t * id, int level);
+
+Parameter:    id:        handle for debug log
+             level:      debug level
+
+Return Value: True if level is less than or equal to the current debug level.
+
+Description:  Returns true if debug events for the specified level would be
+             logged. Otherwise returns false.
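              A typical use is to guard construction of more expensive events,
              roughly like this (the handle and level are placeholders; see the
              debug_register() examples elsewhere in this document):

                      if (debug_level_enabled(debug_info, 4))
                              debug_sprintf_event(debug_info, 4,
                                                  "processing unit %d\n", unit);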
 ---------------------------------------------------------------------------
 void debug_stop_all(void);
 
index b1b8587b86f0cc8153c2cedda5a3308f980828aa..9290de7034504186a1fedbd9a60ad3690abbfed6 100644 (file)
@@ -65,11 +65,6 @@ Possible arch/ problems
 
 Possible arch problems I found (and either tried to fix or didn't):
 
-h8300 - Is such sleeping racy vs interrupts? (See #4a).
-        The H8/300 manual I found indicates yes, however disabling IRQs
-        over the sleep mean only NMIs can wake it up, so can't fix easily
-        without doing spin waiting.
-
 ia64 - is safe_halt call racy vs interrupts? (does it sleep?) (See #4a)
 
 sh64 - Is sleeping racy vs interrupts? (See #4a)
index a46ddb85e83a0dcdf0f2a54fd9b745eadb31cb6f..85c362d8ea349350947a8363d36dbe7e622ea536 100644 (file)
@@ -28,6 +28,7 @@ ALC269/270/275/276/28x/29x
   alc269-dmic          Enable ALC269(VA) digital mic workaround
   alc271-dmic          Enable ALC271X digital mic workaround
   inv-dmic             Inverted internal mic workaround
+  headset-mic          Indicates a combined headset (headphone+mic) jack
   lenovo-dock          Enables docking station I/O for some Lenovos
   dell-headset-multi   Headset jack, which can also be used as mic-in
   dell-headset-dock    Headset jack (without mic-in), and also dock I/O
@@ -296,6 +297,12 @@ Cirrus Logic CS4206/4207
   imac27       IMac 27 Inch
   auto         BIOS setup (default)
 
+Cirrus Logic CS4208
+===================
+  mba6         MacBook Air 6,1 and 6,2
+  gpio0                Enable GPIO 0 amp
+  auto         BIOS setup (default)
+
 VIA VT17xx/VT18xx/VT20xx
 ========================
   auto         BIOS setup (default)
index da6cf1676f2ccadbfd08dee0348eba9b482b02be..72977cab5c2f46cd1befc921a97fa95c2ce7fe45 100644 (file)
@@ -237,11 +237,11 @@ F:        drivers/platform/x86/acer-wmi.c
 
 ACPI
 M:     Len Brown <lenb@kernel.org>
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
-Q:     http://patchwork.kernel.org/project/linux-acpi/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
+W:     https://01.org/linux-acpi
+Q:     https://patchwork.kernel.org/project/linux-acpi/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 S:     Supported
 F:     drivers/acpi/
 F:     drivers/pnp/pnpacpi/
@@ -253,24 +253,38 @@ F:        drivers/pci/*acpi*
 F:     drivers/pci/*/*acpi*
 F:     drivers/pci/*/*/*acpi*
 
+ACPI COMPONENT ARCHITECTURE (ACPICA)
+M:     Robert Moore <robert.moore@intel.com>
+M:     Lv Zheng <lv.zheng@intel.com>
+M:     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+L:     linux-acpi@vger.kernel.org
+L:     devel@acpica.org
+W:     https://acpica.org/
+W:     https://github.com/acpica/acpica/
+Q:     https://patchwork.kernel.org/project/linux-acpi/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+S:     Supported
+F:     drivers/acpi/acpica/
+F:     include/acpi/
+
 ACPI FAN DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/fan.c
 
 ACPI THERMAL DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/*thermal*
 
 ACPI VIDEO DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/video.c
 
@@ -824,15 +838,21 @@ S:        Maintained
 F:     arch/arm/mach-gemini/
 
 ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
-M:     Barry Song <baohua.song@csr.com>
+M:     Barry Song <baohua@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
 S:     Maintained
 F:     arch/arm/mach-prima2/
+F:     drivers/clk/clk-prima2.c
+F:     drivers/clocksource/timer-prima2.c
+F:     drivers/clocksource/timer-marco.c
 F:     drivers/dma/sirf-dma.c
 F:     drivers/i2c/busses/i2c-sirf.c
+F:     drivers/input/misc/sirfsoc-onkey.c
+F:     drivers/irqchip/irq-sirfsoc.c
 F:     drivers/mmc/host/sdhci-sirf.c
 F:     drivers/pinctrl/sirf/
+F:     drivers/rtc/rtc-sirfsoc.c
 F:     drivers/spi/spi-sirf.c
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -1396,7 +1416,7 @@ M:        Wolfram Sang <wsa@the-dreams.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/misc/eeprom/at24.c
-F:     include/linux/i2c/at24.h
+F:     include/linux/platform_data/at24.h
 
 ATA OVER ETHERNET (AOE) DRIVER
 M:     "Ed L. Cashin" <ecashin@coraid.com>
@@ -1652,9 +1672,9 @@ F:        drivers/video/backlight/
 F:     include/linux/backlight.h
 
 BATMAN ADVANCED
-M:     Marek Lindner <lindner_marek@yahoo.de>
-M:     Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-M:     Antonio Quartulli <ordex@autistici.org>
+M:     Marek Lindner <mareklindner@neomailbox.ch>
+M:     Simon Wunderlich <sw@simonwunderlich.de>
+M:     Antonio Quartulli <antonio@meshcoding.com>
 L:     b.a.t.m.a.n@lists.open-mesh.org
 W:     http://www.open-mesh.org/
 S:     Maintained
@@ -1785,6 +1805,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
+M:     Veaceslav Falico <vfalico@redhat.com>
 M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
@@ -1812,7 +1833,8 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:     Christian Daudt <csd@broadcom.com>
+M:     Christian Daudt <bcm@fixthebug.org>
+L:     bcm-kernel-feedback-list@broadcom.com
 T:     git git://git.github.com/broadcom/bcm11351
 S:     Maintained
 F:     arch/arm/mach-bcm/
@@ -2293,7 +2315,7 @@ S:        Maintained
 F:     drivers/net/ethernet/ti/cpmac.c
 
 CPU FREQUENCY DRIVERS
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
 L:     cpufreq@vger.kernel.org
 L:     linux-pm@vger.kernel.org
@@ -2324,7 +2346,7 @@ S:      Maintained
 F:      drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVERS
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
@@ -2639,6 +2661,18 @@ F:       include/linux/device-mapper.h
 F:     include/linux/dm-*.h
 F:     include/uapi/linux/dm-*.h
 
+DIGI NEO AND CLASSIC PCI PRODUCTS
+M:     Lidza Louina <lidza.louina@gmail.com>
+L:     driverdev-devel@linuxdriverproject.org
+S:     Maintained
+F:     drivers/staging/dgnc/
+
+DIGI EPCA PCI PRODUCTS
+M:     Lidza Louina <lidza.louina@gmail.com>
+L:     driverdev-devel@linuxdriverproject.org
+S:     Maintained
+F:     drivers/staging/dgap/
+
 DIOLAN U2C-12 I2C DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
@@ -2699,6 +2733,8 @@ T:        git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vinod.koul@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
+L:     dmaengine@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Supported
 F:     drivers/dma/
 F:     include/linux/dma*
@@ -3534,7 +3570,7 @@ F:        fs/freevxfs/
 
 FREEZER
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/freezing-of-tasks.txt
@@ -3605,6 +3641,12 @@ L:       linux-scsi@vger.kernel.org
 S:     Odd Fixes (e.g., new signatures)
 F:     drivers/scsi/fdomain.*
 
+GCOV BASED KERNEL PROFILING
+M:     Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+S:     Maintained
+F:     kernel/gcov/
+F:     Documentation/gcov.txt
+
 GDT SCSI DISK ARRAY CONTROLLER DRIVER
 M:     Achim Leubner <achim_leubner@adaptec.com>
 L:     linux-scsi@vger.kernel.org
@@ -3870,7 +3912,7 @@ F:        drivers/video/hgafb.c
 
 HIBERNATION (aka Software Suspend, aka swsusp)
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     arch/x86/power/
@@ -4320,7 +4362,7 @@ F:        drivers/video/i810/
 INTEL MENLOW THERMAL DRIVER
 M:     Sujith Thomas <sujith.thomas@intel.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/platform/x86/intel_menlow.c
 
@@ -4332,7 +4374,10 @@ F:       arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
 M:     Dan Williams <dan.j.williams@intel.com>
-S:     Maintained
+M:     Dave Jiang <dave.jiang@intel.com>
+L:     dmaengine@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
+S:     Supported
 F:     drivers/dma/ioat*
 
 INTEL IOMMU (VT-d)
@@ -4457,6 +4502,13 @@ L:       linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial/ioc3_serial.c
 
+IOMMU DRIVERS
+M:     Joerg Roedel <joro@8bytes.org>
+L:     iommu@lists.linux-foundation.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+S:     Maintained
+F:     drivers/iommu/
+
 IP MASQUERADING
 M:     Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:     Maintained
@@ -6346,6 +6398,12 @@ S:       Supported
 F:     Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 F:     drivers/pci/host/pci-tegra.c
 
+PCI DRIVER FOR SAMSUNG EXYNOS
+M:     Jingoo Han <jg1.han@samsung.com>
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     drivers/pci/host/pci-exynos.c
+
 PCMCIA SUBSYSTEM
 P:     Linux PCMCIA Team
 L:     linux-pcmcia@lists.infradead.org
@@ -6595,7 +6653,7 @@ S:        Obsolete
 F:     drivers/net/wireless/prism54/
 
 PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:     Mikael Pettersson <mikpe@it.uu.se>
+M:     Mikael Pettersson <mikpelinux@gmail.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     drivers/ata/sata_promise.*
@@ -7266,9 +7324,9 @@ F:        include/linux/sched.h
 F:     include/uapi/linux/sched.h
 
 SCORE ARCHITECTURE
-M:     Chen Liqin <liqin.chen@sunplusct.com>
+M:     Chen Liqin <liqin.linux@gmail.com>
 M:     Lennox Wu <lennox.wu@gmail.com>
-W:     http://www.sunplusct.com
+W:     http://www.sunplus.com
 S:     Supported
 F:     arch/score/
 
@@ -7798,6 +7856,13 @@ F:       Documentation/sound/alsa/soc/
 F:     sound/soc/
 F:     include/sound/soc*
 
+SOUND - DMAENGINE HELPERS
+M:     Lars-Peter Clausen <lars@metafoo.de>
+S:     Supported
+F:     include/sound/dmaengine_pcm.h
+F:     sound/core/pcm_dmaengine.c
+F:     sound/soc/soc-generic-dmaengine-pcm.c
+
 SPARC + UltraSPARC (sparc/sparc64)
 M:     "David S. Miller" <davem@davemloft.net>
 L:     sparclinux@vger.kernel.org
@@ -8077,7 +8142,7 @@ F:        drivers/sh/
 SUSPEND TO RAM
 M:     Len Brown <len.brown@intel.com>
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/
@@ -8578,14 +8643,6 @@ S:       Maintained
 F:     arch/m68k/*/*_no.*
 F:     arch/m68k/include/asm/*_no.*
 
-UCLINUX FOR RENESAS H8/300 (H8300)
-M:     Yoshinori Sato <ysato@users.sourceforge.jp>
-W:     http://uclinux-h8.sourceforge.jp/
-S:     Supported
-F:     arch/h8300/
-F:     drivers/ide/ide-h8300.c
-F:     drivers/net/ethernet/8390/ne-h8300.c
-
 UDF FILESYSTEM
 M:     Jan Kara <jack@suse.cz>
 S:     Maintained
@@ -8732,9 +8789,8 @@ F:        Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
 USB/IP DRIVERS
-M:     Matt Mooney <mfm@muteddisk.com>
 L:     linux-usb@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/staging/usbip/
 
 USB ISP116X DRIVER
@@ -9374,6 +9430,7 @@ F:        arch/arm64/include/asm/xen/
 
 XEN NETWORK BACKEND DRIVER
 M:     Ian Campbell <ian.campbell@citrix.com>
+M:     Wei Liu <wei.liu2@citrix.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 S:     Supported
index de004ceb6b5e32d65e4b21c77855806bf709bbfc..7ab985d60cc08f6eef19f75ce8cadf9920a89e48 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -659,6 +659,12 @@ KBUILD_CFLAGS      += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# disallow errors like 'EXPORT_GPL(foo);' with missing header
+KBUILD_CFLAGS   += $(call cc-option,-Werror=implicit-int)
+
+# require functions to have arguments in prototypes, not empty 'int foo()'
+KBUILD_CFLAGS   += $(call cc-option,-Werror=strict-prototypes)
+
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
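As a quick illustration (not from this patch) of what the two newly promoted errors reject, assuming the hypothetical names below:

    /* Illustrative C only -- not part of the patch above. */

    EXPORT_GPL(foo);        /* unknown macro (missing header): parsed as a
                               declaration whose type defaults to 'int',
                               now rejected by -Werror=implicit-int */

    int bar();              /* empty parameter list is not a prototype,
                               now rejected by -Werror=strict-prototypes */

    int bar(void);          /* the accepted, fully prototyped form */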
 
index 1feb169274fe613cc5f0360c797668bf84497c4b..af2cc6eabcc781c4e8f7ee2067de75844ed5874d 100644 (file)
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
        bool
 
-config HAVE_ARCH_MUTEX_CPU_RELAX
-       bool
-
 config HAVE_RCU_TABLE_FREE
        bool
 
index 35a300d4a9fb37f3a08e3f63365c72bcc2a3ecbf..84803f88a1693afebb1ecea98ec4a165be264db6 100644 (file)
@@ -1,6 +1,7 @@
 config ALPHA
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_AOUT
        select HAVE_IDE
        select HAVE_OPROFILE
index 467de010ea7ee130cdbda0b532f10c885f8d1308..e3a1491d5073a0b59a6b9eb207c4a684ae6c9495 100644 (file)
@@ -81,6 +81,8 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
-#define SO_BUSY_POLL                   46
+#define SO_BUSY_POLL           46
+
+#define SO_MAX_PACING_RATE     47
 
 #endif /* _UAPI_ASM_SOCKET_H */
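For context, a minimal userspace sketch (not part of this patch) of how the new SO_MAX_PACING_RATE option is used: the value is an unsigned rate in bytes per second, enforced by the fq packet scheduler. The fallback #define assumes libc headers that do not carry the constant yet, and set_max_pacing_rate() is an illustrative helper name.

    #include <sys/socket.h>

    #ifndef SO_MAX_PACING_RATE
    #define SO_MAX_PACING_RATE 47          /* value added above */
    #endif

    /* Cap the socket's transmit rate to bytes_per_sec; 0 on success. */
    static int set_max_pacing_rate(int fd, unsigned int bytes_per_sec)
    {
            return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                              &bytes_per_sec, sizeof(bytes_per_sec));
    }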
index f158197ac5b04432ac6beb37c9175629361c0901..b6a8c2dfbe6e42cd51def893784f0780bc67264e 100644 (file)
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+       unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+       __asm__ __volatile__(
+       "       ex  %0, [%1]            \n"
+       : "+r" (tmp)
+       : "r"(&(lock->slock))
+       : "memory");
+
        smp_mb();
 }
 
index 32420824375b351083da3e8686cc0013f0914871..30c9baffa96f1f3a5cab5d6ec6fe83b9f4e86318 100644 (file)
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)    (((sz) <= TASK_SIZE) && \
-                                (((addr)+(sz)) <= get_fs()))
+                                ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)  (unlikely(__kernel_ok) || \
                                 likely(__user_ok((addr), (sz))))
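A rough worked example of the wrap-around the new form avoids (not part of the patch), using the 0x6000_0000 limit mentioned in the comment above: with addr = 0xffff_f000 and sz = 0x2000, the old (addr)+(sz) wraps to 0x0000_1000 and slips under get_fs(), whereas the new addr <= get_fs() - sz compares 0xffff_f000 against 0x5fff_e000 and correctly fails.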
 
index 72f97822784a4627da66e835002c309b84dc14d9..eb1c2ee5eaf0a4e0b8811092568943ed96be5388 100644 (file)
@@ -87,13 +87,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
 static inline void __kprobes set_current_kprobe(struct kprobe *p)
 {
-       __get_cpu_var(current_kprobe) = p;
+       __this_cpu_write(current_kprobe, p);
 }
 
 static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
@@ -237,7 +237,7 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 
                return 1;
        } else if (kprobe_running()) {
-               p = __get_cpu_var(current_kprobe);
+               p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
index 333238564b67f1239afde4ab4d584f16d8a48572..5d76706139dd36a246eb545f36fe42c1bf44ee9d 100644 (file)
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
        REG_IGNORE_ONE(pad2);
        REG_IN_CHUNK(callee, efa, cregs);       /* callee_regs[r25..r13] */
        REG_IGNORE_ONE(efa);                    /* efa update invalid */
-       REG_IN_ONE(stop_pc, &ptregs->ret);      /* stop_pc: PC update */
+       REG_IGNORE_ONE(stop_pc);                        /* PC updated via @ret */
 
        return ret;
 }
index ee6ef2f60a280c171e42c072401e9b7c1f79ae0c..7e95e1a86510fee2f2e1511f8bc7b63f8fb48122 100644 (file)
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
 {
        struct rt_sigframe __user *sf;
        unsigned int magic;
-       int err;
        struct pt_regs *regs = current_pt_regs();
 
        /* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
        if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
                goto badframe;
 
-       err = restore_usr_regs(regs, sf);
-       err |= __get_user(magic, &sf->sigret_magic);
-       if (err)
+       if (__get_user(magic, &sf->sigret_magic))
                goto badframe;
 
        if (unlikely(is_do_ss_needed(magic)))
                if (restore_altstack(&sf->uc.uc_stack))
                        goto badframe;
 
+       if (restore_usr_regs(regs, sf))
+               goto badframe;
+
        /* Don't restart from sigreturn */
        syscall_wont_restart(regs);
 
@@ -190,6 +190,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
        if (!sf)
                return 1;
 
+       /*
+        * w/o SA_SIGINFO, struct ucontext is partially populated (only
+        * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+        * during signal handler execution. This works for SA_SIGINFO as well
+        * although the semantics are now overloaded (the same reg state can be
+        * inspected by userland: but are they allowed to fiddle with it ?
+        */
+       err |= stash_usr_regs(sf, regs, set);
+
        /*
         * SA_SIGINFO requires 3 args to signal handler:
         *  #1: sig-no (common to any handler)
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
                magic = MAGIC_SIGALTSTK;
        }
 
-       /*
-        * w/o SA_SIGINFO, struct ucontext is partially populated (only
-        * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
-        * during signal handler execution. This works for SA_SIGINFO as well
-        * although the semantics are now overloaded (the same reg state can be
-        * inspected by userland: but are they allowed to fiddle with it ?
-        */
-       err |= stash_usr_regs(sf, regs, set);
        err |= __put_user(magic, &sf->sigret_magic);
        if (err)
                return err;
index 0e51e69cf30d772b646ef8a1d2c5605bf63b929a..4c21dde2f6a9625bc4ff4bc57126d4783fc73214 100644 (file)
@@ -206,7 +206,7 @@ static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 {
-       struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+       struct clock_event_device *clk = this_cpu_ptr(&arc_clockevent_device);
 
        arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
        clk->event_handler(clk);
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
        struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-       clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
-       clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
        clk->cpumask = cpumask_of(cpu);
-
-       clockevents_register_device(clk);
+       clockevents_config_and_register(clk, arc_get_core_freq(),
+                                       0, ARC_TIMER_MAX);
 
        /*
         * setup the per-cpu timer IRQ handler - for all cpus
index 28d1700607474eb01be14e0600832bf7ee4cf999..7ff5b5c183bb026716295c13f7b123de1d67a96f 100644 (file)
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
                regs->status32 &= ~STATUS_DE_MASK;
        } else {
                regs->ret += state.instr_len;
+
+               /* handle zero-overhead-loop */
+               if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+                       regs->ret = regs->lp_start;
+                       regs->lp_count--;
+               }
        }
 
        return 0;
index 3f7714d8d2d216bf3bbd7b4a5b227ea982997554..b6a708ef6067124e3b8b7e3d0cc8aebccb72787d 100644 (file)
@@ -5,6 +5,8 @@ config ARM
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
+       select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
        select CLONE_BACKWARDS
@@ -51,6 +53,8 @@ config ARM
        select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
        select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
        select HAVE_PERF_EVENTS
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
@@ -692,7 +696,6 @@ config ARCH_SA1100
        select GENERIC_CLOCKEVENTS
        select HAVE_IDE
        select ISA
-       select NEED_MACH_GPIO_H
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
        help
@@ -1549,6 +1552,32 @@ config MCPM
          for (multi-)cluster based systems, such as big.LITTLE based
          systems.
 
+config BIG_LITTLE
+       bool "big.LITTLE support (Experimental)"
+       depends on CPU_V7 && SMP
+       select MCPM
+       help
+         This option enables support selections for the big.LITTLE
+         system architecture.
+
+config BL_SWITCHER
+       bool "big.LITTLE switcher support"
+       depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+       select CPU_PM
+       select ARM_CPU_SUSPEND
+       help
+         The big.LITTLE "switcher" provides the core functionality to
+         transparently handle transition between a cluster of A15's
+         and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+       tristate "Simple big.LITTLE switcher user interface"
+       depends on BL_SWITCHER && DEBUG_KERNEL
+       help
+         This is a simple and dummy char dev interface to control
+         the big.LITTLE switcher core code.  It is meant for
+         debugging purposes only.
+
 choice
        prompt "Memory split"
        default VMSPLIT_3G
@@ -2217,8 +2246,7 @@ config NEON
 
 config KERNEL_MODE_NEON
        bool "Support for NEON in kernel mode"
-       default n
-       depends on NEON
+       depends on NEON && AEABI
        help
          Say Y to include support for NEON in kernel mode.
 
index 9762c84b419845f05ee6a7d1f9f95084dde310b5..2b3206824353f612d3c72e81eae0b1a157f9d1c7 100644 (file)
@@ -834,6 +834,20 @@ choice
                  options; the platform specific options are deprecated
                  and will be soon removed.
 
+       config DEBUG_LL_UART_EFM32
+               bool "Kernel low-level debugging via efm32 UART"
+               depends on ARCH_EFM32
+               help
+                 Say Y here if you want the debug print routines to direct
+                 their output to an UART or USART port on efm32 based
+                 machines. Use the following addresses for DEBUG_UART_PHYS:
+
+                   0x4000c000 | USART0
+                   0x4000c400 | USART1
+                   0x4000c800 | USART2
+                   0x4000e000 | UART0
+                   0x4000e400 | UART1
+
        config DEBUG_LL_UART_PL01X
                bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
                help
@@ -885,6 +899,7 @@ config DEBUG_LL_INCLUDE
        default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
        default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
        default "debug/exynos.S" if DEBUG_EXYNOS_UART
+       default "debug/efm32.S" if DEBUG_LL_UART_EFM32
        default "debug/icedcc.S" if DEBUG_ICEDCC
        default "debug/imx.S" if DEBUG_IMX1_UART || \
                                 DEBUG_IMX25_UART || \
@@ -951,6 +966,7 @@ config DEBUG_UART_PHYS
        default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
        default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0x20201000 if DEBUG_BCM2835
+       default 0x4000e400 if DEBUG_LL_UART_EFM32
        default 0x40090000 if ARCH_LPC32XX
        default 0x40100000 if DEBUG_PXA_UART1
        default 0x42000000 if ARCH_GEMINI
@@ -981,6 +997,7 @@ config DEBUG_UART_PHYS
        default 0xfff36000 if DEBUG_HIGHBANK_UART
        default 0xfffff700 if ARCH_IOP33X
        depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+               DEBUG_LL_UART_EFM32 || \
                DEBUG_UART_8250 || DEBUG_UART_PL01X
 
 config DEBUG_UART_VIRT
index a37a50f575a27af2c95abca5c473d6a60233d8b9..db50b626be9871f7426ee394a9b189d8d504c8a3 100644 (file)
@@ -296,10 +296,15 @@ archprepare:
 # Convert bzImage to zImage
 bzImage: zImage
 
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS   = zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS        = zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 %.dtb: | scripts
index 84aa2caf07ed203fb810220258401a1b51f7cab3..ec2f8065f955c5c31a69888bf261c58ea56ffb6d 100644 (file)
@@ -95,24 +95,24 @@ initrd:
        @test "$(INITRD)" != "" || \
        (echo You must specify INITRD; exit -1)
 
-install: $(obj)/Image
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/zImage
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/zImage System.map "$(INSTALL_PATH)"
 
-uinstall: $(obj)/uImage
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/uImage System.map "$(INSTALL_PATH)"
 
 zi:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/zImage System.map "$(INSTALL_PATH)"
 
 i:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/Image System.map "$(INSTALL_PATH)"
 
 subdir-            := bootp compressed dts
index cc0f1fb61753963d0d6f0a81d63f5d969c100d42..802720e3e8fd5c72004c14cce0bc572a0355356a 100644 (file)
@@ -41,6 +41,8 @@ dtb-$(CONFIG_ARCH_AT91)       += sama5d33ek.dtb
 dtb-$(CONFIG_ARCH_AT91)        += sama5d34ek.dtb
 dtb-$(CONFIG_ARCH_AT91)        += sama5d35ek.dtb
 
+dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
+
 dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
 dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \
        bcm28155-ap.dtb
@@ -183,6 +185,7 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
        am335x-evm.dtb \
        am335x-evmsk.dtb \
        am335x-bone.dtb \
+       am335x-boneblack.dtb \
        am3517-evm.dtb \
        am3517_mt_ventoux.dtb \
        am43x-epos-evm.dtb
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
new file mode 100644 (file)
index 0000000..2f66ded
--- /dev/null
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+       model = "TI AM335x BeagleBone";
+       compatible = "ti,am335x-bone", "ti,am33xx";
+
+       cpus {
+               cpu@0 {
+                       cpu0-supply = <&dcdc2_reg>;
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x80000000 0x10000000>; /* 256 MB */
+       };
+
+       am33xx_pinmux: pinmux@44e10800 {
+               pinctrl-names = "default";
+               pinctrl-0 = <&clkout2_pin>;
+
+               user_leds_s0: user_leds_s0 {
+                       pinctrl-single,pins = <
+                               0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7)  /* gpmc_a5.gpio1_21 */
+                               0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7)    /* gpmc_a6.gpio1_22 */
+                               0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7)  /* gpmc_a7.gpio1_23 */
+                               0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7)    /* gpmc_a8.gpio1_24 */
+                       >;
+               };
+
+               i2c0_pins: pinmux_i2c0_pins {
+                       pinctrl-single,pins = <
+                               0x188 (PIN_INPUT_PULLUP | MUX_MODE0)    /* i2c0_sda.i2c0_sda */
+                               0x18c (PIN_INPUT_PULLUP | MUX_MODE0)    /* i2c0_scl.i2c0_scl */
+                       >;
+               };
+
+               uart0_pins: pinmux_uart0_pins {
+                       pinctrl-single,pins = <
+                               0x170 (PIN_INPUT_PULLUP | MUX_MODE0)    /* uart0_rxd.uart0_rxd */
+                               0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+                       >;
+               };
+
+               clkout2_pin: pinmux_clkout2_pin {
+                       pinctrl-single,pins = <
+                               0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+                       >;
+               };
+
+               cpsw_default: cpsw_default {
+                       pinctrl-single,pins = <
+                               /* Slave 1 */
+                               0x110 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxerr.mii1_rxerr */
+                               0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
+                               0x118 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxdv.mii1_rxdv */
+                               0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
+                               0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
+                               0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
+                               0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
+                               0x12c (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_txclk.mii1_txclk */
+                               0x130 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxclk.mii1_rxclk */
+                               0x134 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd3.mii1_rxd3 */
+                               0x138 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd2.mii1_rxd2 */
+                               0x13c (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd1.mii1_rxd1 */
+                               0x140 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd0.mii1_rxd0 */
+                       >;
+               };
+
+               cpsw_sleep: cpsw_sleep {
+                       pinctrl-single,pins = <
+                               /* Slave 1 reset value */
+                               0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                       >;
+               };
+
+               davinci_mdio_default: davinci_mdio_default {
+                       pinctrl-single,pins = <
+                               /* MDIO */
+                               0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)    /* mdio_data.mdio_data */
+                               0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0)                   /* mdio_clk.mdio_clk */
+                       >;
+               };
+
+               davinci_mdio_sleep: davinci_mdio_sleep {
+                       pinctrl-single,pins = <
+                               /* MDIO reset value */
+                               0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                               0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+                       >;
+               };
+       };
+
+       ocp {
+               uart0: serial@44e09000 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&uart0_pins>;
+
+                       status = "okay";
+               };
+
+               musb: usb@47400000 {
+                       status = "okay";
+
+                       control@44e10000 {
+                               status = "okay";
+                       };
+
+                       usb-phy@47401300 {
+                               status = "okay";
+                       };
+
+                       usb-phy@47401b00 {
+                               status = "okay";
+                       };
+
+                       usb@47401000 {
+                               status = "okay";
+                       };
+
+                       usb@47401800 {
+                               status = "okay";
+                               dr_mode = "host";
+                       };
+
+                       dma-controller@07402000  {
+                               status = "okay";
+                       };
+               };
+
+               i2c0: i2c@44e0b000 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&i2c0_pins>;
+
+                       status = "okay";
+                       clock-frequency = <400000>;
+
+                       tps: tps@24 {
+                               reg = <0x24>;
+                       };
+
+               };
+       };
+
+       leds {
+               pinctrl-names = "default";
+               pinctrl-0 = <&user_leds_s0>;
+
+               compatible = "gpio-leds";
+
+               led@2 {
+                       label = "beaglebone:green:heartbeat";
+                       gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
+                       linux,default-trigger = "heartbeat";
+                       default-state = "off";
+               };
+
+               led@3 {
+                       label = "beaglebone:green:mmc0";
+                       gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+                       linux,default-trigger = "mmc0";
+                       default-state = "off";
+               };
+
+               led@4 {
+                       label = "beaglebone:green:usr2";
+                       gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+                       default-state = "off";
+               };
+
+               led@5 {
+                       label = "beaglebone:green:usr3";
+                       gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+                       default-state = "off";
+               };
+       };
+};
+
+/include/ "tps65217.dtsi"
+
+&tps {
+       regulators {
+               dcdc1_reg: regulator@0 {
+                       regulator-always-on;
+               };
+
+               dcdc2_reg: regulator@1 {
+                       /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+                       regulator-name = "vdd_mpu";
+                       regulator-min-microvolt = <925000>;
+                       regulator-max-microvolt = <1325000>;
+                       regulator-boot-on;
+                       regulator-always-on;
+               };
+
+               dcdc3_reg: regulator@2 {
+                       /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+                       regulator-name = "vdd_core";
+                       regulator-min-microvolt = <925000>;
+                       regulator-max-microvolt = <1150000>;
+                       regulator-boot-on;
+                       regulator-always-on;
+               };
+
+               ldo1_reg: regulator@3 {
+                       regulator-always-on;
+               };
+
+               ldo2_reg: regulator@4 {
+                       regulator-always-on;
+               };
+
+               ldo3_reg: regulator@5 {
+                       regulator-always-on;
+               };
+
+               ldo4_reg: regulator@6 {
+                       regulator-always-on;
+               };
+       };
+};
+
+&cpsw_emac0 {
+       phy_id = <&davinci_mdio>, <0>;
+       phy-mode = "mii";
+};
+
+&cpsw_emac1 {
+       phy_id = <&davinci_mdio>, <1>;
+       phy-mode = "mii";
+};
+
+&mac {
+       pinctrl-names = "default", "sleep";
+       pinctrl-0 = <&cpsw_default>;
+       pinctrl-1 = <&cpsw_sleep>;
+
+};
+
+&davinci_mdio {
+       pinctrl-names = "default", "sleep";
+       pinctrl-0 = <&davinci_mdio_default>;
+       pinctrl-1 = <&davinci_mdio_sleep>;
+};
index d318987d44a1931931f553eaad143c54efc53280..7993c489982c86ab2cf03a85b152f9b7db99ad3f 100644 (file)
@@ -8,258 +8,4 @@
 /dts-v1/;
 
 #include "am33xx.dtsi"
-
-/ {
-       model = "TI AM335x BeagleBone";
-       compatible = "ti,am335x-bone", "ti,am33xx";
-
-       cpus {
-               cpu@0 {
-                       cpu0-supply = <&dcdc2_reg>;
-               };
-       };
-
-       memory {
-               device_type = "memory";
-               reg = <0x80000000 0x10000000>; /* 256 MB */
-       };
-
-       am33xx_pinmux: pinmux@44e10800 {
-               pinctrl-names = "default";
-               pinctrl-0 = <&clkout2_pin>;
-
-               user_leds_s0: user_leds_s0 {
-                       pinctrl-single,pins = <
-                               0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7)  /* gpmc_a5.gpio1_21 */
-                               0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7)    /* gpmc_a6.gpio1_22 */
-                               0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7)  /* gpmc_a7.gpio1_23 */
-                               0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7)    /* gpmc_a8.gpio1_24 */
-                       >;
-               };
-
-               i2c0_pins: pinmux_i2c0_pins {
-                       pinctrl-single,pins = <
-                               0x188 (PIN_INPUT_PULLUP | MUX_MODE0)    /* i2c0_sda.i2c0_sda */
-                               0x18c (PIN_INPUT_PULLUP | MUX_MODE0)    /* i2c0_scl.i2c0_scl */
-                       >;
-               };
-
-               uart0_pins: pinmux_uart0_pins {
-                       pinctrl-single,pins = <
-                               0x170 (PIN_INPUT_PULLUP | MUX_MODE0)    /* uart0_rxd.uart0_rxd */
-                               0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
-                       >;
-               };
-
-               clkout2_pin: pinmux_clkout2_pin {
-                       pinctrl-single,pins = <
-                               0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
-                       >;
-               };
-
-               cpsw_default: cpsw_default {
-                       pinctrl-single,pins = <
-                               /* Slave 1 */
-                               0x110 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxerr.mii1_rxerr */
-                               0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
-                               0x118 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxdv.mii1_rxdv */
-                               0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
-                               0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
-                               0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
-                               0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
-                               0x12c (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_txclk.mii1_txclk */
-                               0x130 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxclk.mii1_rxclk */
-                               0x134 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd3.mii1_rxd3 */
-                               0x138 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd2.mii1_rxd2 */
-                               0x13c (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd1.mii1_rxd1 */
-                               0x140 (PIN_INPUT_PULLUP | MUX_MODE0)    /* mii1_rxd0.mii1_rxd0 */
-                       >;
-               };
-
-               cpsw_sleep: cpsw_sleep {
-                       pinctrl-single,pins = <
-                               /* Slave 1 reset value */
-                               0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                       >;
-               };
-
-               davinci_mdio_default: davinci_mdio_default {
-                       pinctrl-single,pins = <
-                               /* MDIO */
-                               0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)    /* mdio_data.mdio_data */
-                               0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0)                   /* mdio_clk.mdio_clk */
-                       >;
-               };
-
-               davinci_mdio_sleep: davinci_mdio_sleep {
-                       pinctrl-single,pins = <
-                               /* MDIO reset value */
-                               0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                               0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
-                       >;
-               };
-       };
-
-       ocp {
-               uart0: serial@44e09000 {
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&uart0_pins>;
-
-                       status = "okay";
-               };
-
-               musb: usb@47400000 {
-                       status = "okay";
-
-                       control@44e10000 {
-                               status = "okay";
-                       };
-
-                       usb-phy@47401300 {
-                               status = "okay";
-                       };
-
-                       usb-phy@47401b00 {
-                               status = "okay";
-                       };
-
-                       usb@47401000 {
-                               status = "okay";
-                       };
-
-                       usb@47401800 {
-                               status = "okay";
-                               dr_mode = "host";
-                       };
-
-                       dma-controller@07402000  {
-                               status = "okay";
-                       };
-               };
-
-               i2c0: i2c@44e0b000 {
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&i2c0_pins>;
-
-                       status = "okay";
-                       clock-frequency = <400000>;
-
-                       tps: tps@24 {
-                               reg = <0x24>;
-                       };
-
-               };
-       };
-
-       leds {
-               pinctrl-names = "default";
-               pinctrl-0 = <&user_leds_s0>;
-
-               compatible = "gpio-leds";
-
-               led@2 {
-                       label = "beaglebone:green:heartbeat";
-                       gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
-                       linux,default-trigger = "heartbeat";
-                       default-state = "off";
-               };
-
-               led@3 {
-                       label = "beaglebone:green:mmc0";
-                       gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
-                       linux,default-trigger = "mmc0";
-                       default-state = "off";
-               };
-
-               led@4 {
-                       label = "beaglebone:green:usr2";
-                       gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
-                       default-state = "off";
-               };
-
-               led@5 {
-                       label = "beaglebone:green:usr3";
-                       gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
-                       default-state = "off";
-               };
-       };
-};
-
-/include/ "tps65217.dtsi"
-
-&tps {
-       regulators {
-               dcdc1_reg: regulator@0 {
-                       regulator-always-on;
-               };
-
-               dcdc2_reg: regulator@1 {
-                       /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
-                       regulator-name = "vdd_mpu";
-                       regulator-min-microvolt = <925000>;
-                       regulator-max-microvolt = <1325000>;
-                       regulator-boot-on;
-                       regulator-always-on;
-               };
-
-               dcdc3_reg: regulator@2 {
-                       /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
-                       regulator-name = "vdd_core";
-                       regulator-min-microvolt = <925000>;
-                       regulator-max-microvolt = <1150000>;
-                       regulator-boot-on;
-                       regulator-always-on;
-               };
-
-               ldo1_reg: regulator@3 {
-                       regulator-always-on;
-               };
-
-               ldo2_reg: regulator@4 {
-                       regulator-always-on;
-               };
-
-               ldo3_reg: regulator@5 {
-                       regulator-always-on;
-               };
-
-               ldo4_reg: regulator@6 {
-                       regulator-always-on;
-               };
-       };
-};
-
-&cpsw_emac0 {
-       phy_id = <&davinci_mdio>, <0>;
-       phy-mode = "mii";
-};
-
-&cpsw_emac1 {
-       phy_id = <&davinci_mdio>, <1>;
-       phy-mode = "mii";
-};
-
-&mac {
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&cpsw_default>;
-       pinctrl-1 = <&cpsw_sleep>;
-
-};
-
-&davinci_mdio {
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&davinci_mdio_default>;
-       pinctrl-1 = <&davinci_mdio_sleep>;
-};
+#include "am335x-bone-common.dtsi"
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
new file mode 100644 (file)
index 0000000..197cadf
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+
+&ldo3_reg {
+       regulator-min-microvolt = <1800000>;
+       regulator-max-microvolt = <1800000>;
+       regulator-always-on;
+};
index f9c5da9c7fe1ce7d56557fb4582a0d9a53bbcfde..d76ae24c97453ff94ee9bc7232ed8283c8dbc01c 100644 (file)
                                /* Filled in by U-Boot */
                                mac-address = [ 00 00 00 00 00 00 ];
                        };
+
+                       phy_sel: cpsw-phy-sel@44e10650 {
+                               compatible = "ti,am3352-cpsw-phy-sel";
+                               reg= <0x44e10650 0x4>;
+                               reg-names = "gmii-sel";
+                       };
                };
 
                ocmcram: ocmcram@40300000 {
index 05e4485a822521ec1ca234c7991c674f817f4667..8ac2ac1f69cc0d6f50101c815fada7c9183ce9d8 100644 (file)
        };
 
        soc {
+               ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
+                         MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
+
+               pcie-controller {
+                       status = "okay";
+
+                       /* Connected to Marvell SATA controller */
+                       pcie@1,0 {
+                               /* Port 0, Lane 0 */
+                               status = "okay";
+                       };
+
+                       /* Connected to FL1009 USB 3.0 controller */
+                       pcie@2,0 {
+                               /* Port 1, Lane 0 */
+                               status = "okay";
+                       };
+               };
+
                internal-regs {
                        serial@12000 {
                                clock-frequency = <200000000>;
                                        marvell,pins = "mpp56";
                                        marvell,function = "gpio";
                                };
+
+                               poweroff: poweroff {
+                                       marvell,pins = "mpp8";
+                                       marvell,function = "gpio";
+                               };
                        };
 
                        mdio {
                                        pwm_polarity = <0>;
                                };
                        };
-
-                       pcie-controller {
-                               status = "okay";
-
-                               /* Connected to Marvell SATA controller */
-                               pcie@1,0 {
-                                       /* Port 0, Lane 0 */
-                                       status = "okay";
-                               };
-
-                               /* Connected to FL1009 USB 3.0 controller */
-                               pcie@2,0 {
-                                       /* Port 1, Lane 0 */
-                                       status = "okay";
-                               };
-                       };
                };
        };
 
                button@1 {
                        label = "Power Button";
                        linux,code = <116>;     /* KEY_POWER */
-                       gpios = <&gpio1 30 1>;
+                       gpios = <&gpio1 30 0>;
                };
 
                button@2 {
                };
        };
 
+       gpio_poweroff {
+               compatible = "gpio-poweroff";
+               pinctrl-0 = <&poweroff>;
+               pinctrl-names = "default";
+               gpios = <&gpio0 8 1>;
+       };
+
 };
index def125c0eeaa1596892f5cda162667d99853827c..3058522f5aad2929092f7f7bcc2dd78a9c62c22a 100644 (file)
@@ -70,6 +70,8 @@
 
                        timer@20300 {
                                compatible = "marvell,armada-xp-timer";
+                               clocks = <&coreclk 2>, <&refclk>;
+                               clock-names = "nbclk", "fixed";
                        };
 
                        coreclk: mvebu-sar@18230 {
                        };
                };
        };
+
+       clocks {
+               /* 25 MHz reference crystal */
+               refclk: oscillator {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <25000000>;
+               };
+       };
 };
index cf78ac0b04b12f77a3bc707e0228ce19a9e09785..e74dc15efa9d2f77fa339a16b975f42566ca726e 100644 (file)
                                                         AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>;  /* PA8 periph A */
                                        };
 
-                                       pinctrl_uart2_rts: uart2_rts-0 {
+                                       pinctrl_usart2_rts: usart2_rts-0 {
                                                atmel,pins =
                                                        <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>;  /* PB0 periph B */
                                        };
 
-                                       pinctrl_uart2_cts: uart2_cts-0 {
+                                       pinctrl_usart2_cts: usart2_cts-0 {
                                                atmel,pins =
                                                        <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>;  /* PB1 periph B */
                                        };
                                interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
                                dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>;
                                dma-names = "rxtx";
+                               pinctrl-names = "default";
                                #address-cells = <1>;
                                #size-cells = <0>;
                                status = "disabled";
                                interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
                                dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>;
                                dma-names = "rxtx";
+                               pinctrl-names = "default";
                                #address-cells = <1>;
                                #size-cells = <0>;
                                status = "disabled";
index 8678e0c1111981be494cfe6fecf05eb61a393169..6db4f81d4795d2855b83aac5a72cc3c7b8cb6727 100644 (file)
                                interrupts = <17>;
                                fifosize = <128>;
                                clocks = <&clks 13>;
+                               sirf,uart-dma-rx-channel = <21>;
+                               sirf,uart-dma-tx-channel = <2>;
                        };
 
                        uart1: uart@b0060000 {
                                interrupts = <19>;
                                fifosize = <128>;
                                clocks = <&clks 15>;
+                               sirf,uart-dma-rx-channel = <6>;
+                               sirf,uart-dma-tx-channel = <7>;
                        };
 
                        usp0: usp@b0080000 {
                                compatible = "sirf,prima2-usp";
                                reg = <0xb0080000 0x10000>;
                                interrupts = <20>;
+                               fifosize = <128>;
                                clocks = <&clks 28>;
+                               sirf,usp-dma-rx-channel = <17>;
+                               sirf,usp-dma-tx-channel = <18>;
                        };
 
                        usp1: usp@b0090000 {
                                compatible = "sirf,prima2-usp";
                                reg = <0xb0090000 0x10000>;
                                interrupts = <21>;
+                               fifosize = <128>;
                                clocks = <&clks 29>;
+                               sirf,usp-dma-rx-channel = <14>;
+                               sirf,usp-dma-tx-channel = <15>;
                        };
 
                        dmac0: dma-controller@b00b0000 {
                                compatible = "sirf,prima2-vip";
                                reg = <0xb00C0000 0x10000>;
                                clocks = <&clks 31>;
+                               interrupts = <14>;
+                               sirf,vip-dma-rx-channel = <16>;
                        };
 
                        spi0: spi@b00d0000 {
index 7d7cc777ff7b76099e8de5098a4a3a044a208ca9..bbac42a78ce543c2790a7943697afda1eba2e2aa 100644 (file)
                             <1 14 0xf08>,
                             <1 11 0xf08>,
                             <1 10 0xf08>;
+               /* Unfortunately we need this since some versions of U-Boot
+                * on Exynos don't set the CNTFRQ register, so we need the
+                * value from DT.
+                */
+               clock-frequency = <24000000>;
        };
 
        mct@101C0000 {
index c037c223619a7a9ced403c88f1146fb20c112378..b7a1c6d950b984b44efd65c100c05024c80c1b4a 100644 (file)
                                compatible = "fsl,imx27-cspi";
                                reg = <0x1000e000 0x1000>;
                                interrupts = <16>;
-                               clocks = <&clks 53>, <&clks 53>;
+                               clocks = <&clks 53>, <&clks 60>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
                                compatible = "fsl,imx27-cspi";
                                reg = <0x1000f000 0x1000>;
                                interrupts = <15>;
-                               clocks = <&clks 52>, <&clks 52>;
+                               clocks = <&clks 52>, <&clks 60>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
                                compatible = "fsl,imx27-cspi";
                                reg = <0x10017000 0x1000>;
                                interrupts = <6>;
-                               clocks = <&clks 51>, <&clks 51>;
+                               clocks = <&clks 51>, <&clks 60>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                        };
index a85abb424c3482d02dd325ae388bcf63686acc56..54cee6517902d647507f3ed40302135fcba1b849 100644 (file)
                                compatible = "fsl,imx51-pata", "fsl,imx27-pata";
                                reg = <0x83fe0000 0x4000>;
                                interrupts = <70>;
-                               clocks = <&clks 161>;
+                               clocks = <&clks 172>;
                                status = "disabled";
                        };
 
index c0e38a45e4bb487493ed8400328eb27d8a8965b2..9bbe82bdee4112ca0b15de05118b70472b04287c 100644 (file)
 #define MX6QDL_PAD_EIM_D29__ECSPI4_SS0              0x0c8 0x3dc 0x824 0x2 0x1
 #define MX6QDL_PAD_EIM_D29__UART2_RTS_B             0x0c8 0x3dc 0x924 0x4 0x1
 #define MX6QDL_PAD_EIM_D29__UART2_CTS_B             0x0c8 0x3dc 0x000 0x4 0x0
-#define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B         0x0c4 0x3dc 0x000 0x4 0x0
-#define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B         0x0c4 0x3dc 0x924 0x4 0x1
+#define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B         0x0c8 0x3dc 0x000 0x4 0x0
+#define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B         0x0c8 0x3dc 0x924 0x4 0x1
 #define MX6QDL_PAD_EIM_D29__GPIO3_IO29              0x0c8 0x3dc 0x000 0x5 0x0
 #define MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC         0x0c8 0x3dc 0x8e4 0x6 0x0
 #define MX6QDL_PAD_EIM_D29__IPU1_DI0_PIN14          0x0c8 0x3dc 0x000 0x7 0x0
index cf7aeaf89e9c1a6b8795113e0726c48b58f16803..1335b2e1bed4c66efe95ee2f679129d3856ccb43 100644 (file)
@@ -13,6 +13,7 @@
                cpu@0 {
                        device_type = "cpu";
                        compatible = "marvell,feroceon";
+                       reg = <0>;
                        clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>;
                        clock-names = "cpu_clk", "ddrclk", "powersave";
                };
                xor@60900 {
                        compatible = "marvell,orion-xor";
                        reg = <0x60900 0x100
-                              0xd0B00 0x100>;
+                              0x60B00 0x100>;
                        status = "okay";
                        clocks = <&gate_clk 16>;
 
index afdb16417d4e894bc2df369cd7c229a5b7161043..2816bf61267231fd7ec2dd11f87c6da9ed4b3311 100644 (file)
@@ -11,7 +11,7 @@
 
 / {
        model = "TI OMAP3 BeagleBoard xM";
-       compatible = "ti,omap3-beagle-xm, ti,omap3-beagle", "ti,omap3";
+       compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
 
        cpus {
                cpu@0 {
index bc48b114eae6404cf0e12ff08113f0bfd6002cf5..2326d11462a57dcb9e43991b359f63ce14da1520 100644 (file)
                >;
        };
 
+       mcbsp2_pins: pinmux_mcbsp2_pins {
+               pinctrl-single,pins = <
+                       0x10c (PIN_INPUT | MUX_MODE0)           /* mcbsp2_fsx.mcbsp2_fsx */
+                       0x10e (PIN_INPUT | MUX_MODE0)           /* mcbsp2_clkx.mcbsp2_clkx */
+                       0x110 (PIN_INPUT | MUX_MODE0)           /* mcbsp2_dr.mcbsp2.dr */
+                       0x112 (PIN_OUTPUT | MUX_MODE0)          /* mcbsp2_dx.mcbsp2_dx */
+               >;
+       };
+
        mmc1_pins: pinmux_mmc1_pins {
                pinctrl-single,pins = <
                        0x114 (PIN_INPUT_PULLUP | MUX_MODE0)    /* sdmmc1_clk.sdmmc1_clk */
        clock-frequency = <400000>;
 };
 
+&mcbsp2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&mcbsp2_pins>;
+};
+
 &mmc1 {
       pinctrl-names = "default";
       pinctrl-0 = <&mmc1_pins>;
index 7d95cda1fae4f0349bdfb582de99be2baf36dbf6..b41bd57f43287a048b73bfefe35b262ee139600e 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
                        pinctrl-single,register-width = <16>;
-                       pinctrl-single,function-mask = <0x7f1f>;
+                       pinctrl-single,function-mask = <0xff1f>;
                };
 
                omap3_pmx_wkup: pinmux@0x48002a00 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        pinctrl-single,register-width = <16>;
-                       pinctrl-single,function-mask = <0x7f1f>;
+                       pinctrl-single,function-mask = <0xff1f>;
                };
 
                gpio1: gpio@48310000 {
index faa95b5b242ee437aeea84c8d4dde915a67c2031..814ab67c8c299b0b818f669c917d51ebcb8a94c0 100644 (file)
         */
                clock-frequency = <19200000>;
        };
+
+       /* regulator for wl12xx on sdio5 */
+       wl12xx_vmmc: wl12xx_vmmc {
+               pinctrl-names = "default";
+               pinctrl-0 = <&wl12xx_gpio>;
+               compatible = "regulator-fixed";
+               regulator-name = "vwl1271";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               gpio = <&gpio2 11 0>;
+               startup-delay-us = <70000>;
+               enable-active-high;
+       };
 };
 
 &omap4_pmx_wkup {
                        0x1c (PIN_OUTPUT | MUX_MODE3)   /* gpio_wk8 */
                >;
        };
+
+       /*
+        * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
+        * REVISIT: Are the pull-ups needed for GPIO 48 and 49?
+        */
+       wl12xx_gpio: pinmux_wl12xx_gpio {
+               pinctrl-single,pins = <
+                       0x26 (PIN_OUTPUT | MUX_MODE3)           /* gpmc_a19.gpio_43 */
+                       0x2c (PIN_OUTPUT | MUX_MODE3)           /* gpmc_a22.gpio_46 */
+                       0x30 (PIN_OUTPUT_PULLUP | MUX_MODE3)    /* gpmc_a24.gpio_48 */
+                       0x32 (PIN_OUTPUT_PULLUP | MUX_MODE3)    /* gpmc_a25.gpio_49 */
+               >;
+       };
+
+       /* wl12xx GPIO inputs and SDIO pins */
+       wl12xx_pins: pinmux_wl12xx_pins {
+               pinctrl-single,pins = <
+                       0x38 (PIN_INPUT | MUX_MODE3)            /* gpmc_ncs2.gpio_52 */
+                       0x3a (PIN_INPUT | MUX_MODE3)            /* gpmc_ncs3.gpio_53 */
+                       0x108 (PIN_OUTPUT | MUX_MODE0)          /* sdmmc5_clk.sdmmc5_clk */
+                       0x10a (PIN_INPUT_PULLUP | MUX_MODE0)    /* sdmmc5_cmd.sdmmc5_cmd */
+                       0x10c (PIN_INPUT_PULLUP | MUX_MODE0)    /* sdmmc5_dat0.sdmmc5_dat0 */
+                       0x10e (PIN_INPUT_PULLUP | MUX_MODE0)    /* sdmmc5_dat1.sdmmc5_dat1 */
+                       0x110 (PIN_INPUT_PULLUP | MUX_MODE0)    /* sdmmc5_dat2.sdmmc5_dat2 */
+                       0x112 (PIN_INPUT_PULLUP | MUX_MODE0)    /* sdmmc5_dat3.sdmmc5_dat3 */
+               >;
+       };
 };
 
 &i2c1 {
 };
 
 &mmc5 {
-       ti,non-removable;
+       pinctrl-names = "default";
+       pinctrl-0 = <&wl12xx_pins>;
+       vmmc-supply = <&wl12xx_vmmc>;
+       non-removable;
        bus-width = <4>;
+       cap-power-off-card;
 };
 
 &emif1 {
index 7951b4ea500af37ac2a4f6b8f59e97cbf263d595..4f78380ecdb890c5b72cae0ac8937365540d80bd 100644 (file)
                        "DMic", "Digital Mic",
                        "Digital Mic", "Digital Mic1 Bias";
        };
+
+       /* regulator for wl12xx on sdio5 */
+       wl12xx_vmmc: wl12xx_vmmc {
+               pinctrl-names = "default";
+               pinctrl-0 = <&wl12xx_gpio>;
+               compatible = "regulator-fixed";
+               regulator-name = "vwl1271";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               gpio = <&gpio2 22 0>;
+               startup-delay-us = <70000>;
+               enable-active-high;
+       };
 };
 
 &omap4_pmx_wkup {
                        0xf0 (PIN_INPUT_PULLUP | MUX_MODE0)     /* i2c4_sda */
                >;
        };
+
+       /* wl12xx GPIO output for WLAN_EN */
+       wl12xx_gpio: pinmux_wl12xx_gpio {
+               pinctrl-single,pins = <
+                       0x3c (PIN_OUTPUT | MUX_MODE3)           /* gpmc_nwp.gpio_54 */
+               >;
+       };
+
+       /* wl12xx GPIO inputs and SDIO pins */
+       wl12xx_pins: pinmux_wl12xx_pins {
+               pinctrl-single,pins = <
+                       0x3a (PIN_INPUT | MUX_MODE3)            /* gpmc_ncs3.gpio_53 */
+                       0x108 (PIN_OUTPUT | MUX_MODE3)          /* sdmmc5_clk.sdmmc5_clk */
+                       0x10a (PIN_INPUT_PULLUP | MUX_MODE3)    /* sdmmc5_cmd.sdmmc5_cmd */
+                       0x10c (PIN_INPUT_PULLUP | MUX_MODE3)    /* sdmmc5_dat0.sdmmc5_dat0 */
+                       0x10e (PIN_INPUT_PULLUP | MUX_MODE3)    /* sdmmc5_dat1.sdmmc5_dat1 */
+                       0x110 (PIN_INPUT_PULLUP | MUX_MODE3)    /* sdmmc5_dat2.sdmmc5_dat2 */
+                       0x112 (PIN_INPUT_PULLUP | MUX_MODE3)    /* sdmmc5_dat3.sdmmc5_dat3 */
+               >;
+       };
 };
 
 &i2c1 {
 };
 
 &mmc5 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&wl12xx_pins>;
+       vmmc-supply = <&wl12xx_vmmc>;
+       non-removable;
        bus-width = <4>;
-       ti,non-removable;
+       cap-power-off-card;
 };
 
 &emif1 {
index 07be2cd7b3188f6c943b1928e812dfa676973bfb..7cdea1bfea091917455e46cc18af7d6d80a2c0f0 100644 (file)
                omap_dwc3@4a020000 {
                        compatible = "ti,dwc3";
                        ti,hwmods = "usb_otg_ss";
-                       reg = <0x4a020000 0x1000>;
+                       reg = <0x4a020000 0x10000>;
                        interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
                        dwc3@4a030000 {
                                compatible = "snps,dwc3";
-                               reg = <0x4a030000 0x1000>;
+                               reg = <0x4a030000 0x10000>;
                                interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
                                usb-phy = <&usb2_phy>, <&usb3_phy>;
                                tx-fifo-resize;
                        };
                };
 
-               ocp2scp {
+               ocp2scp@4a080000 {
                        compatible = "ti,omap-ocp2scp";
                        #address-cells = <1>;
                        #size-cells = <1>;
+                       reg = <0x4a080000 0x20>;
                        ranges;
                        ti,hwmods = "ocp2scp1";
                        usb2_phy: usb2phy@4a084000 {
index bbeb623fc2c6f82dc9f92b1c77a90811946f9736..27ed9f5144bcb927d777cf4468c1ddaa5c11c837 100644 (file)
                        compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       ranges = <0xb0000000 0xb0000000 0x180000>;
+                       ranges = <0xb0000000 0xb0000000 0x180000>,
+                              <0x56000000 0x56000000 0x1b00000>;
 
                        timer@b0020000 {
                                compatible = "sirf,prima2-tick";
                        uart0: uart@b0050000 {
                                cell-index = <0>;
                                compatible = "sirf,prima2-uart";
-                               reg = <0xb0050000 0x10000>;
+                               reg = <0xb0050000 0x1000>;
                                interrupts = <17>;
+                               fifosize = <128>;
                                clocks = <&clks 13>;
+                               sirf,uart-dma-rx-channel = <21>;
+                               sirf,uart-dma-tx-channel = <2>;
                        };
 
                        uart1: uart@b0060000 {
                                cell-index = <1>;
                                compatible = "sirf,prima2-uart";
-                               reg = <0xb0060000 0x10000>;
+                               reg = <0xb0060000 0x1000>;
                                interrupts = <18>;
+                               fifosize = <32>;
                                clocks = <&clks 14>;
                        };
 
                        uart2: uart@b0070000 {
                                cell-index = <2>;
                                compatible = "sirf,prima2-uart";
-                               reg = <0xb0070000 0x10000>;
+                               reg = <0xb0070000 0x1000>;
                                interrupts = <19>;
+                               fifosize = <128>;
                                clocks = <&clks 15>;
+                               sirf,uart-dma-rx-channel = <6>;
+                               sirf,uart-dma-tx-channel = <7>;
                        };
 
                        usp0: usp@b0080000 {
                                compatible = "sirf,prima2-usp";
                                reg = <0xb0080000 0x10000>;
                                interrupts = <20>;
+                               fifosize = <128>;
                                clocks = <&clks 28>;
+                               sirf,usp-dma-rx-channel = <17>;
+                               sirf,usp-dma-tx-channel = <18>;
                        };
 
                        usp1: usp@b0090000 {
                                compatible = "sirf,prima2-usp";
                                reg = <0xb0090000 0x10000>;
                                interrupts = <21>;
+                               fifosize = <128>;
                                clocks = <&clks 29>;
+                               sirf,usp-dma-rx-channel = <14>;
+                               sirf,usp-dma-tx-channel = <15>;
                        };
 
                        usp2: usp@b00a0000 {
                                compatible = "sirf,prima2-usp";
                                reg = <0xb00a0000 0x10000>;
                                interrupts = <22>;
+                               fifosize = <128>;
                                clocks = <&clks 30>;
+                               sirf,usp-dma-rx-channel = <10>;
+                               sirf,usp-dma-tx-channel = <11>;
                        };
 
                        dmac0: dma-controller@b00b0000 {
                                compatible = "sirf,prima2-vip";
                                reg = <0xb00C0000 0x10000>;
                                clocks = <&clks 31>;
+                               interrupts = <14>;
+                               sirf,vip-dma-rx-channel = <16>;
                        };
 
                        spi0: spi@b00d0000 {
index 6c26caa880f2917344ada23a6a8e8e106412a0f9..658fcc537576b309ae06ecb74eaf65b3e3f2856c 100644 (file)
        };
 
        sdhi0: sdhi@ee100000 {
-               compatible = "renesas,r8a73a4-sdhi";
+               compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee100000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 165 4>;
        };
 
        sdhi1: sdhi@ee120000 {
-               compatible = "renesas,r8a73a4-sdhi";
+               compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee120000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 166 4>;
        };
 
        sdhi2: sdhi@ee140000 {
-               compatible = "renesas,r8a73a4-sdhi";
+               compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee140000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 167 4>;
index 45ac404ab6d8b79b246f6f5724f3609b48bf58cb..3577aba8258336bab80d44c3065c6e94f6ffe8a7 100644 (file)
@@ -96,6 +96,5 @@
        pfc: pfc@fffc0000 {
                compatible = "renesas,pfc-r8a7778";
                reg = <0xfffc000 0x118>;
-               #gpio-range-cells = <3>;
        };
 };
index 23a62447359c1a690f354bada3f3dbcabcbf6286..ebbe507fcbfa118280da96482dfaa109854f18c8 100644 (file)
        pfc: pfc@fffc0000 {
                compatible = "renesas,pfc-r8a7779";
                reg = <0xfffc0000 0x23c>;
-               #gpio-range-cells = <3>;
        };
 
        thermal@ffc48000 {
index 3b879e7c697c336748d5f35adf6744c6d7e14458..413b4c29e782d7ded622563b8a458e28d382db19 100644 (file)
        pfc: pfc@e6060000 {
                compatible = "renesas,pfc-r8a7790";
                reg = <0 0xe6060000 0 0x250>;
-               #gpio-range-cells = <3>;
        };
 
        sdhi0: sdhi@ee100000 {
-               compatible = "renesas,r8a7790-sdhi";
+               compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee100000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 165 4>;
        };
 
        sdhi1: sdhi@ee120000 {
-               compatible = "renesas,r8a7790-sdhi";
+               compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee120000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 166 4>;
        };
 
        sdhi2: sdhi@ee140000 {
-               compatible = "renesas,r8a7790-sdhi";
+               compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee140000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 167 4>;
        };
 
        sdhi3: sdhi@ee160000 {
-               compatible = "renesas,r8a7790-sdhi";
+               compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee160000 0 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 168 4>;
index ba59a5875a10689d14fd96d9102e21814baf2a54..3955c7606a6f45a33036bec612ad918806315dee 100644 (file)
        };
 
        sdhi0: sdhi@ee100000 {
-               compatible = "renesas,r8a7740-sdhi";
+               compatible = "renesas,sdhi-r8a7740";
                reg = <0xee100000 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 83 4
 
        /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */
        sdhi1: sdhi@ee120000 {
-               compatible = "renesas,r8a7740-sdhi";
+               compatible = "renesas,sdhi-r8a7740";
                reg = <0xee120000 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 88 4
        };
 
        sdhi2: sdhi@ee140000 {
-               compatible = "renesas,r8a7740-sdhi";
+               compatible = "renesas,sdhi-r8a7740";
                reg = <0xee140000 0x100>;
                interrupt-parent = <&gic>;
                interrupts = <0 104 4
index 06ea7d42ce8e6bb9f72633a14abd2ace113fa712..2a45092a40e3251e44447f3349e80ee0a69e97a4 100644 (file)
 #   $4 - default install path (blank if root directory)
 #
 
+verify () {
+       if [ ! -f "$1" ]; then
+               echo ""                                                   1>&2
+               echo " *** Missing file: $1"                              1>&2
+               echo ' *** You need to run "make" before "make install".' 1>&2
+               echo ""                                                   1>&2
+               exit 1
+       fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
index 8c60f473e97625cb8c0bb068375da7368d1ff6fe..5c8584c4944d780f90bcedb40b8224fb60fa364f 100644 (file)
@@ -17,3 +17,5 @@ obj-$(CONFIG_MCPM)            += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 AFLAGS_mcpm_head.o             := -march=armv7-a
 AFLAGS_vlock.o                 := -march=armv7-a
 obj-$(CONFIG_TI_PRIV_EDMA)     += edma.o
+obj-$(CONFIG_BL_SWITCHER)      += bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644 (file)
index 0000000..63bbc4f
--- /dev/null
@@ -0,0 +1,822 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright:  (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here as the value _does_ change along some code paths.
+ */
+
+static int read_mpidr(void)
+{
+       unsigned int id;
+       asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
+       return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+       struct timespec ts;
+       getnstimeofday(&ts);
+       return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+       unsigned ib_mpidr, ib_cpu, ib_cluster;
+       long volatile handshake, **handshake_ptr = _arg;
+
+       pr_debug("%s\n", __func__);
+
+       ib_mpidr = cpu_logical_map(smp_processor_id());
+       ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+       ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+       /* Advertise our handshake location */
+       if (handshake_ptr) {
+               handshake = 0;
+               *handshake_ptr = &handshake;
+       } else
+               handshake = -1;
+
+       /*
+        * Our state has been saved at this point.  Let's release our
+        * inbound CPU.
+        */
+       mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+       sev();
+
+       /*
+        * From this point, we must assume that our counterpart CPU might
+        * have taken over in its parallel world already, as if execution
+        * just returned from cpu_suspend().  It is therefore important to
+        * be very careful not to make any change the other guy is not
+        * expecting.  This is why we need stack isolation.
+        *
+        * Fancy under cover tasks could be performed here.  For now
+        * we have none.
+        */
+
+       /*
+        * Let's wait until our inbound is alive.
+        */
+       while (!handshake) {
+               wfe();
+               smp_mb();
+       }
+
+       /* Let's put ourselves down. */
+       mcpm_cpu_power_down();
+
+       /* should never get here */
+       BUG();
+}
+
+/*
+ * Stack isolation.  To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+       unsigned int mpidr = read_mpidr();
+       unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       void *stack = current_thread_info() + 1;
+       stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+       stack += clusterid * STACK_SIZE + STACK_SIZE;
+       call_with_stack(bL_do_switch, (void *)_arg, stack);
+       BUG();
+}
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+       unsigned int mpidr, this_cpu, that_cpu;
+       unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+       struct completion inbound_alive;
+       struct tick_device *tdev;
+       enum clock_event_mode tdev_mode;
+       long volatile *handshake_ptr;
+       int ipi_nr, ret;
+
+       this_cpu = smp_processor_id();
+       ob_mpidr = read_mpidr();
+       ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+       ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+       BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+       if (new_cluster_id == ob_cluster)
+               return 0;
+
+       that_cpu = bL_switcher_cpu_pairing[this_cpu];
+       ib_mpidr = cpu_logical_map(that_cpu);
+       ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+       ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+       pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+                this_cpu, ob_mpidr, ib_mpidr);
+
+       this_cpu = smp_processor_id();
+
+       /* Close the gate for our entry vectors */
+       mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+       mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+       /* Install our "inbound alive" notifier. */
+       init_completion(&inbound_alive);
+       ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+       ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+       mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+       /*
+        * Let's wake up the inbound CPU now in case it requires some delay
+        * to come online, but leave it gated in our entry vector code.
+        */
+       ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+       if (ret) {
+               pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+               return ret;
+       }
+
+       /*
+        * Raise a SGI on the inbound CPU to make sure it doesn't stall
+        * in a possible WFI, such as in bL_power_down().
+        */
+       gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+       /*
+        * Wait for the inbound to come up.  This allows for other
+        * tasks to be scheduled in the meantime.
+        */
+       wait_for_completion(&inbound_alive);
+       mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+       /*
+        * From this point we are entering the switch critical zone
+        * and can't take any interrupts anymore.
+        */
+       local_irq_disable();
+       local_fiq_disable();
+       trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+       /* redirect GIC's SGIs to our counterpart */
+       gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+       tdev = tick_get_device(this_cpu);
+       if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+               tdev = NULL;
+       if (tdev) {
+               tdev_mode = tdev->evtdev->mode;
+               clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+       }
+
+       ret = cpu_pm_enter();
+
+       /* we cannot tolerate errors at this point */
+       if (ret)
+               panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+       /* Swap the physical CPUs in the logical map for this logical CPU. */
+       cpu_logical_map(this_cpu) = ib_mpidr;
+       cpu_logical_map(that_cpu) = ob_mpidr;
+
+       /* Let's do the actual CPU switch. */
+       ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+       if (ret > 0)
+               panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+       /* We are executing on the inbound CPU at this point */
+       mpidr = read_mpidr();
+       pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+       BUG_ON(mpidr != ib_mpidr);
+
+       mcpm_cpu_powered_up();
+
+       ret = cpu_pm_exit();
+
+       if (tdev) {
+               clockevents_set_mode(tdev->evtdev, tdev_mode);
+               clockevents_program_event(tdev->evtdev,
+                                         tdev->evtdev->next_event, 1);
+       }
+
+       trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+       local_fiq_enable();
+       local_irq_enable();
+
+       *handshake_ptr = 1;
+       dsb_sev();
+
+       if (ret)
+               pr_err("%s exiting with error %d\n", __func__, ret);
+       return ret;
+}
+
+struct bL_thread {
+       spinlock_t lock;
+       struct task_struct *task;
+       wait_queue_head_t wq;
+       int wanted_cluster;
+       struct completion started;
+       bL_switch_completion_handler completer;
+       void *completer_cookie;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+       struct bL_thread *t = arg;
+       struct sched_param param = { .sched_priority = 1 };
+       int cluster;
+       bL_switch_completion_handler completer;
+       void *completer_cookie;
+
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+       complete(&t->started);
+
+       do {
+               if (signal_pending(current))
+                       flush_signals(current);
+               wait_event_interruptible(t->wq,
+                               t->wanted_cluster != -1 ||
+                               kthread_should_stop());
+
+               spin_lock(&t->lock);
+               cluster = t->wanted_cluster;
+               completer = t->completer;
+               completer_cookie = t->completer_cookie;
+               t->wanted_cluster = -1;
+               t->completer = NULL;
+               spin_unlock(&t->lock);
+
+               if (cluster != -1) {
+                       bL_switch_to(cluster);
+
+                       if (completer)
+                               completer(completer_cookie);
+               }
+       } while (!kthread_should_stop());
+
+       return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+       struct task_struct *task;
+
+       task = kthread_create_on_node(bL_switcher_thread, arg,
+                                     cpu_to_node(cpu), "kswitcher_%d", cpu);
+       if (!IS_ERR(task)) {
+               kthread_bind(task, cpu);
+               wake_up_process(task);
+       } else
+               pr_err("%s failed for CPU %d\n", __func__, cpu);
+       return task;
+}
+
+/*
+ * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
+ *      with completion notification via a callback
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ * @completer: switch completion callback.  if non-NULL,
+ *     @completer(@completer_cookie) will be called on completion of
+ *     the switch, in non-atomic context.
+ * @completer_cookie: opaque context argument for @completer.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread.  This function may or may not return
+ * before the switch has occurred.
+ *
+ * If a @completer callback function is supplied, it will be called when
+ * the switch is complete.  This can be used to determine asynchronously
+ * when the switch is complete, regardless of when bL_switch_request()
+ * returns.  When @completer is supplied, no new switch request is permitted
+ * for the affected CPU until after the switch is complete, and @completer
+ * has returned.
+ */
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+                        bL_switch_completion_handler completer,
+                        void *completer_cookie)
+{
+       struct bL_thread *t;
+
+       if (cpu >= ARRAY_SIZE(bL_threads)) {
+               pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+               return -EINVAL;
+       }
+
+       t = &bL_threads[cpu];
+
+       if (IS_ERR(t->task))
+               return PTR_ERR(t->task);
+       if (!t->task)
+               return -ESRCH;
+
+       spin_lock(&t->lock);
+       if (t->completer) {
+               spin_unlock(&t->lock);
+               return -EBUSY;
+       }
+       t->completer = completer;
+       t->completer_cookie = completer_cookie;
+       t->wanted_cluster = new_cluster_id;
+       spin_unlock(&t->lock);
+       wake_up(&t->wq);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_cb);
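
For illustration only (not part of this patch): a minimal sketch of how an in-kernel caller might use bL_switch_request_cb() to block until a switch has finished. The wrapper and its names are hypothetical; the callback contract follows the kernel-doc above, i.e. the completer runs in non-atomic context once the switch is done, and a further request for the same CPU fails with -EBUSY until then.

/* Hypothetical caller: request a switch and wait for its completion. */
#include <linux/completion.h>
#include <asm/bL_switcher.h>

static void my_switch_done(void *cookie)
{
        complete(cookie);               /* runs in non-atomic context */
}

static int my_switch_and_wait(unsigned int cpu, unsigned int cluster)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        ret = bL_switch_request_cb(cpu, cluster, my_switch_done, &done);
        if (ret)
                return ret;             /* -EINVAL, -ESRCH or -EBUSY */

        wait_for_completion(&done);
        return 0;
}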
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+       int ret;
+
+       ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+       if (ret & NOTIFY_STOP_MASK)
+               pr_err("%s: notifier chain failed with status 0x%x\n",
+                       __func__, ret);
+       return notifier_to_errno(ret);
+}
+
+static void bL_switcher_restore_cpus(void)
+{
+       int i;
+
+       for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+               cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+       int i, j, cluster_0, gic_id, ret;
+       unsigned int cpu, cluster, mask;
+       cpumask_t available_cpus;
+
+       /* First pass to validate what we have */
+       mask = 0;
+       for_each_online_cpu(i) {
+               cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+               if (cluster >= 2) {
+                       pr_err("%s: only dual cluster systems are supported\n", __func__);
+                       return -EINVAL;
+               }
+               if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+                       return -EINVAL;
+               mask |= (1 << cluster);
+       }
+       if (mask != 3) {
+               pr_err("%s: no CPU pairing possible\n", __func__);
+               return -EINVAL;
+       }
+
+       /*
+        * Now let's do the pairing.  We match each CPU with another CPU
+        * from a different cluster.  To get a uniform scheduling behavior
+        * without fiddling with CPU topology and compute capacity data,
+        * we'll use logical CPUs initially belonging to the same cluster.
+        */
+       memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+       cpumask_copy(&available_cpus, cpu_online_mask);
+       cluster_0 = -1;
+       for_each_cpu(i, &available_cpus) {
+               int match = -1;
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+               if (cluster_0 == -1)
+                       cluster_0 = cluster;
+               if (cluster != cluster_0)
+                       continue;
+               cpumask_clear_cpu(i, &available_cpus);
+               for_each_cpu(j, &available_cpus) {
+                       cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+                       /*
+                        * Let's remember the last match to create "odd"
+                        * pairings on purpose in order for other code not
+                        * to assume any relation between physical and
+                        * logical CPU numbers.
+                        */
+                       if (cluster != cluster_0)
+                               match = j;
+               }
+               if (match != -1) {
+                       bL_switcher_cpu_pairing[i] = match;
+                       cpumask_clear_cpu(match, &available_cpus);
+                       pr_info("CPU%d paired with CPU%d\n", i, match);
+               }
+       }
+
+       /*
+        * Now we disable the unwanted CPUs i.e. everything that has no
+        * pairing information (that includes the pairing counterparts).
+        */
+       cpumask_clear(&bL_switcher_removed_logical_cpus);
+       for_each_online_cpu(i) {
+               cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+               /* Let's take note of the GIC ID for this CPU */
+               gic_id = gic_get_cpu_id(i);
+               if (gic_id < 0) {
+                       pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+                       bL_switcher_restore_cpus();
+                       return -EINVAL;
+               }
+               bL_gic_id[cpu][cluster] = gic_id;
+               pr_info("GIC ID for CPU %u cluster %u is %u\n",
+                       cpu, cluster, gic_id);
+
+               if (bL_switcher_cpu_pairing[i] != -1) {
+                       bL_switcher_cpu_original_cluster[i] = cluster;
+                       continue;
+               }
+
+               ret = cpu_down(i);
+               if (ret) {
+                       bL_switcher_restore_cpus();
+                       return ret;
+               }
+               cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+       }
+
+       return 0;
+}
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+       int cpu;
+
+       if (!bL_switcher_active)
+               return -EUNATCH;
+
+       mpidr &= MPIDR_HWID_BITMASK;
+       for_each_online_cpu(cpu) {
+               int pairing = bL_switcher_cpu_pairing[cpu];
+               if (pairing == -1)
+                       continue;
+               if ((mpidr == cpu_logical_map(cpu)) ||
+                   (mpidr == cpu_logical_map(pairing)))
+                       return cpu;
+       }
+       return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+       trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+       int ret;
+
+       preempt_disable();
+
+       bL_switcher_trace_trigger_cpu(NULL);
+       ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+       preempt_enable();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+       int cpu, ret;
+
+       mutex_lock(&bL_switcher_activation_lock);
+       cpu_hotplug_driver_lock();
+       if (bL_switcher_active) {
+               cpu_hotplug_driver_unlock();
+               mutex_unlock(&bL_switcher_activation_lock);
+               return 0;
+       }
+
+       pr_info("big.LITTLE switcher initializing\n");
+
+       ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+       if (ret)
+               goto error;
+
+       ret = bL_switcher_halve_cpus();
+       if (ret)
+               goto error;
+
+       bL_switcher_trace_trigger();
+
+       for_each_online_cpu(cpu) {
+               struct bL_thread *t = &bL_threads[cpu];
+               spin_lock_init(&t->lock);
+               init_waitqueue_head(&t->wq);
+               init_completion(&t->started);
+               t->wanted_cluster = -1;
+               t->task = bL_switcher_thread_create(cpu, t);
+       }
+
+       bL_switcher_active = 1;
+       bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+       pr_info("big.LITTLE switcher initialized\n");
+       goto out;
+
+error:
+       pr_warn("big.LITTLE switcher initialization failed\n");
+       bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+       cpu_hotplug_driver_unlock();
+       mutex_unlock(&bL_switcher_activation_lock);
+       return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+       unsigned int cpu, cluster;
+       struct bL_thread *t;
+       struct task_struct *task;
+
+       mutex_lock(&bL_switcher_activation_lock);
+       cpu_hotplug_driver_lock();
+
+       if (!bL_switcher_active)
+               goto out;
+
+       if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+               bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+               goto out;
+       }
+
+       bL_switcher_active = 0;
+
+       /*
+        * To deactivate the switcher, we must shut down the switcher
+        * threads to prevent any other requests from being accepted.
+        * Then, if the final cluster for a given logical CPU is not the
+        * same as the original one, we'll recreate a switcher thread
+        * just for the purpose of switching the CPU back without any
+        * possibility for interference from external requests.
+        */
+       for_each_online_cpu(cpu) {
+               t = &bL_threads[cpu];
+               task = t->task;
+               t->task = NULL;
+               if (!task || IS_ERR(task))
+                       continue;
+               kthread_stop(task);
+               /* no more switch may happen on this CPU at this point */
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+               if (cluster == bL_switcher_cpu_original_cluster[cpu])
+                       continue;
+               init_completion(&t->started);
+               t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+               task = bL_switcher_thread_create(cpu, t);
+               if (!IS_ERR(task)) {
+                       wait_for_completion(&t->started);
+                       kthread_stop(task);
+                       cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+                       if (cluster == bL_switcher_cpu_original_cluster[cpu])
+                               continue;
+               }
+               /* If execution gets here, we're in trouble. */
+               pr_crit("%s: unable to restore original cluster for CPU %d\n",
+                       __func__, cpu);
+               pr_crit("%s: CPU %d can't be restored\n",
+                       __func__, bL_switcher_cpu_pairing[cpu]);
+               cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+                                 &bL_switcher_removed_logical_cpus);
+       }
+
+       bL_switcher_restore_cpus();
+       bL_switcher_trace_trigger();
+
+       bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+       cpu_hotplug_driver_unlock();
+       mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int ret;
+
+       switch (buf[0]) {
+       case '0':
+               bL_switcher_disable();
+               ret = 0;
+               break;
+       case '1':
+               ret = bL_switcher_enable();
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int ret = bL_switcher_trace_trigger();
+
+       return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+       __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+       __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+       &bL_switcher_active_attr.attr,
+       &bL_switcher_trace_trigger_attr.attr,
+       NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+       .attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+       int ret;
+
+       bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+       if (!bL_switcher_kobj)
+               return -ENOMEM;
+       ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+       if (ret)
+               kobject_put(bL_switcher_kobj);
+       return ret;
+}
+
+#endif  /* CONFIG_SYSFS */
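
Usage note, not part of the patch: with the kobject registered on kernel_kobj, the two attributes should surface as /sys/kernel/bL_switcher/active and /sys/kernel/bL_switcher/trace_trigger. A hypothetical user-space helper for the "active" knob might look like this:

/* Hypothetical user-space helper: enable or disable the switcher via sysfs. */
#include <stdio.h>

static int bl_switcher_set_active(int on)
{
        FILE *f = fopen("/sys/kernel/bL_switcher/active", "w");

        if (!f)
                return -1;
        fprintf(f, "%d\n", on ? 1 : 0);     /* store accepts '0' or '1' */
        return fclose(f);
}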
+
+bool bL_switcher_get_enabled(void)
+{
+       mutex_lock(&bL_switcher_activation_lock);
+
+       return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+       mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
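
A short sketch of the intended calling pattern, inferred from the two functions above and not taken from the patch (assuming the usual kernel headers plus <asm/bL_switcher.h>): bL_switcher_get_enabled() returns the current state with the activation lock held, so every call must be paired with bL_switcher_put_enabled() once the caller no longer needs that state to stay stable.

/* Hypothetical in-kernel caller keeping the switcher state stable. */
static void my_switcher_aware_work(void)
{
        bool active = bL_switcher_get_enabled();

        if (active) {
                /* The switcher cannot be disabled until we drop the lock. */
                /* ... work that relies on the paired/halved CPU map ... */
        }

        bL_switcher_put_enabled();      /* releases the activation lock */
}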
+
+/*
+ * Veto any CPU hotplug operation on those CPUs we've removed
+ * while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+{
+       if (bL_switcher_active) {
+               int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
+               switch (action & 0xf) {
+               case CPU_UP_PREPARE:
+               case CPU_DOWN_PREPARE:
+                       if (pairing == -1)
+                               return NOTIFY_BAD;
+               }
+       }
+       return NOTIFY_DONE;
+}
+
+static bool no_bL_switcher;
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+       int ret;
+
+       if (MAX_NR_CLUSTERS != 2) {
+               pr_err("%s: only dual cluster systems are supported\n", __func__);
+               return -EINVAL;
+       }
+
+       cpu_notifier(bL_switcher_hotplug_callback, 0);
+
+       if (!no_bL_switcher) {
+               ret = bL_switcher_enable();
+               if (ret)
+                       return ret;
+       }
+
+#ifdef CONFIG_SYSFS
+       ret = bL_switcher_sysfs_init();
+       if (ret)
+               pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+       return 0;
+}
+
+late_initcall(bL_switcher_init);
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644 (file)
index 0000000..3f47f12
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright:  (C) 2012-2013  Linaro Limited
+ *
+ * Dummy interface to user space for debugging purposes only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       unsigned char val[3];
+       unsigned int cpu, cluster;
+       int ret;
+
+       pr_debug("%s\n", __func__);
+
+       if (len < 3)
+               return -EINVAL;
+
+       if (copy_from_user(val, buf, 3))
+               return -EFAULT;
+
+       /* format: <cpu#>,<cluster#> */
+       if (val[0] < '0' || val[0] > '9' ||
+           val[1] != ',' ||
+           val[2] < '0' || val[2] > '1')
+               return -EINVAL;
+
+       cpu = val[0] - '0';
+       cluster = val[2] - '0';
+       ret = bL_switch_request(cpu, cluster);
+
+       return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+       .write          = bL_switcher_write,
+       .owner  = THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+       MISC_DYNAMIC_MINOR,
+       "b.L_switcher",
+       &bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+       return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+       misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
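
For illustration only (not part of the patch): assuming udev/devtmpfs exposes the misc device as /dev/b.L_switcher, a hypothetical user-space test could request a switch by writing the "<cpu#>,<cluster#>" string that bL_switcher_write() parses.

/* Hypothetical user-space test: ask CPU 0 to move to cluster 1. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/b.L_switcher", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "0,1", 3) != 3)   /* "<cpu#>,<cluster#>" */
                perror("write");
        close(fd);
        return 0;
}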
index 117f955a2a063b7e57cbbd0a05806ce3413814a7..8e1a0245907f85be1a460bfa785f744daf285d6f 100644 (file)
@@ -269,6 +269,11 @@ static const struct edmacc_param dummy_paramset = {
        .ccnt = 1,
 };
 
+static const struct of_device_id edma_of_ids[] = {
+       { .compatible = "ti,edma3", },
+       {}
+};
+
 /*****************************************************************************/
 
 static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
@@ -560,14 +565,38 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
 static int prepare_unused_channel_list(struct device *dev, void *data)
 {
        struct platform_device *pdev = to_platform_device(dev);
-       int i, ctlr;
+       int i, count, ctlr;
+       struct of_phandle_args  dma_spec;
 
+       if (dev->of_node) {
+               count = of_property_count_strings(dev->of_node, "dma-names");
+               if (count < 0)
+                       return 0;
+               for (i = 0; i < count; i++) {
+                       if (of_parse_phandle_with_args(dev->of_node, "dmas",
+                                                      "#dma-cells", i,
+                                                      &dma_spec))
+                               continue;
+
+                       if (!of_match_node(edma_of_ids, dma_spec.np)) {
+                               of_node_put(dma_spec.np);
+                               continue;
+                       }
+
+                       clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
+                                 edma_cc[0]->edma_unused);
+                       of_node_put(dma_spec.np);
+               }
+               return 0;
+       }
+
+       /* For non-OF case */
        for (i = 0; i < pdev->num_resources; i++) {
                if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
                                (int)pdev->resource[i].start >= 0) {
                        ctlr = EDMA_CTLR(pdev->resource[i].start);
                        clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
-                                       edma_cc[ctlr]->edma_unused);
+                                 edma_cc[ctlr]->edma_unused);
                }
        }
 
@@ -1762,11 +1791,6 @@ static int edma_probe(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id edma_of_ids[] = {
-       { .compatible = "ti,edma3", },
-       {}
-};
-
 static struct platform_driver edma_driver = {
        .driver = {
                .name   = "edma",
index 370236dd1a03309ee3b123e705aa80132c908427..24a9804b8f5e062613532c8a678b1b826b9e7620 100644 (file)
@@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
 }
 
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+                        unsigned long poke_phys_addr, unsigned long poke_val)
+{
+       unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+       poke[0] = poke_phys_addr;
+       poke[1] = poke_val;
+       __cpuc_flush_dcache_area((void *)poke, 8);
+       outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
 static const struct mcpm_platform_ops *platform_ops;
 
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
@@ -51,7 +63,8 @@ void mcpm_cpu_power_down(void)
 {
        phys_reset_t phys_reset;
 
-       BUG_ON(!platform_ops);
+       if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+               return;
        BUG_ON(!irqs_disabled());
 
        /*
@@ -93,7 +106,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
 {
        phys_reset_t phys_reset;
 
-       BUG_ON(!platform_ops);
+       if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+               return;
        BUG_ON(!irqs_disabled());
 
        /* Very similar to mcpm_cpu_power_down() */
index 39c96df3477a41549d71e1a18733dd85f4a168df..49dd5352fe70336ff023d3c40097a03d4370902f 100644 (file)
@@ -71,12 +71,19 @@ ENTRY(mcpm_entry_point)
         * position independent way.
         */
        adr     r5, 3f
-       ldmia   r5, {r6, r7, r8, r11}
+       ldmia   r5, {r0, r6, r7, r8, r11}
+       add     r0, r5, r0                      @ r0 = mcpm_entry_early_pokes
        add     r6, r5, r6                      @ r6 = mcpm_entry_vectors
        ldr     r7, [r5, r7]                    @ r7 = mcpm_power_up_setup_phys
        add     r8, r5, r8                      @ r8 = mcpm_sync
        add     r11, r5, r11                    @ r11 = first_man_locks
 
+       @ Perform an early poke, if any
+       add     r0, r0, r4, lsl #3
+       ldmia   r0, {r0, r1}
+       teq     r0, #0
+       strne   r1, [r0]
+
        mov     r0, #MCPM_SYNC_CLUSTER_SIZE
        mla     r8, r0, r10, r8                 @ r8 = sync cluster base
 
@@ -195,7 +202,8 @@ mcpm_entry_gated:
 
        .align  2
 
-3:     .word   mcpm_entry_vectors - .
+3:     .word   mcpm_entry_early_pokes - .
+       .word   mcpm_entry_vectors - 3b
        .word   mcpm_power_up_setup_phys - 3b
        .word   mcpm_sync - 3b
        .word   first_man_locks - 3b
@@ -214,6 +222,10 @@ first_man_locks:
 ENTRY(mcpm_entry_vectors)
        .space  4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
 
+       .type   mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+       .space  8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
        .type   mcpm_power_up_setup_phys, #object
 ENTRY(mcpm_power_up_setup_phys)
        .space  4               @ set by mcpm_sync_init()
index d56c932580eb201439c8a63aa401abddd157cbbf..025f6ce38596736eea2c929a211b23fd4eaf0d56 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <asm/mach/sharpsl_param.h>
+#include <asm/memory.h>
 
 /*
  * Certain hardware parameters determined at the time of device manufacture,
  */
 #ifdef CONFIG_ARCH_SA1100
 #define PARAM_BASE     0xe8ffc000
+#define param_start(x) (void *)(x)
 #else
 #define PARAM_BASE     0xa0000a00
+#define param_start(x) __va(x)
 #endif
 #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 )  | ( b << 8 ) | a )
 
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-       memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+       memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
 
        if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
                sharpsl_param.comadj=-1;
index e901d0f3e0bbcd735f5cf7e62bd653536ff8aa71..ce922d0ea7aa85daa59c408ac5cd79beab5459a6 100644 (file)
@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {
 
 static struct irqaction sp804_timer_irq = {
        .name           = "timer",
-       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .flags          = IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = sp804_timer_interrupt,
        .dev_id         = &sp804_clockevent,
 };
index 317960f1248893d6200650ec9c01e8132b627c16..0142ec37e0be26d8c1480aa4929a722896b3b397 100644 (file)
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_MODULES=y
@@ -11,11 +12,11 @@ CONFIG_ARCH_SA1100=y
 CONFIG_SA1100_H3600=y
 CONFIG_PCCARD=y
 CONFIG_PCMCIA_SA1100=y
+CONFIG_PREEMPT=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -24,13 +25,10 @@ CONFIG_IRDA=m
 CONFIG_IRLAN=m
 CONFIG_IRNET=m
 CONFIG_IRCOMM=m
-CONFIG_SA1100_FIR=m
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,19 +39,15 @@ CONFIG_MTD_SA1100=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECS=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_NET_PCMCIA=y
 CONFIG_PCMCIA_PCNET=y
 CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_WLAN is not set
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -64,8 +58,6 @@ CONFIG_SERIAL_SA1100_CONSOLE=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_SA1100=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_MSDOS_FS=m
@@ -74,6 +66,4 @@ CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=m
 CONFIG_NFS_FS=y
 CONFIG_NFSD=m
-CONFIG_SMB_FS=m
 CONFIG_NLS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
index 6e572c64cf5a1258008232a002301ab7925b0911..119fc378fc520f8ae5cb9074686570b6436bcd93 100644 (file)
@@ -36,6 +36,7 @@ CONFIG_ARCH_TEGRA_114_SOC=y
 CONFIG_TEGRA_PCI=y
 CONFIG_TEGRA_EMC_SCALING_ENABLE=y
 CONFIG_ARCH_U8500=y
+CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
 CONFIG_MACH_UX500_DT=y
 CONFIG_ARCH_VEXPRESS=y
@@ -46,6 +47,7 @@ CONFIG_ARCH_ZYNQ=y
 CONFIG_SMP=y
 CONFIG_HIGHPTE=y
 CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -133,6 +135,7 @@ CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SDHCI_SPEAR=y
 CONFIG_MMC_OMAP=y
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644 (file)
index 0000000..6231d36
--- /dev/null
@@ -0,0 +1 @@
+aesbs-core.S
index a2c83851bc90a29f5f1d06415cb4a0db4dd726e1..81cda39860c5c7ad90a6710727011ec79296e5d8 100644 (file)
@@ -3,7 +3,17 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
 
-aes-arm-y  := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y      := aes-armv4.o aes_glue.o
+aes-arm-bs-y   := aesbs-core.o aesbs-glue.o
+sha1-arm-y     := sha1-armv4-large.o sha1_glue.o
+
+quiet_cmd_perl = PERL    $@
+      cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+       $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
index 19d6cd6f29f98b95962cca5643dca61debdf7b8e..3a14ea8fe97e5cac183ad8c9148b816e473b4ae5 100644 (file)
@@ -148,7 +148,7 @@ AES_Te:
 @               const AES_KEY *key) {
 .align 5
 ENTRY(AES_encrypt)
-       sub     r3,pc,#8                @ AES_encrypt
+       adr     r3,AES_encrypt
        stmdb   sp!,{r1,r4-r12,lr}
        mov     r12,r0          @ inp
        mov     r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
 .align 5
 ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
-       sub     r3,pc,#8                @ AES_set_encrypt_key
+       adr     r3,_armv4_AES_set_encrypt_key
        teq     r0,#0
        moveq   r0,#-1
        beq     .Labrt
@@ -843,7 +843,7 @@ AES_Td:
 @               const AES_KEY *key) {
 .align 5
 ENTRY(AES_decrypt)
-       sub     r3,pc,#8                @ AES_decrypt
+       adr     r3,AES_decrypt
        stmdb   sp!,{r1,r4-r12,lr}
        mov     r12,r0          @ inp
        mov     r11,r2
index 59f7877ead6ac9ee3f8a31b43c6e0458de26cd8f..3003fa1f6fb4b9395c77340fbf83011b5cb0e419 100644 (file)
@@ -6,22 +6,12 @@
 #include <linux/crypto.h>
 #include <crypto/aes.h>
 
-#define AES_MAXNR 14
+#include "aes_glue.h"
 
-typedef struct {
-       unsigned int rd_key[4 *(AES_MAXNR + 1)];
-       int rounds;
-} AES_KEY;
-
-struct AES_CTX {
-       AES_KEY enc_key;
-       AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
-                       .cia_setkey                     = aes_set_key,
+                       .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644 (file)
index 0000000..cca3e51
--- /dev/null
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+       unsigned int rd_key[4 * (AES_MAXNR + 1)];
+       int rounds;
+};
+
+struct AES_CTX {
+       struct AES_KEY enc_key;
+       struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+                                          const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+                                          const int bits, struct AES_KEY *key);
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644 (file)
index 0000000..64205d4
--- /dev/null
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in the sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt      19.5 cycles per byte processed with 128-bit key
+@ decrypt      22.1 cycles per byte processed with 128-bit key
+@ key conv.    440  cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results, keep in mind that the NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results, keep in mind the key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@                                              <appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@                                      <ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH  vstmdb  sp!,{d8-d15}
+# define VFP_ABI_POP   vldmia  sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax        unified         @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu   neon
+
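+@ _bsaes_decrypt8 and _bsaes_encrypt8 (below) transform eight 128-bit
+@ blocks held in q0-q7 at once: r4 points at the bit-sliced key schedule
+@ and is advanced as round keys are consumed, r5 carries the remaining
+@ round count, and r6 is used to address the .LM0ISR/.LISR shuffle
+@ constants.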
+.type  _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+       adr     r6,_bsaes_decrypt8
+       vldmia  r4!, {q9}               @ round 0 key
+       add     r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+       vldmia  r6!, {q8}               @ .LM0ISR
+       veor    q10, q0, q9     @ xor with round0 key
+       veor    q11, q1, q9
+        vtbl.8 d0, {q10}, d16
+        vtbl.8 d1, {q10}, d17
+       veor    q12, q2, q9
+        vtbl.8 d2, {q11}, d16
+        vtbl.8 d3, {q11}, d17
+       veor    q13, q3, q9
+        vtbl.8 d4, {q12}, d16
+        vtbl.8 d5, {q12}, d17
+       veor    q14, q4, q9
+        vtbl.8 d6, {q13}, d16
+        vtbl.8 d7, {q13}, d17
+       veor    q15, q5, q9
+        vtbl.8 d8, {q14}, d16
+        vtbl.8 d9, {q14}, d17
+       veor    q10, q6, q9
+        vtbl.8 d10, {q15}, d16
+        vtbl.8 d11, {q15}, d17
+       veor    q11, q7, q9
+        vtbl.8 d12, {q10}, d16
+        vtbl.8 d13, {q10}, d17
+        vtbl.8 d14, {q11}, d16
+        vtbl.8 d15, {q11}, d17
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q4, #1
+       veor            q10, q10, q7
+        veor           q11, q11, q5
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #1
+        veor           q5, q5, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q3
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q5, #2
+        vshr.u64       q11, q4, #2
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q5, q5, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q3
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q3, #4
+        vshr.u64       q11, q2, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #4
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q4
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       sub     r5,r5,#1
+       b       .Ldec_sbox
+.align 4
+.Ldec_loop:
+       vldmia  r4!, {q8-q11}
+       veor    q8, q8, q0
+       veor    q9, q9, q1
+       vtbl.8  d0, {q8}, d24
+       vtbl.8  d1, {q8}, d25
+       vldmia  r4!, {q8}
+       veor    q10, q10, q2
+       vtbl.8  d2, {q9}, d24
+       vtbl.8  d3, {q9}, d25
+       vldmia  r4!, {q9}
+       veor    q11, q11, q3
+       vtbl.8  d4, {q10}, d24
+       vtbl.8  d5, {q10}, d25
+       vldmia  r4!, {q10}
+       vtbl.8  d6, {q11}, d24
+       vtbl.8  d7, {q11}, d25
+       vldmia  r4!, {q11}
+       veor    q8, q8, q4
+       veor    q9, q9, q5
+       vtbl.8  d8, {q8}, d24
+       vtbl.8  d9, {q8}, d25
+       veor    q10, q10, q6
+       vtbl.8  d10, {q9}, d24
+       vtbl.8  d11, {q9}, d25
+       veor    q11, q11, q7
+       vtbl.8  d12, {q10}, d24
+       vtbl.8  d13, {q10}, d25
+       vtbl.8  d14, {q11}, d24
+       vtbl.8  d15, {q11}, d25
+.Ldec_sbox:
+        veor   q1, q1, q4
+       veor    q3, q3, q4
+
+       veor    q4, q4, q7
+        veor   q1, q1, q6
+       veor    q2, q2, q7
+       veor    q6, q6, q4
+
+       veor    q0, q0, q1
+       veor    q2, q2, q5
+        veor   q7, q7, q6
+       veor    q3, q3, q0
+       veor    q5, q5, q0
+       veor    q1, q1, q3
+       veor    q11, q3, q0
+       veor    q10, q7, q4
+       veor    q9, q1, q6
+       veor    q13, q4, q0
+        vmov   q8, q10
+       veor    q12, q5, q2
+
+       vorr    q10, q10, q9
+       veor    q15, q11, q8
+       vand    q14, q11, q12
+       vorr    q11, q11, q12
+       veor    q12, q12, q9
+       vand    q8, q8, q9
+       veor    q9, q6, q2
+       vand    q15, q15, q12
+       vand    q13, q13, q9
+       veor    q9, q3, q7
+       veor    q12, q1, q5
+       veor    q11, q11, q13
+       veor    q10, q10, q13
+       vand    q13, q9, q12
+       vorr    q9, q9, q12
+       veor    q11, q11, q15
+       veor    q8, q8, q13
+       veor    q10, q10, q14
+       veor    q9, q9, q15
+       veor    q8, q8, q14
+       vand    q12, q4, q6
+       veor    q9, q9, q14
+       vand    q13, q0, q2
+       vand    q14, q7, q1
+       vorr    q15, q3, q5
+       veor    q11, q11, q12
+       veor    q9, q9, q14
+       veor    q8, q8, q15
+       veor    q10, q10, q13
+
+       @ Inv_GF16      0,      1,      2,      3, s0, s1, s2, s3
+
+       @ new smaller inversion
+
+       vand    q14, q11, q9
+       vmov    q12, q8
+
+       veor    q13, q10, q14
+       veor    q15, q8, q14
+       veor    q14, q8, q14    @ q14=q15
+
+       vbsl    q13, q9, q8
+       vbsl    q15, q11, q10
+       veor    q11, q11, q10
+
+       vbsl    q12, q13, q14
+       vbsl    q8, q14, q13
+
+       vand    q14, q12, q15
+       veor    q9, q9, q8
+
+       veor    q14, q14, q11
+       veor    q12, q5, q2
+       veor    q8, q1, q6
+       veor    q10, q15, q14
+       vand    q10, q10, q5
+       veor    q5, q5, q1
+       vand    q11, q1, q15
+       vand    q5, q5, q14
+       veor    q1, q11, q10
+       veor    q5, q5, q11
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q2
+       veor    q12, q12, q8
+        veor   q2, q2, q6
+       vand    q8, q8, q15
+        vand   q6, q6, q13
+       vand    q12, q12, q14
+        vand   q2, q2, q9
+       veor    q8, q8, q12
+        veor   q2, q2, q6
+       veor    q12, q12, q11
+        veor   q6, q6, q10
+       veor    q5, q5, q12
+       veor    q2, q2, q12
+       veor    q1, q1, q8
+       veor    q6, q6, q8
+
+       veor    q12, q3, q0
+       veor    q8, q7, q4
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q0
+       veor    q12, q12, q8
+        veor   q0, q0, q4
+       vand    q8, q8, q15
+        vand   q4, q4, q13
+       vand    q12, q12, q14
+        vand   q0, q0, q9
+       veor    q8, q8, q12
+        veor   q0, q0, q4
+       veor    q12, q12, q11
+        veor   q4, q4, q10
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q10, q15, q14
+       vand    q10, q10, q3
+       veor    q3, q3, q7
+       vand    q11, q7, q15
+       vand    q3, q3, q14
+       veor    q7, q11, q10
+       veor    q3, q3, q11
+       veor    q3, q3, q12
+       veor    q0, q0, q12
+       veor    q7, q7, q8
+       veor    q4, q4, q8
+       veor    q1, q1, q7
+       veor    q6, q6, q5
+
+       veor    q4, q4, q1
+       veor    q2, q2, q7
+       veor    q5, q5, q7
+       veor    q4, q4, q2
+        veor   q7, q7, q0
+       veor    q4, q4, q5
+        veor   q3, q3, q6
+        veor   q6, q6, q1
+       veor    q3, q3, q4
+
+       veor    q4, q4, q0
+       veor    q7, q7, q3
+       subs    r5,r5,#1
+       bcc     .Ldec_done
+       @ multiplication by 0x05-0x00-0x04-0x00
+       vext.8  q8, q0, q0, #8
+       vext.8  q14, q3, q3, #8
+       vext.8  q15, q5, q5, #8
+       veor    q8, q8, q0
+       vext.8  q9, q1, q1, #8
+       veor    q14, q14, q3
+       vext.8  q10, q6, q6, #8
+       veor    q15, q15, q5
+       vext.8  q11, q4, q4, #8
+       veor    q9, q9, q1
+       vext.8  q12, q2, q2, #8
+       veor    q10, q10, q6
+       vext.8  q13, q7, q7, #8
+       veor    q11, q11, q4
+       veor    q12, q12, q2
+       veor    q13, q13, q7
+
+        veor   q0, q0, q14
+        veor   q1, q1, q14
+        veor   q6, q6, q8
+        veor   q2, q2, q10
+        veor   q4, q4, q9
+        veor   q1, q1, q15
+        veor   q6, q6, q15
+        veor   q2, q2, q14
+        veor   q7, q7, q11
+        veor   q4, q4, q14
+        veor   q3, q3, q12
+        veor   q2, q2, q15
+        veor   q7, q7, q15
+        veor   q5, q5, q13
+       vext.8  q8, q0, q0, #12 @ x0 <<< 32
+       vext.8  q9, q1, q1, #12
+        veor   q0, q0, q8              @ x0 ^ (x0 <<< 32)
+       vext.8  q10, q6, q6, #12
+        veor   q1, q1, q9
+       vext.8  q11, q4, q4, #12
+        veor   q6, q6, q10
+       vext.8  q12, q2, q2, #12
+        veor   q4, q4, q11
+       vext.8  q13, q7, q7, #12
+        veor   q2, q2, q12
+       vext.8  q14, q3, q3, #12
+        veor   q7, q7, q13
+       vext.8  q15, q5, q5, #12
+        veor   q3, q3, q14
+
+       veor    q9, q9, q0
+        veor   q5, q5, q15
+        vext.8 q0, q0, q0, #8          @ (x0 ^ (x0 <<< 32)) <<< 64)
+       veor    q10, q10, q1
+       veor    q8, q8, q5
+       veor    q9, q9, q5
+        vext.8 q1, q1, q1, #8
+       veor    q13, q13, q2
+        veor   q0, q0, q8
+       veor    q14, q14, q7
+        veor   q1, q1, q9
+        vext.8 q8, q2, q2, #8
+       veor    q12, q12, q4
+        vext.8 q9, q7, q7, #8
+       veor    q15, q15, q3
+        vext.8 q2, q4, q4, #8
+       veor    q11, q11, q6
+        vext.8 q7, q5, q5, #8
+       veor    q12, q12, q5
+        vext.8 q4, q3, q3, #8
+       veor    q11, q11, q5
+        vext.8 q3, q6, q6, #8
+       veor    q5, q9, q13
+       veor    q11, q11, q2
+       veor    q7, q7, q15
+       veor    q6, q4, q14
+       veor    q4, q8, q12
+       veor    q2, q3, q10
+       vmov    q3, q11
+        @ vmov q5, q9
+       vldmia  r6, {q12}               @ .LISR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   r6,r6,#0x10
+       bne     .Ldec_loop
+       vldmia  r6, {q12}               @ .LISRM0
+       b       .Ldec_loop
+.align 4
+.Ldec_done:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q3, #1
+        vshr.u64       q11, q2, #1
+       veor            q10, q10, q5
+        veor           q11, q11, q7
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #1
+        veor           q7, q7, q11
+        vshl.u64       q11, q11, #1
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q4
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q4, q4, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q7, #2
+        vshr.u64       q11, q2, #2
+       veor            q10, q10, q5
+        veor           q11, q11, q3
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #2
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #2
+       veor            q7, q7, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q4
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q4, q4, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q4, #4
+        vshr.u64       q11, q6, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q3
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #4
+       veor            q4, q4, q10
+        veor           q6, q6, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q2
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vldmia  r4, {q8}                        @ last round key
+       veor    q6, q6, q8
+       veor    q4, q4, q8
+       veor    q2, q2, q8
+       veor    q7, q7, q8
+       veor    q3, q3, q8
+       veor    q5, q5, q8
+       veor    q0, q0, q8
+       veor    q1, q1, q8
+       bx      lr
+.size  _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type  _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR:       @ InvShiftRows constants
+       .quad   0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+       .quad   0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+       .quad   0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:                @ ShiftRows constants
+       .quad   0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+       .quad   0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+       .quad   0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+       .quad   0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+       .quad   0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 6
+.size  _bsaes_const,.-_bsaes_const
+
+.type  _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+       adr     r6,_bsaes_encrypt8
+       vldmia  r4!, {q9}               @ round 0 key
+       sub     r6,r6,#_bsaes_encrypt8-.LM0SR
+
+       vldmia  r6!, {q8}               @ .LM0SR
+_bsaes_encrypt8_alt:
+       veor    q10, q0, q9     @ xor with round0 key
+       veor    q11, q1, q9
+        vtbl.8 d0, {q10}, d16
+        vtbl.8 d1, {q10}, d17
+       veor    q12, q2, q9
+        vtbl.8 d2, {q11}, d16
+        vtbl.8 d3, {q11}, d17
+       veor    q13, q3, q9
+        vtbl.8 d4, {q12}, d16
+        vtbl.8 d5, {q12}, d17
+       veor    q14, q4, q9
+        vtbl.8 d6, {q13}, d16
+        vtbl.8 d7, {q13}, d17
+       veor    q15, q5, q9
+        vtbl.8 d8, {q14}, d16
+        vtbl.8 d9, {q14}, d17
+       veor    q10, q6, q9
+        vtbl.8 d10, {q15}, d16
+        vtbl.8 d11, {q15}, d17
+       veor    q11, q7, q9
+        vtbl.8 d12, {q10}, d16
+        vtbl.8 d13, {q10}, d17
+        vtbl.8 d14, {q11}, d16
+        vtbl.8 d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q4, #1
+       veor            q10, q10, q7
+        veor           q11, q11, q5
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #1
+        veor           q5, q5, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q3
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q5, #2
+        vshr.u64       q11, q4, #2
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q5, q5, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q3
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q3, #4
+        vshr.u64       q11, q2, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #4
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q4
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       sub     r5,r5,#1
+       b       .Lenc_sbox
+.align 4
+.Lenc_loop:
+       vldmia  r4!, {q8-q11}
+       veor    q8, q8, q0
+       veor    q9, q9, q1
+       vtbl.8  d0, {q8}, d24
+       vtbl.8  d1, {q8}, d25
+       vldmia  r4!, {q8}
+       veor    q10, q10, q2
+       vtbl.8  d2, {q9}, d24
+       vtbl.8  d3, {q9}, d25
+       vldmia  r4!, {q9}
+       veor    q11, q11, q3
+       vtbl.8  d4, {q10}, d24
+       vtbl.8  d5, {q10}, d25
+       vldmia  r4!, {q10}
+       vtbl.8  d6, {q11}, d24
+       vtbl.8  d7, {q11}, d25
+       vldmia  r4!, {q11}
+       veor    q8, q8, q4
+       veor    q9, q9, q5
+       vtbl.8  d8, {q8}, d24
+       vtbl.8  d9, {q8}, d25
+       veor    q10, q10, q6
+       vtbl.8  d10, {q9}, d24
+       vtbl.8  d11, {q9}, d25
+       veor    q11, q11, q7
+       vtbl.8  d12, {q10}, d24
+       vtbl.8  d13, {q10}, d25
+       vtbl.8  d14, {q11}, d24
+       vtbl.8  d15, {q11}, d25
+.Lenc_sbox:
+       veor    q2, q2, q1
+       veor    q5, q5, q6
+       veor    q3, q3, q0
+       veor    q6, q6, q2
+       veor    q5, q5, q0
+
+       veor    q6, q6, q3
+       veor    q3, q3, q7
+       veor    q7, q7, q5
+       veor    q3, q3, q4
+       veor    q4, q4, q5
+
+       veor    q2, q2, q7
+       veor    q3, q3, q1
+       veor    q1, q1, q5
+       veor    q11, q7, q4
+       veor    q10, q1, q2
+       veor    q9, q5, q3
+       veor    q13, q2, q4
+        vmov   q8, q10
+       veor    q12, q6, q0
+
+       vorr    q10, q10, q9
+       veor    q15, q11, q8
+       vand    q14, q11, q12
+       vorr    q11, q11, q12
+       veor    q12, q12, q9
+       vand    q8, q8, q9
+       veor    q9, q3, q0
+       vand    q15, q15, q12
+       vand    q13, q13, q9
+       veor    q9, q7, q1
+       veor    q12, q5, q6
+       veor    q11, q11, q13
+       veor    q10, q10, q13
+       vand    q13, q9, q12
+       vorr    q9, q9, q12
+       veor    q11, q11, q15
+       veor    q8, q8, q13
+       veor    q10, q10, q14
+       veor    q9, q9, q15
+       veor    q8, q8, q14
+       vand    q12, q2, q3
+       veor    q9, q9, q14
+       vand    q13, q4, q0
+       vand    q14, q1, q5
+       vorr    q15, q7, q6
+       veor    q11, q11, q12
+       veor    q9, q9, q14
+       veor    q8, q8, q15
+       veor    q10, q10, q13
+
+       @ Inv_GF16      0,      1,      2,      3, s0, s1, s2, s3
+
+       @ new smaller inversion
+
+       vand    q14, q11, q9
+       vmov    q12, q8
+
+       veor    q13, q10, q14
+       veor    q15, q8, q14
+       veor    q14, q8, q14    @ q14=q15
+
+       vbsl    q13, q9, q8
+       vbsl    q15, q11, q10
+       veor    q11, q11, q10
+
+       vbsl    q12, q13, q14
+       vbsl    q8, q14, q13
+
+       vand    q14, q12, q15
+       veor    q9, q9, q8
+
+       veor    q14, q14, q11
+       veor    q12, q6, q0
+       veor    q8, q5, q3
+       veor    q10, q15, q14
+       vand    q10, q10, q6
+       veor    q6, q6, q5
+       vand    q11, q5, q15
+       vand    q6, q6, q14
+       veor    q5, q11, q10
+       veor    q6, q6, q11
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q0
+       veor    q12, q12, q8
+        veor   q0, q0, q3
+       vand    q8, q8, q15
+        vand   q3, q3, q13
+       vand    q12, q12, q14
+        vand   q0, q0, q9
+       veor    q8, q8, q12
+        veor   q0, q0, q3
+       veor    q12, q12, q11
+        veor   q3, q3, q10
+       veor    q6, q6, q12
+       veor    q0, q0, q12
+       veor    q5, q5, q8
+       veor    q3, q3, q8
+
+       veor    q12, q7, q4
+       veor    q8, q1, q2
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q4
+       veor    q12, q12, q8
+        veor   q4, q4, q2
+       vand    q8, q8, q15
+        vand   q2, q2, q13
+       vand    q12, q12, q14
+        vand   q4, q4, q9
+       veor    q8, q8, q12
+        veor   q4, q4, q2
+       veor    q12, q12, q11
+        veor   q2, q2, q10
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q10, q15, q14
+       vand    q10, q10, q7
+       veor    q7, q7, q1
+       vand    q11, q1, q15
+       vand    q7, q7, q14
+       veor    q1, q11, q10
+       veor    q7, q7, q11
+       veor    q7, q7, q12
+       veor    q4, q4, q12
+       veor    q1, q1, q8
+       veor    q2, q2, q8
+       veor    q7, q7, q0
+       veor    q1, q1, q6
+       veor    q6, q6, q0
+       veor    q4, q4, q7
+       veor    q0, q0, q1
+
+       veor    q1, q1, q5
+       veor    q5, q5, q2
+       veor    q2, q2, q3
+       veor    q3, q3, q5
+       veor    q4, q4, q5
+
+       veor    q6, q6, q3
+       subs    r5,r5,#1
+       bcc     .Lenc_done
+       vext.8  q8, q0, q0, #12 @ x0 <<< 32
+       vext.8  q9, q1, q1, #12
+        veor   q0, q0, q8              @ x0 ^ (x0 <<< 32)
+       vext.8  q10, q4, q4, #12
+        veor   q1, q1, q9
+       vext.8  q11, q6, q6, #12
+        veor   q4, q4, q10
+       vext.8  q12, q3, q3, #12
+        veor   q6, q6, q11
+       vext.8  q13, q7, q7, #12
+        veor   q3, q3, q12
+       vext.8  q14, q2, q2, #12
+        veor   q7, q7, q13
+       vext.8  q15, q5, q5, #12
+        veor   q2, q2, q14
+
+       veor    q9, q9, q0
+        veor   q5, q5, q15
+        vext.8 q0, q0, q0, #8          @ (x0 ^ (x0 <<< 32)) <<< 64)
+       veor    q10, q10, q1
+       veor    q8, q8, q5
+       veor    q9, q9, q5
+        vext.8 q1, q1, q1, #8
+       veor    q13, q13, q3
+        veor   q0, q0, q8
+       veor    q14, q14, q7
+        veor   q1, q1, q9
+        vext.8 q8, q3, q3, #8
+       veor    q12, q12, q6
+        vext.8 q9, q7, q7, #8
+       veor    q15, q15, q2
+        vext.8 q3, q6, q6, #8
+       veor    q11, q11, q4
+        vext.8 q7, q5, q5, #8
+       veor    q12, q12, q5
+        vext.8 q6, q2, q2, #8
+       veor    q11, q11, q5
+        vext.8 q2, q4, q4, #8
+       veor    q5, q9, q13
+       veor    q4, q8, q12
+       veor    q3, q3, q11
+       veor    q7, q7, q15
+       veor    q6, q6, q14
+        @ vmov q4, q8
+       veor    q2, q2, q10
+        @ vmov q5, q9
+       vldmia  r6, {q12}               @ .LSR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   r6,r6,#0x10
+       bne     .Lenc_loop
+       vldmia  r6, {q12}               @ .LSRM0
+       b       .Lenc_loop
+.align 4
+.Lenc_done:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q3, #1
+       veor            q10, q10, q5
+        veor           q11, q11, q7
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #1
+        veor           q7, q7, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q3, q3, q11
+       vshr.u64        q10, q4, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q6
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q6, q6, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q4, q4, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q7, #2
+        vshr.u64       q11, q3, #2
+       veor            q10, q10, q5
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q7, q7, q10
+        veor           q3, q3, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q6
+        veor           q11, q11, q4
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q6, q6, q10
+       vshl.u64        q10, q10, #2
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q6, #4
+        vshr.u64       q11, q4, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q2
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #4
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q3
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vldmia  r4, {q8}                        @ last round key
+       veor    q4, q4, q8
+       veor    q6, q6, q8
+       veor    q3, q3, q8
+       veor    q7, q7, q8
+       veor    q2, q2, q8
+       veor    q5, q5, q8
+       veor    q0, q0, q8
+       veor    q1, q1, q8
+       bx      lr
+.size  _bsaes_encrypt8,.-_bsaes_encrypt8
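+@ _bsaes_key_convert expands a standard AES key schedule (r4, with r5
+@ rounds) into the bit-sliced representation, writing converted round
+@ keys through r12; the last round key is left in q15 and the 0x63
+@ fix-up constant in q7 for the caller to apply.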
+.type  _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+       adr     r6,_bsaes_key_convert
+       vld1.8  {q7},  [r4]!            @ load round 0 key
+       sub     r6,r6,#_bsaes_key_convert-.LM0
+       vld1.8  {q15}, [r4]!            @ load round 1 key
+
+       vmov.i8 q8,  #0x01                      @ bit masks
+       vmov.i8 q9,  #0x02
+       vmov.i8 q10, #0x04
+       vmov.i8 q11, #0x08
+       vmov.i8 q12, #0x10
+       vmov.i8 q13, #0x20
+       vldmia  r6, {q14}               @ .LM0
+
+#ifdef __ARMEL__
+       vrev32.8        q7,  q7
+       vrev32.8        q15, q15
+#endif
+       sub     r5,r5,#1
+       vstmia  r12!, {q7}              @ save round 0 key
+       b       .Lkey_loop
+
+.align 4
+.Lkey_loop:
+       vtbl.8  d14,{q15},d28
+       vtbl.8  d15,{q15},d29
+       vmov.i8 q6,  #0x40
+       vmov.i8 q15, #0x80
+
+       vtst.8  q0, q7, q8
+       vtst.8  q1, q7, q9
+       vtst.8  q2, q7, q10
+       vtst.8  q3, q7, q11
+       vtst.8  q4, q7, q12
+       vtst.8  q5, q7, q13
+       vtst.8  q6, q7, q6
+       vtst.8  q7, q7, q15
+       vld1.8  {q15}, [r4]!            @ load next round key
+       vmvn    q0, q0          @ "pnot"
+       vmvn    q1, q1
+       vmvn    q5, q5
+       vmvn    q6, q6
+#ifdef __ARMEL__
+       vrev32.8        q15, q15
+#endif
+       subs    r5,r5,#1
+       vstmia  r12!,{q0-q7}            @ write bit-sliced round key
+       bne     .Lkey_loop
+
+       vmov.i8 q7,#0x63                        @ compose .L63
+       @ don't save last round key
+       bx      lr
+.size  _bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
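+@ bsaes_cbc_encrypt: r0 = input, r1 = output, r2 = length in bytes,
+@ r3 = expanded key, and the IV pointer is the first stack argument;
+@ outside the kernel, lengths below 128 bytes fall through to
+@ AES_cbc_encrypt.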
+.global        bsaes_cbc_encrypt
+.type  bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef        __KERNEL__
+       cmp     r2, #128
+#ifndef        __thumb__
+       blo     AES_cbc_encrypt
+#else
+       bhs     1f
+       b       AES_cbc_encrypt
+1:
+#endif
+#endif
+
+       @ it is up to the caller to make sure we are called with enc == 0
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     r8, [ip]                        @ IV is 1st arg on the stack
+       mov     r2, r2, lsr#4           @ len in 16 byte blocks
+       sub     sp, #0x10                       @ scratch space to carry over the IV
+       mov     r9, sp                          @ save sp
+
+       ldr     r10, [r3, #240]         @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r10, lsl#7             @ 128 bytes per inner round key
+       add     r12, #96                        @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       mov     sp, r12                         @ sp is sp
+       bl      _bsaes_key_convert
+       vldmia  sp, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  sp, {q7}
+#else
+       ldr     r12, [r3, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [r3, #244]
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       add     r12, r3, #248                   @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, r3, #248
+       vldmia  r4, {q6}
+       vstmia  r12, {q15}                      @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+
+.align 2
+0:
+#endif
+
+       vld1.8  {q15}, [r8]             @ load IV
+       b       .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+       subs    r2, r2, #0x8
+       bmi     .Lcbc_dec_loop_finish
+
+       vld1.8  {q0-q1}, [r0]!  @ load input
+       vld1.8  {q2-q3}, [r0]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, sp                  @ pass the key
+#else
+       add     r4, r3, #248
+#endif
+       vld1.8  {q4-q5}, [r0]!
+       mov     r5, r10
+       vld1.8  {q6-q7}, [r0]
+       sub     r0, r0, #0x60
+       vstmia  r9, {q15}                       @ put aside IV
+
+       bl      _bsaes_decrypt8
+
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12-q13}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q14-q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q3, q3, q13
+       vst1.8  {q6}, [r1]!
+       veor    q5, q5, q14
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       vst1.8  {q3}, [r1]!
+       vst1.8  {q5}, [r1]!
+
+       b       .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+       adds    r2, r2, #8
+       beq     .Lcbc_dec_done
+
+       vld1.8  {q0}, [r0]!             @ load input
+       cmp     r2, #2
+       blo     .Lcbc_dec_one
+       vld1.8  {q1}, [r0]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, sp                  @ pass the key
+#else
+       add     r4, r3, #248
+#endif
+       mov     r5, r10
+       vstmia  r9, {q15}                       @ put aside IV
+       beq     .Lcbc_dec_two
+       vld1.8  {q2}, [r0]!
+       cmp     r2, #4
+       blo     .Lcbc_dec_three
+       vld1.8  {q3}, [r0]!
+       beq     .Lcbc_dec_four
+       vld1.8  {q4}, [r0]!
+       cmp     r2, #6
+       blo     .Lcbc_dec_five
+       vld1.8  {q5}, [r0]!
+       beq     .Lcbc_dec_six
+       vld1.8  {q6}, [r0]!
+       sub     r0, r0, #0x70
+
+       bl      _bsaes_decrypt8
+
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12-q13}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q3, q3, q13
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       vst1.8  {q3}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+       sub     r0, r0, #0x60
+       bl      _bsaes_decrypt8
+       vldmia  r9,{q14}                        @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+       sub     r0, r0, #0x50
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q15}, [r0]!
+       veor    q4, q4, q10
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q2, q2, q11
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+       sub     r0, r0, #0x40
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q15}, [r0]!
+       veor    q4, q4, q10
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+       sub     r0, r0, #0x30
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q15}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+       sub     r0, r0, #0x20
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8}, [r0]!             @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q15}, [r0]!            @ reload input
+       veor    q1, q1, q8
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+       sub     r0, r0, #0x10
+       mov     r10, r1                 @ save original out pointer
+       mov     r1, r9                  @ use the iv scratch space as out buffer
+       mov     r2, r3
+       vmov    q4,q15          @ just in case ensure that IV
+       vmov    q5,q0                   @ and input are preserved
+       bl      AES_decrypt
+       vld1.8  {q0}, [r9,:64]          @ load result
+       veor    q0, q0, q4      @ ^= IV
+       vmov    q15, q5         @ q5 holds input
+       vst1.8  {q0}, [r10]             @ write output
+
+.Lcbc_dec_done:
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+.Lcbc_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r9
+       bne             .Lcbc_dec_bzero
+#endif
+
+       mov     sp, r9
+       add     sp, #0x10                       @ add sp,r9,#0x10 is no good for thumb
+       vst1.8  {q15}, [r8]             @ return IV
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}
+.size  bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern        AES_encrypt
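+@ bsaes_ctr32_encrypt_blocks: r0 = input, r1 = output, r2 = number of
+@ 16-byte blocks, r3 = expanded key, and the counter pointer is the
+@ first stack argument; fewer than eight blocks are handled one at a
+@ time via AES_encrypt in .Lctr_enc_short.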
+.global        bsaes_ctr32_encrypt_blocks
+.type  bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+       cmp     r2, #8                  @ use plain AES for
+       blo     .Lctr_enc_short                 @ small sizes
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     r8, [ip]                        @ ctr is 1st arg on the stack
+       sub     sp, sp, #0x10                   @ scratch space to carry over the ctr
+       mov     r9, sp                          @ save sp
+
+       ldr     r10, [r3, #240]         @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r10, lsl#7             @ 128 bytes per inner round key
+       add     r12, #96                        @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       mov     sp, r12                         @ sp is sp
+       bl      _bsaes_key_convert
+       veor    q7,q7,q15       @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+
+       vld1.8  {q0}, [r8]              @ load counter
+       add     r8, r6, #.LREVM0SR-.LM0 @ borrow r8
+       vldmia  sp, {q4}                @ load round0 key
+#else
+       ldr     r12, [r3, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [r3, #244]
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       add     r12, r3, #248                   @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7,q7,q15       @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+
+.align 2
+0:     add     r12, r3, #248
+       vld1.8  {q0}, [r8]              @ load counter
+       adrl    r8, .LREVM0SR                   @ borrow r8
+       vldmia  r12, {q4}                       @ load round0 key
+       sub     sp, #0x10                       @ place for adjusted round0 key
+#endif
+
+       vmov.i32        q8,#1           @ compose 1<<96
+       veor            q9,q9,q9
+       vrev32.8        q0,q0
+       vext.8          q8,q9,q8,#4
+       vrev32.8        q4,q4
+       vadd.u32        q9,q8,q8        @ compose 2<<96
+       vstmia  sp, {q4}                @ save adjusted round0 key
+       b       .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+       vadd.u32        q10, q8, q9     @ compose 3<<96
+       vadd.u32        q1, q0, q8      @ +1
+       vadd.u32        q2, q0, q9      @ +2
+       vadd.u32        q3, q0, q10     @ +3
+       vadd.u32        q4, q1, q10
+       vadd.u32        q5, q2, q10
+       vadd.u32        q6, q3, q10
+       vadd.u32        q7, q4, q10
+       vadd.u32        q10, q5, q10    @ next counter
+
+       @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+       @ to flip byte order in 32-bit counter
+
+       vldmia          sp, {q9}                @ load round0 key
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x10           @ pass next round key
+#else
+       add             r4, r3, #264
+#endif
+       vldmia          r8, {q8}                        @ .LREVM0SR
+       mov             r5, r10                 @ pass rounds
+       vstmia          r9, {q10}                       @ save next counter
+       sub             r6, r8, #.LREVM0SR-.LSR @ pass constants
+
+       bl              _bsaes_encrypt8_alt
+
+       subs            r2, r2, #8
+       blo             .Lctr_enc_loop_done
+
+       vld1.8          {q8-q9}, [r0]!  @ load input
+       vld1.8          {q10-q11}, [r0]!
+       veor            q0, q8
+       veor            q1, q9
+       vld1.8          {q12-q13}, [r0]!
+       veor            q4, q10
+       veor            q6, q11
+       vld1.8          {q14-q15}, [r0]!
+       veor            q3, q12
+       vst1.8          {q0-q1}, [r1]!  @ write output
+       veor            q7, q13
+       veor            q2, q14
+       vst1.8          {q4}, [r1]!
+       veor            q5, q15
+       vst1.8          {q6}, [r1]!
+       vmov.i32        q8, #1                  @ compose 1<<96
+       vst1.8          {q3}, [r1]!
+       veor            q9, q9, q9
+       vst1.8          {q7}, [r1]!
+       vext.8          q8, q9, q8, #4
+       vst1.8          {q2}, [r1]!
+       vadd.u32        q9,q8,q8                @ compose 2<<96
+       vst1.8          {q5}, [r1]!
+       vldmia          r9, {q0}                        @ load counter
+
+       bne             .Lctr_enc_loop
+       b               .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+       add             r2, r2, #8
+       vld1.8          {q8}, [r0]!     @ load input
+       veor            q0, q8
+       vst1.8          {q0}, [r1]!     @ write output
+       cmp             r2, #2
+       blo             .Lctr_enc_done
+       vld1.8          {q9}, [r0]!
+       veor            q1, q9
+       vst1.8          {q1}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q10}, [r0]!
+       veor            q4, q10
+       vst1.8          {q4}, [r1]!
+       cmp             r2, #4
+       blo             .Lctr_enc_done
+       vld1.8          {q11}, [r0]!
+       veor            q6, q11
+       vst1.8          {q6}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q12}, [r0]!
+       veor            q3, q12
+       vst1.8          {q3}, [r1]!
+       cmp             r2, #6
+       blo             .Lctr_enc_done
+       vld1.8          {q13}, [r0]!
+       veor            q7, q13
+       vst1.8          {q7}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q14}, [r0]
+       veor            q2, q14
+       vst1.8          {q2}, [r1]!
+
+.Lctr_enc_done:
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifndef        BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:                       @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r9
+       bne             .Lctr_enc_bzero
+#else
+       vstmia          sp, {q0-q1}
+#endif
+
+       mov     sp, r9
+       add     sp, #0x10               @ add sp,r9,#0x10 is no good for thumb
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}       @ return
+
+.align 4
+.Lctr_enc_short:
+       ldr     ip, [sp]                @ ctr pointer is passed on stack
+       stmdb   sp!, {r4-r8, lr}
+
+       mov     r4, r0          @ copy arguments
+       mov     r5, r1
+       mov     r6, r2
+       mov     r7, r3
+       ldr     r8, [ip, #12]           @ load counter LSW
+       vld1.8  {q1}, [ip]              @ load whole counter value
+#ifdef __ARMEL__
+       rev     r8, r8
+#endif
+       sub     sp, sp, #0x10
+       vst1.8  {q1}, [sp,:64]  @ copy counter value
+       sub     sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+       add     r0, sp, #0x10           @ input counter value
+       mov     r1, sp                  @ output on the stack
+       mov     r2, r7                  @ key
+
+       bl      AES_encrypt
+
+       vld1.8  {q0}, [r4]!     @ load input
+       vld1.8  {q1}, [sp,:64]  @ load encrypted counter
+       add     r8, r8, #1
+#ifdef __ARMEL__
+       rev     r0, r8
+       str     r0, [sp, #0x1c]         @ next counter value
+#else
+       str     r8, [sp, #0x1c]         @ next counter value
+#endif
+       veor    q0,q0,q1
+       vst1.8  {q0}, [r5]!     @ store output
+       subs    r6, r6, #1
+       bne     .Lctr_enc_short_loop
+
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+       vstmia          sp!, {q0-q1}
+
+       ldmia   sp!, {r4-r8, pc}
+.size  bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
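+@ bsaes_xts_encrypt: r0 = input, r1 = output, r2 = length in bytes,
+@ r3 = key1, with key2 and iv[] (or the input tweak pointer when
+@ XTS_CHAIN_TWEAK is defined) passed on the stack; the initial tweak
+@ is generated with AES_encrypt unless it is chained in.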
+.globl bsaes_xts_encrypt
+.type  bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future r3
+
+       mov     r7, r0
+       mov     r8, r1
+       mov     r9, r2
+       mov     r10, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0,sp                           @ pointer to initial tweak
+#endif
+
+       ldr     r1, [r10, #240]         @ get # of rounds
+       mov     r3, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r1, lsl#7              @ 128 bytes per inner round key
+       @ add   r12, #96                        @ size of bit-sliced key schedule
+       sub     r12, #48                        @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7, q7, q15     @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+#else
+       ldr     r12, [r10, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [r10, #244]
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       add     r12, r10, #248                  @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7, q7, q15     @ fix up last round key
+       vstmia  r12, {q7}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+
+       vld1.8  {q8}, [r0]                      @ initial tweak
+       adr     r2, .Lxts_magic
+
+       subs    r9, #0x80
+       blo     .Lxts_enc_short
+       b       .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q6, q8, #63
+       mov             r0, sp
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q9, #63
+       veor            q9, q9, q6
+       vand            q7, q7, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q10, #63
+       veor            q10, q10, q7
+       vand            q6, q6, q5
+       vld1.8          {q0}, [r7]!
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q11, #63
+       veor            q11, q11, q6
+       vand            q7, q7, q5
+       vld1.8          {q1}, [r7]!
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q12, #63
+       veor            q12, q12, q7
+       vand            q6, q6, q5
+       vld1.8          {q2}, [r7]!
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q13, #63
+       veor            q13, q13, q6
+       vand            q7, q7, q5
+       vld1.8          {q3}, [r7]!
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q14, #63
+       veor            q14, q14, q7
+       vand            q6, q6, q5
+       vld1.8          {q4}, [r7]!
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q15, #63
+       veor            q15, q15, q6
+       vand            q7, q7, q5
+       vld1.8          {q5}, [r7]!
+       veor            q4, q4, q12
+       vadd.u64        q8, q15, q15
+       vst1.64         {q15}, [r0,:128]!
+       vswp            d15,d14
+       veor            q8, q8, q7
+       vst1.64         {q8}, [r0,:128]         @ next round tweak
+
+       vld1.8          {q6-q7}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       veor            q7, q7, q15
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vld1.64         {q14-q15}, [r0,:128]!
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q2, q14
+       vst1.8          {q10-q11}, [r8]!
+       veor            q13, q5, q15
+       vst1.8          {q12-q13}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+
+       subs            r9, #0x80
+       bpl             .Lxts_enc_loop
+
+.Lxts_enc_short:
+       adds            r9, #0x70
+       bmi             .Lxts_enc_done
+
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q7, q8, #63
+       mov             r0, sp
+       vand            q7, q7, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q9, #63
+       veor            q9, q9, q7
+       vand            q6, q6, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q10, #63
+       veor            q10, q10, q6
+       vand            q7, q7, q5
+       vld1.8          {q0}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_1
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q11, #63
+       veor            q11, q11, q7
+       vand            q6, q6, q5
+       vld1.8          {q1}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_2
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q12, #63
+       veor            q12, q12, q6
+       vand            q7, q7, q5
+       vld1.8          {q2}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_3
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q13, #63
+       veor            q13, q13, q7
+       vand            q6, q6, q5
+       vld1.8          {q3}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_4
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q14, #63
+       veor            q14, q14, q6
+       vand            q7, q7, q5
+       vld1.8          {q4}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_5
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q15, #63
+       veor            q15, q15, q7
+       vand            q6, q6, q5
+       vld1.8          {q5}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_6
+       veor            q4, q4, q12
+       sub             r9, #0x10
+       vst1.64         {q15}, [r0,:128]                @ next round tweak
+
+       vld1.8          {q6}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vld1.64         {q14}, [r0,:128]!
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q2, q14
+       vst1.8          {q10-q11}, [r8]!
+       vst1.8          {q12}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+       vst1.64         {q14}, [r0,:128]                @ next round tweak
+
+       veor            q4, q4, q12
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q5, q5, q13
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       vst1.8          {q10-q11}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+       .quad   1, 0x87
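+@ Carry-injection constants for the XTS tweak update: the tweak is
+@ multiplied by x in GF(2^128) (reduction polynomial x^128+x^7+x^2+x+1),
+@ i.e. each 64-bit half is doubled and, on overflow, 1 is xored into the
+@ other half (low-to-high carry) or 0x87 into the low byte (wrap of bit
+@ 127). The vshr.s64/vand/vswp/vadd.u64/veor sequences above and below
+@ implement this doubling.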
+
+.align 5
+.Lxts_enc_5:
+       vst1.64         {q13}, [r0,:128]                @ next round tweak
+
+       veor            q3, q3, q11
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q4, q4, q12
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       vst1.8          {q10}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+       vst1.64         {q12}, [r0,:128]                @ next round tweak
+
+       veor            q2, q2, q10
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q3, q3, q11
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vst1.8          {q8-q9}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+       vst1.64         {q11}, [r0,:128]                @ next round tweak
+
+       veor            q1, q1, q9
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q2, q2, q10
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       vst1.8          {q8}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+       vst1.64         {q10}, [r0,:128]                @ next round tweak
+
+       veor            q0, q0, q8
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q1, q1, q9
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       vst1.8          {q0-q1}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                          @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r8]!
+       mov             r3, r4
+
+       vmov            q8, q9          @ next round tweak
+
+.Lxts_enc_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            r9, #0x10
+       beq             .Lxts_enc_ret
+       sub             r6, r8, #0x10
+
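+@ Ciphertext stealing for a trailing partial block: the leading bytes of
+@ the last full ciphertext block (at r6) are emitted as the short output
+@ tail, the remaining plaintext bytes take their place, and the patched
+@ block at r6 is then encrypted once more with the final tweak in q8.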
+.Lxts_enc_steal:
+       ldrb            r0, [r7], #1
+       ldrb            r1, [r8, #-0x10]
+       strb            r0, [r8, #-0x10]
+       strb            r1, [r8], #1
+
+       subs            r9, #1
+       bhi             .Lxts_enc_steal
+
+       vld1.8          {q0}, [r6]
+       mov             r0, sp
+       veor            q0, q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                  @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r6]
+       mov             r3, r4
+#endif
+
+.Lxts_enc_ret:
+       bic             r0, r3, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [r3, #0x20+VFP_ABI_FRAME]   @ chain tweak
+#endif
+.Lxts_enc_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_enc_bzero
+
+       mov             sp, r3
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {q8}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type  bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future r3
+
+       mov     r7, r0
+       mov     r8, r1
+       mov     r9, r2
+       mov     r10, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0, sp                          @ pointer to initial tweak
+#endif
+
+       ldr     r1, [r10, #240]         @ get # of rounds
+       mov     r3, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r1, lsl#7              @ 128 bytes per inner round key
+       @ add   r12, #96                        @ size of bit-sliced key schedule
+       sub     r12, #48                        @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, sp, #0x90
+       vldmia  r4, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+#else
+       ldr     r12, [r10, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [r10, #244]
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       add     r12, r10, #248                  @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, r10, #248
+       vldmia  r4, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+       vld1.8  {q8}, [r0]                      @ initial tweak
+       adr     r2, .Lxts_magic
+
+       tst     r9, #0xf                        @ if not multiple of 16
+       it      ne                              @ Thumb2 thing, sanity check in ARM
+       subne   r9, #0x10                       @ subtract another 16 bytes
+       subs    r9, #0x80
+
+       blo     .Lxts_dec_short
+       b       .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q6, q8, #63
+       mov             r0, sp
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q9, #63
+       veor            q9, q9, q6
+       vand            q7, q7, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q10, #63
+       veor            q10, q10, q7
+       vand            q6, q6, q5
+       vld1.8          {q0}, [r7]!
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q11, #63
+       veor            q11, q11, q6
+       vand            q7, q7, q5
+       vld1.8          {q1}, [r7]!
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q12, #63
+       veor            q12, q12, q7
+       vand            q6, q6, q5
+       vld1.8          {q2}, [r7]!
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q13, #63
+       veor            q13, q13, q6
+       vand            q7, q7, q5
+       vld1.8          {q3}, [r7]!
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q14, #63
+       veor            q14, q14, q7
+       vand            q6, q6, q5
+       vld1.8          {q4}, [r7]!
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q15, #63
+       veor            q15, q15, q6
+       vand            q7, q7, q5
+       vld1.8          {q5}, [r7]!
+       veor            q4, q4, q12
+       vadd.u64        q8, q15, q15
+       vst1.64         {q15}, [r0,:128]!
+       vswp            d15,d14
+       veor            q8, q8, q7
+       vst1.64         {q8}, [r0,:128]         @ next round tweak
+
+       vld1.8          {q6-q7}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       veor            q7, q7, q15
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vld1.64         {q14-q15}, [r0,:128]!
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q3, q14
+       vst1.8          {q10-q11}, [r8]!
+       veor            q13, q5, q15
+       vst1.8          {q12-q13}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+
+       subs            r9, #0x80
+       bpl             .Lxts_dec_loop
+
+.Lxts_dec_short:
+       adds            r9, #0x70
+       bmi             .Lxts_dec_done
+
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q7, q8, #63
+       mov             r0, sp
+       vand            q7, q7, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q9, #63
+       veor            q9, q9, q7
+       vand            q6, q6, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q10, #63
+       veor            q10, q10, q6
+       vand            q7, q7, q5
+       vld1.8          {q0}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_1
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q11, #63
+       veor            q11, q11, q7
+       vand            q6, q6, q5
+       vld1.8          {q1}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_2
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q12, #63
+       veor            q12, q12, q6
+       vand            q7, q7, q5
+       vld1.8          {q2}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_3
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q13, #63
+       veor            q13, q13, q7
+       vand            q6, q6, q5
+       vld1.8          {q3}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_4
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q14, #63
+       veor            q14, q14, q6
+       vand            q7, q7, q5
+       vld1.8          {q4}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_5
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q15, #63
+       veor            q15, q15, q7
+       vand            q6, q6, q5
+       vld1.8          {q5}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_6
+       veor            q4, q4, q12
+       sub             r9, #0x10
+       vst1.64         {q15}, [r0,:128]                @ next round tweak
+
+       vld1.8          {q6}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vld1.64         {q14}, [r0,:128]!
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q3, q14
+       vst1.8          {q10-q11}, [r8]!
+       vst1.8          {q12}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+       vst1.64         {q14}, [r0,:128]                @ next round tweak
+
+       veor            q4, q4, q12
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q5, q5, q13
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       vst1.8          {q10-q11}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+       vst1.64         {q13}, [r0,:128]                @ next round tweak
+
+       veor            q3, q3, q11
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q4, q4, q12
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       vst1.8          {q10}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+       vst1.64         {q12}, [r0,:128]                @ next round tweak
+
+       veor            q2, q2, q10
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q3, q3, q11
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vst1.8          {q8-q9}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+       vst1.64         {q11}, [r0,:128]                @ next round tweak
+
+       veor            q1, q1, q9
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q2, q2, q10
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       vst1.8          {q8}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+       vst1.64         {q10}, [r0,:128]                @ next round tweak
+
+       veor            q0, q0, q8
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q1, q1, q9
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       vst1.8          {q0-q1}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                          @ preserve fp
+       mov             r5, r2                  @ preserve magic
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r8]!
+       mov             r3, r4
+       mov             r2, r5
+
+       vmov            q8, q9          @ next round tweak
+
+.Lxts_dec_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            r9, #0x10
+       beq             .Lxts_dec_ret
+
+       @ calculate one round of extra tweak for the stolen ciphertext
+       vldmia          r2, {q5}
+       vshr.s64        q6, q8, #63
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vswp            d13,d12
+       veor            q9, q9, q6
+
+       @ perform the final decryption with the last tweak value
+       vld1.8          {q0}, [r7]!
+       mov             r0, sp
+       veor            q0, q0, q9
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                  @ preserve fp
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q9
+       vst1.8          {q0}, [r8]
+
+       mov             r6, r8
+.Lxts_dec_steal:
+       ldrb            r1, [r8]
+       ldrb            r0, [r7], #1
+       strb            r1, [r8, #0x10]
+       strb            r0, [r8], #1
+
+       subs            r9, #1
+       bhi             .Lxts_dec_steal
+
+       vld1.8          {q0}, [r6]
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r6]
+       mov             r3, r4
+#endif
+
+.Lxts_dec_ret:
+       bic             r0, r3, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [r3, #0x20+VFP_ABI_FRAME]   @ chain tweak
+#endif
+.Lxts_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_dec_bzero
+
+       mov             sp, r3
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {q8}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644 (file)
index 0000000..4522366
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
+
+struct BS_KEY {
+       struct AES_KEY  rk;
+       int             converted;
+       u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
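+
+/*
+ * 'converted' tracks whether the bit-sliced schedule in bs[] has been
+ * derived from the regular round keys in rk yet; the assembler routines
+ * check and update it, so the (fairly expensive) conversion is done
+ * lazily, on first use after setkey.
+ */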
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+                                          struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 tweak[]);
+
+struct aesbs_cbc_ctx {
+       struct AES_KEY  enc;
+       struct BS_KEY   dec;
+};
+
+struct aesbs_ctr_ctx {
+       struct BS_KEY   enc;
+};
+
+struct aesbs_xts_ctx {
+       struct BS_KEY   enc;
+       struct BS_KEY   dec;
+       struct AES_KEY  twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 8;
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->dec.rk = ctx->enc;
+       private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+       ctx->dec.converted = 0;
+       return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 8;
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->enc.converted = 0;
+       return 0;
+}
+
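+/*
+ * XTS takes a double-length key: the first half keys the data cipher
+ * (enc/dec below), the second half keys the tweak cipher (twkey), hence
+ * the per-cipher key size of key_len * 8 / 2 == key_len * 4 bits.
+ */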
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 4;
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->dec.rk = ctx->enc.rk;
+       private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+       private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+       ctx->enc.converted = ctx->dec.converted = 0;
+       return 0;
+}
+
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while (walk.nbytes) {
+               u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+               u8 *src = walk.src.virt.addr;
+
+               if (walk.dst.virt.addr == walk.src.virt.addr) {
+                       u8 *iv = walk.iv;
+
+                       do {
+                               crypto_xor(src, iv, AES_BLOCK_SIZE);
+                               AES_encrypt(src, src, &ctx->enc);
+                               iv = src;
+                               src += AES_BLOCK_SIZE;
+                       } while (--blocks);
+                       memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+               } else {
+                       u8 *dst = walk.dst.virt.addr;
+
+                       do {
+                               crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+                               AES_encrypt(walk.iv, dst, &ctx->enc);
+                               memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+                               src += AES_BLOCK_SIZE;
+                               dst += AES_BLOCK_SIZE;
+                       } while (--blocks);
+               }
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+               kernel_neon_begin();
+               bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->dec, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       while (walk.nbytes) {
+               u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+               u8 *dst = walk.dst.virt.addr;
+               u8 *src = walk.src.virt.addr;
+               u8 bk[2][AES_BLOCK_SIZE];
+               u8 *iv = walk.iv;
+
+               do {
+                       if (walk.dst.virt.addr == walk.src.virt.addr)
+                               memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+                       AES_decrypt(src, dst, &ctx->dec.rk);
+                       crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+                       if (walk.dst.virt.addr == walk.src.virt.addr)
+                               iv = bk[blocks & 1];
+                       else
+                               iv = src;
+
+                       dst += AES_BLOCK_SIZE;
+                       src += AES_BLOCK_SIZE;
+               } while (--blocks);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
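+/*
+ * Add 'addend' to the 128-bit big-endian counter in ctr[], propagating
+ * carries from the least significant 32-bit word upwards; the loop stops
+ * as soon as a word does not overflow.
+ */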
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+       int i;
+
+       for (i = 3; i >= 0; i--, addend = 1) {
+               u32 n = be32_to_cpu(ctr[i]) + addend;
+
+               ctr[i] = cpu_to_be32(n);
+               if (n >= addend)
+                       break;
+       }
+}
+
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst, struct scatterlist *src,
+                            unsigned int nbytes)
+{
+       struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       u32 blocks;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+               u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+               __be32 *ctr = (__be32 *)walk.iv;
+               u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+               /* avoid 32 bit counter overflow in the NEON code */
+               if (unlikely(headroom < blocks)) {
+                       blocks = headroom + 1;
+                       tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+               }
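+               /*
+                * bsaes_ctr32_encrypt_blocks() only increments the low
+                * 32-bit counter word, so the request is split at the
+                * wrap-around point; inc_be128_ctr() below carries into
+                * the upper words before the rest is processed.
+                */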
+               kernel_neon_begin();
+               bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+                                          walk.dst.virt.addr, blocks,
+                                          &ctx->enc, walk.iv);
+               kernel_neon_end();
+               inc_be128_ctr(ctr, blocks);
+
+               nbytes -= blocks * AES_BLOCK_SIZE;
+               if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+                       break;
+
+               err = blkcipher_walk_done(desc, &walk, tail);
+       }
+       if (walk.nbytes) {
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 ks[AES_BLOCK_SIZE];
+
+               AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+               if (tdst != tsrc)
+                       memcpy(tdst, tsrc, nbytes);
+               crypto_xor(tdst, ks, nbytes);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* generate the initial tweak */
+       AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+       while (walk.nbytes) {
+               kernel_neon_begin();
+               bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->enc, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
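+
+/*
+ * The initial tweak is computed only once, before the walk loop, so this
+ * relies on the assembler being built with XTS_CHAIN_TWEAK: the updated
+ * tweak is written back through walk.iv after each call, letting
+ * successive walk chunks continue the tweak sequence.
+ */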
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* generate the initial tweak */
+       AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+       while (walk.nbytes) {
+               kernel_neon_begin();
+               bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->dec, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+static struct crypto_alg aesbs_algs[] = { {
+       .cra_name               = "__cbc-aes-neonbs",
+       .cra_driver_name        = "__driver-cbc-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct aesbs_cbc_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_cbc_set_key,
+               .encrypt        = aesbs_cbc_encrypt,
+               .decrypt        = aesbs_cbc_decrypt,
+       },
+}, {
+       .cra_name               = "__ctr-aes-neonbs",
+       .cra_driver_name        = "__driver-ctr-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct aesbs_ctr_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_ctr_set_key,
+               .encrypt        = aesbs_ctr_encrypt,
+               .decrypt        = aesbs_ctr_encrypt,
+       },
+}, {
+       .cra_name               = "__xts-aes-neonbs",
+       .cra_driver_name        = "__driver-xts-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct aesbs_xts_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_xts_set_key,
+               .encrypt        = aesbs_xts_encrypt,
+               .decrypt        = aesbs_xts_decrypt,
+       },
+}, {
+       .cra_name               = "cbc(aes)",
+       .cra_driver_name        = "cbc-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = __ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+}, {
+       .cra_name               = "ctr(aes)",
+       .cra_driver_name        = "ctr-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+}, {
+       .cra_name               = "xts(aes)",
+       .cra_driver_name        = "xts-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+} };
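+
+/*
+ * The "__*-aes-neonbs" blkcipher entries (priority 0) are the synchronous
+ * NEON implementations; the cbc/ctr/xts(aes) ablkcipher entries wrap them
+ * via the ablk_helper machinery, so requests issued from contexts where
+ * the NEON unit cannot be used are deferred and handled asynchronously.
+ */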
+
+static int __init aesbs_mod_init(void)
+{
+       if (!cpu_has_neon())
+               return -ENODEV;
+
+       return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+       crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644 (file)
index 0000000..f3d96d9
--- /dev/null
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness], courtesy of vld1.8's
+# neutrality. The initial version doesn't implement the interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for the Cortex-A8 core are:
+#
+# encrypt      19.5 cycles per byte processed with 128-bit key
+# decrypt      22.1 cycles per byte processed with 128-bit key
+# key conv.    440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results, keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results, keep in mind the key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#                                              <appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+#                                      <ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+       &InBasisChange  (@b);
+       &Inv_GF256      (@b[6,5,0,3,7,1,4,2],@t,@s);
+       &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
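+
+# The S-box is evaluated as a Boolean circuit rather than a table lookup:
+# a linear change of basis, inversion in GF(2^8) via the GF(2^4)/GF(2^2)
+# tower (Inv_GF256, Mul_GF4*, Mul_GF16_2 below), and a change of basis
+# back, applied to all 128 bit positions of the eight bit-sliced
+# registers at once.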
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
+my @b=@_[0..7];
+$code.=<<___;
+       veor    @b[2], @b[2], @b[1]
+       veor    @b[5], @b[5], @b[6]
+       veor    @b[3], @b[3], @b[0]
+       veor    @b[6], @b[6], @b[2]
+       veor    @b[5], @b[5], @b[0]
+
+       veor    @b[6], @b[6], @b[3]
+       veor    @b[3], @b[3], @b[7]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[3], @b[3], @b[4]
+       veor    @b[4], @b[4], @b[5]
+
+       veor    @b[2], @b[2], @b[7]
+       veor    @b[3], @b[3], @b[1]
+       veor    @b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+       veor    @b[0], @b[0], @b[6]
+       veor    @b[1], @b[1], @b[4]
+       veor    @b[4], @b[4], @b[6]
+       veor    @b[2], @b[2], @b[0]
+       veor    @b[6], @b[6], @b[1]
+
+       veor    @b[1], @b[1], @b[5]
+       veor    @b[5], @b[5], @b[3]
+       veor    @b[3], @b[3], @b[7]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[2], @b[2], @b[5]
+
+       veor    @b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb         > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb        > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+       &InvInBasisChange       (@b);
+       &Inv_GF256              (@b[5,1,2,6,3,7,0,4],@t,@s);
+       &InvOutBasisChange      (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {         # OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+        veor   @b[1], @b[1], @b[7]
+       veor    @b[4], @b[4], @b[7]
+
+       veor    @b[7], @b[7], @b[5]
+        veor   @b[1], @b[1], @b[3]
+       veor    @b[2], @b[2], @b[5]
+       veor    @b[3], @b[3], @b[7]
+
+       veor    @b[6], @b[6], @b[1]
+       veor    @b[2], @b[2], @b[0]
+        veor   @b[5], @b[5], @b[3]
+       veor    @b[4], @b[4], @b[6]
+       veor    @b[0], @b[0], @b[6]
+       veor    @b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {                # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+       veor    @b[1], @b[1], @b[5]
+       veor    @b[2], @b[2], @b[7]
+
+       veor    @b[3], @b[3], @b[1]
+       veor    @b[4], @b[4], @b[5]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[3], @b[3], @b[4]
+        veor   @b[5], @b[5], @b[0]
+       veor    @b[3], @b[3], @b[7]
+        veor   @b[6], @b[6], @b[2]
+        veor   @b[2], @b[2], @b[1]
+       veor    @b[6], @b[6], @b[3]
+
+       veor    @b[3], @b[3], @b[0]
+       veor    @b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+       vand    $t0, $t0, $x0
+       veor    $x0, $x0, $x1
+       vand    $t1, $x1, $y0
+       vand    $x0, $x0, $y1
+       veor    $x1, $t1, $t0
+       veor    $x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N {                                # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+       vand    $t0, $t0, $x0
+       veor    $x0, $x0, $x1
+       vand    $x1, $x1, $y0
+       vand    $x0, $x0, $y1
+       veor    $x1, $x1, $x0
+       veor    $x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+        veor   $t1, $y2, $y3
+       vand    $t0, $t0, $x0
+        vand   $t1, $t1, $x2
+       veor    $x0, $x0, $x1
+        veor   $x2, $x2, $x3
+       vand    $x1, $x1, $y0
+        vand   $x3, $x3, $y2
+       vand    $x0, $x0, $y1
+        vand   $x2, $x2, $y3
+       veor    $x1, $x1, $x0
+        veor   $x2, $x2, $x3
+       veor    $x0, $x0, $t0
+        veor   $x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+       veor    @t[0], @x[0], @x[2]
+       veor    @t[1], @x[1], @x[3]
+___
+       &Mul_GF4        (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+       veor    @y[0], @y[0], @y[2]
+       veor    @y[1], @y[1], @y[3]
+___
+       Mul_GF4_N_GF4   (@t[0], @t[1], @y[0], @y[1], @t[3],
+                        @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+       veor    @x[0], @x[0], @t[0]
+       veor    @x[2], @x[2], @t[0]
+       veor    @x[1], @x[1], @t[1]
+       veor    @x[3], @x[3], @t[1]
+
+       veor    @t[0], @x[4], @x[6]
+       veor    @t[1], @x[5], @x[7]
+___
+       &Mul_GF4_N_GF4  (@t[0], @t[1], @y[0], @y[1], @t[3],
+                        @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+       veor    @y[0], @y[0], @y[2]
+       veor    @y[1], @y[1], @y[3]
+___
+       &Mul_GF4        (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+       veor    @x[4], @x[4], @t[0]
+       veor    @x[6], @x[6], @t[0]
+       veor    @x[5], @x[5], @t[1]
+       veor    @x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+       veor    @t[3], @x[4], @x[6]
+       veor    @t[2], @x[5], @x[7]
+       veor    @t[1], @x[1], @x[3]
+       veor    @s[1], @x[7], @x[6]
+        vmov   @t[0], @t[2]
+       veor    @s[0], @x[0], @x[2]
+
+       vorr    @t[2], @t[2], @t[1]
+       veor    @s[3], @t[3], @t[0]
+       vand    @s[2], @t[3], @s[0]
+       vorr    @t[3], @t[3], @s[0]
+       veor    @s[0], @s[0], @t[1]
+       vand    @t[0], @t[0], @t[1]
+       veor    @t[1], @x[3], @x[2]
+       vand    @s[3], @s[3], @s[0]
+       vand    @s[1], @s[1], @t[1]
+       veor    @t[1], @x[4], @x[5]
+       veor    @s[0], @x[1], @x[0]
+       veor    @t[3], @t[3], @s[1]
+       veor    @t[2], @t[2], @s[1]
+       vand    @s[1], @t[1], @s[0]
+       vorr    @t[1], @t[1], @s[0]
+       veor    @t[3], @t[3], @s[3]
+       veor    @t[0], @t[0], @s[1]
+       veor    @t[2], @t[2], @s[2]
+       veor    @t[1], @t[1], @s[3]
+       veor    @t[0], @t[0], @s[2]
+       vand    @s[0], @x[7], @x[3]
+       veor    @t[1], @t[1], @s[2]
+       vand    @s[1], @x[6], @x[2]
+       vand    @s[2], @x[5], @x[1]
+       vorr    @s[3], @x[4], @x[0]
+       veor    @t[3], @t[3], @s[0]
+       veor    @t[1], @t[1], @s[2]
+       veor    @t[0], @t[0], @s[3]
+       veor    @t[2], @t[2], @s[1]
+
+       @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+       @ new smaller inversion
+
+       vand    @s[2], @t[3], @t[1]
+       vmov    @s[0], @t[0]
+
+       veor    @s[1], @t[2], @s[2]
+       veor    @s[3], @t[0], @s[2]
+       veor    @s[2], @t[0], @s[2]     @ @s[2]=@s[3]
+
+       vbsl    @s[1], @t[1], @t[0]
+       vbsl    @s[3], @t[3], @t[2]
+       veor    @t[3], @t[3], @t[2]
+
+       vbsl    @s[0], @s[1], @s[2]
+       vbsl    @t[0], @s[2], @s[1]
+
+       vand    @s[2], @s[0], @s[3]
+       veor    @t[1], @t[1], @t[0]
+
+       veor    @s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+       &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+       vldmia  $key!, {@t[0]-@t[3]}
+       veor    @t[0], @t[0], @x[0]
+       veor    @t[1], @t[1], @x[1]
+       vtbl.8  `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[0]}
+       veor    @t[2], @t[2], @x[2]
+       vtbl.8  `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[1]}
+       veor    @t[3], @t[3], @x[3]
+       vtbl.8  `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[2]}
+       vtbl.8  `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[3]}
+       veor    @t[0], @t[0], @x[4]
+       veor    @t[1], @t[1], @x[5]
+       vtbl.8  `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+       veor    @t[2], @t[2], @x[6]
+       vtbl.8  `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+       veor    @t[3], @t[3], @x[7]
+       vtbl.8  `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+       vtbl.8  `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
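+
+# ShiftRows is fused with AddRoundKey: the round-key words are loaded
+# with vldmia and xored in (veor) before vtbl.8 applies the ShiftRows
+# byte permutation described by $mask to each register.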
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];        # optional
+$code.=<<___;
+       vext.8  @t[0], @x[0], @x[0], #12        @ x0 <<< 32
+       vext.8  @t[1], @x[1], @x[1], #12
+        veor   @x[0], @x[0], @t[0]             @ x0 ^ (x0 <<< 32)
+       vext.8  @t[2], @x[2], @x[2], #12
+        veor   @x[1], @x[1], @t[1]
+       vext.8  @t[3], @x[3], @x[3], #12
+        veor   @x[2], @x[2], @t[2]
+       vext.8  @t[4], @x[4], @x[4], #12
+        veor   @x[3], @x[3], @t[3]
+       vext.8  @t[5], @x[5], @x[5], #12
+        veor   @x[4], @x[4], @t[4]
+       vext.8  @t[6], @x[6], @x[6], #12
+        veor   @x[5], @x[5], @t[5]
+       vext.8  @t[7], @x[7], @x[7], #12
+        veor   @x[6], @x[6], @t[6]
+
+       veor    @t[1], @t[1], @x[0]
+        veor   @x[7], @x[7], @t[7]
+        vext.8 @x[0], @x[0], @x[0], #8         @ (x0 ^ (x0 <<< 32)) <<< 64)
+       veor    @t[2], @t[2], @x[1]
+       veor    @t[0], @t[0], @x[7]
+       veor    @t[1], @t[1], @x[7]
+        vext.8 @x[1], @x[1], @x[1], #8
+       veor    @t[5], @t[5], @x[4]
+        veor   @x[0], @x[0], @t[0]
+       veor    @t[6], @t[6], @x[5]
+        veor   @x[1], @x[1], @t[1]
+        vext.8 @t[0], @x[4], @x[4], #8
+       veor    @t[4], @t[4], @x[3]
+        vext.8 @t[1], @x[5], @x[5], #8
+       veor    @t[7], @t[7], @x[6]
+        vext.8 @x[4], @x[3], @x[3], #8
+       veor    @t[3], @t[3], @x[2]
+        vext.8 @x[5], @x[7], @x[7], #8
+       veor    @t[4], @t[4], @x[7]
+        vext.8 @x[3], @x[6], @x[6], #8
+       veor    @t[3], @t[3], @x[7]
+        vext.8 @x[6], @x[2], @x[2], #8
+       veor    @x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+       veor    @x[2], @t[0], @t[4]
+       veor    @x[4], @x[4], @t[3]
+       veor    @x[5], @x[5], @t[7]
+       veor    @x[3], @x[3], @t[6]
+        @ vmov @x[2], @t[0]
+       veor    @x[6], @x[6], @t[2]
+        @ vmov @x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+       veor    @t[3], @t[3], @x[4]
+       veor    @x[5], @x[5], @t[7]
+       veor    @x[2], @x[3], @t[6]
+       veor    @x[3], @t[0], @t[4]
+       veor    @x[4], @x[6], @t[2]
+       vmov    @x[6], @t[3]
+        @ vmov @x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+       @ multiplication by 0x0e
+       vext.8  @t[7], @x[7], @x[7], #12
+       vmov    @t[2], @x[2]
+       veor    @x[2], @x[2], @x[5]             @ 2 5
+       veor    @x[7], @x[7], @x[5]             @ 7 5
+       vext.8  @t[0], @x[0], @x[0], #12
+       vmov    @t[5], @x[5]
+       veor    @x[5], @x[5], @x[0]             @ 5 0           [1]
+       veor    @x[0], @x[0], @x[1]             @ 0 1
+       vext.8  @t[1], @x[1], @x[1], #12
+       veor    @x[1], @x[1], @x[2]             @ 1 25
+       veor    @x[0], @x[0], @x[6]             @ 01 6          [2]
+       vext.8  @t[3], @x[3], @x[3], #12
+       veor    @x[1], @x[1], @x[3]             @ 125 3         [4]
+       veor    @x[2], @x[2], @x[0]             @ 25 016        [3]
+       veor    @x[3], @x[3], @x[7]             @ 3 75
+       veor    @x[7], @x[7], @x[6]             @ 75 6          [0]
+       vext.8  @t[6], @x[6], @x[6], #12
+       vmov    @t[4], @x[4]
+       veor    @x[6], @x[6], @x[4]             @ 6 4
+       veor    @x[4], @x[4], @x[3]             @ 4 375         [6]
+       veor    @x[3], @x[3], @x[7]             @ 375 756=36
+       veor    @x[6], @x[6], @t[5]             @ 64 5          [7]
+       veor    @x[3], @x[3], @t[2]             @ 36 2
+       vext.8  @t[5], @t[5], @t[5], #12
+       veor    @x[3], @x[3], @t[4]             @ 362 4         [5]
+___
+                                       my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+       @ multiplication by 0x0b
+       veor    @y[1], @y[1], @y[0]
+       veor    @y[0], @y[0], @t[0]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[1], @y[1], @t[1]
+       veor    @y[0], @y[0], @t[5]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[1], @y[1], @t[6]
+       veor    @y[0], @y[0], @t[7]
+       veor    @t[7], @t[7], @t[6]             @ clobber t[7]
+
+       veor    @y[3], @y[3], @t[0]
+        veor   @y[1], @y[1], @y[0]
+       vext.8  @t[0], @t[0], @t[0], #12
+       veor    @y[2], @y[2], @t[1]
+       veor    @y[4], @y[4], @t[1]
+       vext.8  @t[1], @t[1], @t[1], #12
+       veor    @y[2], @y[2], @t[2]
+       veor    @y[3], @y[3], @t[2]
+       veor    @y[5], @y[5], @t[2]
+       veor    @y[2], @y[2], @t[7]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[3], @y[3], @t[3]
+       veor    @y[6], @y[6], @t[3]
+       veor    @y[4], @y[4], @t[3]
+       veor    @y[7], @y[7], @t[4]
+       vext.8  @t[3], @t[3], @t[3], #12
+       veor    @y[5], @y[5], @t[4]
+       veor    @y[7], @y[7], @t[7]
+       veor    @t[7], @t[7], @t[5]             @ clobber t[7] even more
+       veor    @y[3], @y[3], @t[5]
+       veor    @y[4], @y[4], @t[4]
+
+       veor    @y[5], @y[5], @t[7]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[6], @y[6], @t[7]
+       veor    @y[4], @y[4], @t[7]
+
+       veor    @t[7], @t[7], @t[5]
+       vext.8  @t[5], @t[5], @t[5], #12
+
+       @ multiplication by 0x0d
+       veor    @y[4], @y[4], @y[7]
+        veor   @t[7], @t[7], @t[6]             @ restore t[7]
+       veor    @y[7], @y[7], @t[4]
+       vext.8  @t[6], @t[6], @t[6], #12
+       veor    @y[2], @y[2], @t[0]
+       veor    @y[7], @y[7], @t[5]
+       vext.8  @t[7], @t[7], @t[7], #12
+       veor    @y[2], @y[2], @t[2]
+
+       veor    @y[3], @y[3], @y[1]
+       veor    @y[1], @y[1], @t[1]
+       veor    @y[0], @y[0], @t[0]
+       veor    @y[3], @y[3], @t[0]
+       veor    @y[1], @y[1], @t[5]
+       veor    @y[0], @y[0], @t[5]
+       vext.8  @t[0], @t[0], @t[0], #12
+       veor    @y[1], @y[1], @t[7]
+       veor    @y[0], @y[0], @t[6]
+       veor    @y[3], @y[3], @y[1]
+       veor    @y[4], @y[4], @t[1]
+       vext.8  @t[1], @t[1], @t[1], #12
+
+       veor    @y[7], @y[7], @t[7]
+       veor    @y[4], @y[4], @t[2]
+       veor    @y[5], @y[5], @t[2]
+       veor    @y[2], @y[2], @t[6]
+       veor    @t[6], @t[6], @t[3]             @ clobber t[6]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[4], @y[4], @y[7]
+       veor    @y[3], @y[3], @t[6]
+
+       veor    @y[6], @y[6], @t[6]
+       veor    @y[5], @y[5], @t[5]
+       vext.8  @t[5], @t[5], @t[5], #12
+       veor    @y[6], @y[6], @t[4]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[5], @y[5], @t[6]
+       veor    @y[6], @y[6], @t[7]
+       vext.8  @t[7], @t[7], @t[7], #12
+       veor    @t[6], @t[6], @t[3]             @ restore t[6]
+       vext.8  @t[3], @t[3], @t[3], #12
+
+       @ multiplication by 0x09
+       veor    @y[4], @y[4], @y[1]
+       veor    @t[1], @t[1], @y[1]             @ t[1]=y[1]
+       veor    @t[0], @t[0], @t[5]             @ clobber t[0]
+       vext.8  @t[6], @t[6], @t[6], #12
+       veor    @t[1], @t[1], @t[5]
+       veor    @y[3], @y[3], @t[0]
+       veor    @t[0], @t[0], @y[0]             @ t[0]=y[0]
+       veor    @t[1], @t[1], @t[6]
+       veor    @t[6], @t[6], @t[7]             @ clobber t[6]
+       veor    @y[4], @y[4], @t[1]
+       veor    @y[7], @y[7], @t[4]
+       veor    @y[6], @y[6], @t[3]
+       veor    @y[5], @y[5], @t[2]
+       veor    @t[4], @t[4], @y[4]             @ t[4]=y[4]
+       veor    @t[3], @t[3], @y[3]             @ t[3]=y[3]
+       veor    @t[5], @t[5], @y[5]             @ t[5]=y[5]
+       veor    @t[2], @t[2], @y[2]             @ t[2]=y[2]
+       veor    @t[3], @t[3], @t[7]
+       veor    @XMM[5], @t[5], @t[6]
+       veor    @XMM[6], @t[6], @y[6]           @ t[6]=y[6]
+       veor    @XMM[2], @t[2], @t[6]
+       veor    @XMM[7], @t[7], @y[7]           @ t[7]=y[7]
+
+       vmov    @XMM[0], @t[0]
+       vmov    @XMM[1], @t[1]
+       @ vmov  @XMM[2], @t[2]
+       vmov    @XMM[3], @t[3]
+       vmov    @XMM[4], @t[4]
+       @ vmov  @XMM[5], @t[5]
+       @ vmov  @XMM[6], @t[6]
+       @ vmov  @XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing a pointer to the identity
+# below; a standalone sanity check of it follows this sub.
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
+
+$code.=<<___;
+       @ multiplication by 0x05-0x00-0x04-0x00
+       vext.8  @t[0], @x[0], @x[0], #8
+       vext.8  @t[6], @x[6], @x[6], #8
+       vext.8  @t[7], @x[7], @x[7], #8
+       veor    @t[0], @t[0], @x[0]
+       vext.8  @t[1], @x[1], @x[1], #8
+       veor    @t[6], @t[6], @x[6]
+       vext.8  @t[2], @x[2], @x[2], #8
+       veor    @t[7], @t[7], @x[7]
+       vext.8  @t[3], @x[3], @x[3], #8
+       veor    @t[1], @t[1], @x[1]
+       vext.8  @t[4], @x[4], @x[4], #8
+       veor    @t[2], @t[2], @x[2]
+       vext.8  @t[5], @x[5], @x[5], #8
+       veor    @t[3], @t[3], @x[3]
+       veor    @t[4], @t[4], @x[4]
+       veor    @t[5], @t[5], @x[5]
+
+        veor   @x[0], @x[0], @t[6]
+        veor   @x[1], @x[1], @t[6]
+        veor   @x[2], @x[2], @t[0]
+        veor   @x[4], @x[4], @t[2]
+        veor   @x[3], @x[3], @t[1]
+        veor   @x[1], @x[1], @t[7]
+        veor   @x[2], @x[2], @t[7]
+        veor   @x[4], @x[4], @t[6]
+        veor   @x[5], @x[5], @t[3]
+        veor   @x[3], @x[3], @t[6]
+        veor   @x[6], @x[6], @t[4]
+        veor   @x[4], @x[4], @t[7]
+        veor   @x[5], @x[5], @t[7]
+        veor   @x[7], @x[7], @t[5]
+___
+       &MixColumns     (@x,@t,1);      # flipped 2<->3 and 4<->6
+}
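+
+# A minimal standalone sketch (not part of the original CRYPTOGAMS code) of
+# the identity quoted in InvMixColumns above: multiplying the MixColumns
+# circulant (02 03 01 01) by the (05 00 04 00) circulant over GF(2^8) modulo
+# x^8+x^4+x^3+x+1 reproduces the InvMixColumns circulant (0e 0b 0d 09).
+# The helper _gf8_mul is introduced here purely for illustration, and the
+# block is guarded by if (0) so it never runs during code generation; flip
+# the guard to re-check the identity.
+if (0) {
+       sub _gf8_mul {                  # GF(2^8) multiplication, AES polynomial
+               my ($a,$b)=@_;
+               my $r=0;
+               for (0..7) {
+                       $r ^= $a if (($b>>$_)&1);
+                       $a = (($a<<1)&0xff) ^ (($a&0x80) ? 0x1b : 0);
+               }
+               $r;
+       }
+       my @mc  = (0x02,0x03,0x01,0x01);        # MixColumns first row
+       my @aux = (0x05,0x00,0x04,0x00);        # auxiliary circulant first row
+       for my $i (0..3) {                      # each printed row should be a
+               for my $j (0..3) {              # rotation of "0e 0b 0d 09"
+                       my $acc = 0;
+                       $acc ^= _gf8_mul($mc[($_-$i)&3], $aux[($j-$_)&3]) for (0..3);
+                       printf("%02x ", $acc);
+               }
+               print "\n";
+       }
+}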
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+       vshr.u64        $t, $b, #$n
+       veor            $t, $t, $a
+       vand            $t, $t, $mask
+       veor            $a, $a, $t
+       vshl.u64        $t, $t, #$n
+       veor            $b, $b, $t
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+       vshr.u64        $t0, $b0, #$n
+        vshr.u64       $t1, $b1, #$n
+       veor            $t0, $t0, $a0
+        veor           $t1, $t1, $a1
+       vand            $t0, $t0, $mask
+        vand           $t1, $t1, $mask
+       veor            $a0, $a0, $t0
+       vshl.u64        $t0, $t0, #$n
+        veor           $a1, $a1, $t1
+        vshl.u64       $t1, $t1, #$n
+       veor            $b0, $b0, $t0
+        veor           $b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+       vmov.i8 $t0,#0x55                       @ compose .LBS0
+       vmov.i8 $t1,#0x33                       @ compose .LBS1
+___
+       &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+       &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+       vmov.i8 $t0,#0x0f                       @ compose .LBS2
+___
+       &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+       &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+       &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+       &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH  vstmdb  sp!,{d8-d15}
+# define VFP_ABI_POP   vldmia  sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax        unified         @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu   neon
+
+.type  _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+       adr     $const,_bsaes_decrypt8
+       vldmia  $key!, {@XMM[9]}                @ round 0 key
+       add     $const,$const,#.LM0ISR-_bsaes_decrypt8
+
+       vldmia  $const!, {@XMM[8]}              @ .LM0ISR
+       veor    @XMM[10], @XMM[0], @XMM[9]      @ xor with round0 key
+       veor    @XMM[11], @XMM[1], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+       veor    @XMM[12], @XMM[2], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+       veor    @XMM[13], @XMM[3], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+       veor    @XMM[14], @XMM[4], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+       veor    @XMM[15], @XMM[5], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+       veor    @XMM[10], @XMM[6], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+       veor    @XMM[11], @XMM[7], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+        vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+       &bitslice       (@XMM[0..7, 8..11]);
+$code.=<<___;
+       sub     $rounds,$rounds,#1
+       b       .Ldec_sbox
+.align 4
+.Ldec_loop:
+___
+       &ShiftRows      (@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+       &InvSbox        (@XMM[0..7, 8..15]);
+$code.=<<___;
+       subs    $rounds,$rounds,#1
+       bcc     .Ldec_done
+___
+       &InvMixColumns  (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+       vldmia  $const, {@XMM[12]}              @ .LISR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   $const,$const,#0x10
+       bne     .Ldec_loop
+       vldmia  $const, {@XMM[12]}              @ .LISRM0
+       b       .Ldec_loop
+.align 4
+.Ldec_done:
+___
+       &bitslice       (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+       vldmia  $key, {@XMM[8]}                 @ last round key
+       veor    @XMM[6], @XMM[6], @XMM[8]
+       veor    @XMM[4], @XMM[4], @XMM[8]
+       veor    @XMM[2], @XMM[2], @XMM[8]
+       veor    @XMM[7], @XMM[7], @XMM[8]
+       veor    @XMM[3], @XMM[3], @XMM[8]
+       veor    @XMM[5], @XMM[5], @XMM[8]
+       veor    @XMM[0], @XMM[0], @XMM[8]
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       bx      lr
+.size  _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type  _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR:       @ InvShiftRows constants
+       .quad   0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+       .quad   0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+       .quad   0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:                @ ShiftRows constants
+       .quad   0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+       .quad   0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+       .quad   0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+       .quad   0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+       .quad   0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 6
+.size  _bsaes_const,.-_bsaes_const
+
+.type  _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+       adr     $const,_bsaes_encrypt8
+       vldmia  $key!, {@XMM[9]}                @ round 0 key
+       sub     $const,$const,#_bsaes_encrypt8-.LM0SR
+
+       vldmia  $const!, {@XMM[8]}              @ .LM0SR
+_bsaes_encrypt8_alt:
+       veor    @XMM[10], @XMM[0], @XMM[9]      @ xor with round0 key
+       veor    @XMM[11], @XMM[1], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+       veor    @XMM[12], @XMM[2], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+       veor    @XMM[13], @XMM[3], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+       veor    @XMM[14], @XMM[4], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+       veor    @XMM[15], @XMM[5], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+       veor    @XMM[10], @XMM[6], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+       veor    @XMM[11], @XMM[7], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+        vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+       &bitslice       (@XMM[0..7, 8..11]);
+$code.=<<___;
+       sub     $rounds,$rounds,#1
+       b       .Lenc_sbox
+.align 4
+.Lenc_loop:
+___
+       &ShiftRows      (@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+       &Sbox           (@XMM[0..7, 8..15]);
+$code.=<<___;
+       subs    $rounds,$rounds,#1
+       bcc     .Lenc_done
+___
+       &MixColumns     (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+       vldmia  $const, {@XMM[12]}              @ .LSR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   $const,$const,#0x10
+       bne     .Lenc_loop
+       vldmia  $const, {@XMM[12]}              @ .LSRM0
+       b       .Lenc_loop
+.align 4
+.Lenc_done:
+___
+       # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+       &bitslice       (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+       vldmia  $key, {@XMM[8]}                 @ last round key
+       veor    @XMM[4], @XMM[4], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[8]
+       veor    @XMM[3], @XMM[3], @XMM[8]
+       veor    @XMM[7], @XMM[7], @XMM[8]
+       veor    @XMM[2], @XMM[2], @XMM[8]
+       veor    @XMM[5], @XMM[5], @XMM[8]
+       veor    @XMM[0], @XMM[0], @XMM[8]
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       bx      lr
+.size  _bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+       &swapmove       (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+       @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+       vmov    @x[2], @x[0]
+       vmov    @x[3], @x[1]
+___
+       #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+       &swapmove2x     (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+       @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+       vmov    @x[4], @x[0]
+       vmov    @x[6], @x[2]
+       vmov    @x[5], @x[1]
+       vmov    @x[7], @x[3]
+___
+       &swapmove2x     (@x[0,4,1,5],4,$bs2,$t2,$t3);
+       &swapmove2x     (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type  _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+       adr     $const,_bsaes_key_convert
+       vld1.8  {@XMM[7]},  [$inp]!             @ load round 0 key
+       sub     $const,$const,#_bsaes_key_convert-.LM0
+       vld1.8  {@XMM[15]}, [$inp]!             @ load round 1 key
+
+       vmov.i8 @XMM[8],  #0x01                 @ bit masks
+       vmov.i8 @XMM[9],  #0x02
+       vmov.i8 @XMM[10], #0x04
+       vmov.i8 @XMM[11], #0x08
+       vmov.i8 @XMM[12], #0x10
+       vmov.i8 @XMM[13], #0x20
+       vldmia  $const, {@XMM[14]}              @ .LM0
+
+#ifdef __ARMEL__
+       vrev32.8        @XMM[7],  @XMM[7]
+       vrev32.8        @XMM[15], @XMM[15]
+#endif
+       sub     $rounds,$rounds,#1
+       vstmia  $out!, {@XMM[7]}                @ save round 0 key
+       b       .Lkey_loop
+
+.align 4
+.Lkey_loop:
+       vtbl.8  `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+       vtbl.8  `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+       vmov.i8 @XMM[6],  #0x40
+       vmov.i8 @XMM[15], #0x80
+
+       vtst.8  @XMM[0], @XMM[7], @XMM[8]
+       vtst.8  @XMM[1], @XMM[7], @XMM[9]
+       vtst.8  @XMM[2], @XMM[7], @XMM[10]
+       vtst.8  @XMM[3], @XMM[7], @XMM[11]
+       vtst.8  @XMM[4], @XMM[7], @XMM[12]
+       vtst.8  @XMM[5], @XMM[7], @XMM[13]
+       vtst.8  @XMM[6], @XMM[7], @XMM[6]
+       vtst.8  @XMM[7], @XMM[7], @XMM[15]
+       vld1.8  {@XMM[15]}, [$inp]!             @ load next round key
+       vmvn    @XMM[0], @XMM[0]                @ "pnot"
+       vmvn    @XMM[1], @XMM[1]
+       vmvn    @XMM[5], @XMM[5]
+       vmvn    @XMM[6], @XMM[6]
+#ifdef __ARMEL__
+       vrev32.8        @XMM[15], @XMM[15]
+#endif
+       subs    $rounds,$rounds,#1
+       vstmia  $out!,{@XMM[0]-@XMM[7]}         @ write bit-sliced round key
+       bne     .Lkey_loop
+
+       vmov.i8 @XMM[7],#0x63                   @ compose .L63
+       @ don't save last round key
+       bx      lr
+.size  _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {               # the following four functions are an unsupported
+                       # interface, used for benchmarking...
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type  bsaes_enc_key_convert,%function
+.align 4
+bsaes_enc_key_convert:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+
+       ldr     r5,[$inp,#240]                  @ pass rounds
+       mov     r4,$inp                         @ pass key
+       mov     r12,$out                        @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type  bsaes_encrypt_128,%function
+.align 4
+bsaes_encrypt_128:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+.Lenc128_loop:
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+       mov     r4,$key                         @ pass the key
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5,#10                          @ pass rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]!
+
+       bl      _bsaes_encrypt8
+
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       subs    $len,$len,#0x80
+       vst1.8  {@XMM[5]}, [$out]!
+       bhi     .Lenc128_loop
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type  bsaes_dec_key_convert,%function
+.align 4
+bsaes_dec_key_convert:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+
+       ldr     r5,[$inp,#240]                  @ pass rounds
+       mov     r4,$inp                         @ pass key
+       mov     r12,$out                        @ pass key schedule
+       bl      _bsaes_key_convert
+       vldmia  $out, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  $out, {@XMM[7]}
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type  bsaes_decrypt_128,%function
+.align 4
+bsaes_decrypt_128:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+.Ldec128_loop:
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+       mov     r4,$key                         @ pass the key
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5,#10                          @ pass rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]!
+
+       bl      _bsaes_decrypt8
+
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       subs    $len,$len,#0x80
+       vst1.8  {@XMM[5]}, [$out]!
+       bhi     .Ldec128_loop
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global        bsaes_cbc_encrypt
+.type  bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef        __KERNEL__
+       cmp     $len, #128
+#ifndef        __thumb__
+       blo     AES_cbc_encrypt
+#else
+       bhs     1f
+       b       AES_cbc_encrypt
+1:
+#endif
+#endif
+
+       @ it is up to the caller to make sure we are called with enc == 0
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     $ivp, [ip]                      @ IV is 1st arg on the stack
+       mov     $len, $len, lsr#4               @ len in 16 byte blocks
+       sub     sp, #0x10                       @ scratch space to carry over the IV
+       mov     $fp, sp                         @ save sp
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       add     r12, #`128-32`                  @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12                         @ sp is $keysched
+       bl      _bsaes_key_convert
+       vldmia  $keysched, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  $keysched, {@XMM[7]}
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, $key, #248
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12, {@XMM[15]}                 @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+
+.align 2
+0:
+#endif
+
+       vld1.8  {@XMM[15]}, [$ivp]              @ load IV
+       b       .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+       subs    $len, $len, #0x8
+       bmi     .Lcbc_dec_loop_finish
+
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, $keysched                   @ pass the key
+#else
+       add     r4, $key, #248
+#endif
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5, $rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]
+       sub     $inp, $inp, #0x60
+       vstmia  $fp, {@XMM[15]}                 @ put aside IV
+
+       bl      _bsaes_decrypt8
+
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]-@XMM[13]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[14]-@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[3], @XMM[3], @XMM[13]
+       vst1.8  {@XMM[6]}, [$out]!
+       veor    @XMM[5], @XMM[5], @XMM[14]
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       vst1.8  {@XMM[5]}, [$out]!
+
+       b       .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+       adds    $len, $len, #8
+       beq     .Lcbc_dec_done
+
+       vld1.8  {@XMM[0]}, [$inp]!              @ load input
+       cmp     $len, #2
+       blo     .Lcbc_dec_one
+       vld1.8  {@XMM[1]}, [$inp]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, $keysched                   @ pass the key
+#else
+       add     r4, $key, #248
+#endif
+       mov     r5, $rounds
+       vstmia  $fp, {@XMM[15]}                 @ put aside IV
+       beq     .Lcbc_dec_two
+       vld1.8  {@XMM[2]}, [$inp]!
+       cmp     $len, #4
+       blo     .Lcbc_dec_three
+       vld1.8  {@XMM[3]}, [$inp]!
+       beq     .Lcbc_dec_four
+       vld1.8  {@XMM[4]}, [$inp]!
+       cmp     $len, #6
+       blo     .Lcbc_dec_five
+       vld1.8  {@XMM[5]}, [$inp]!
+       beq     .Lcbc_dec_six
+       vld1.8  {@XMM[6]}, [$inp]!
+       sub     $inp, $inp, #0x70
+
+       bl      _bsaes_decrypt8
+
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]-@XMM[13]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[3], @XMM[3], @XMM[13]
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+       sub     $inp, $inp, #0x60
+       bl      _bsaes_decrypt8
+       vldmia  $fp,{@XMM[14]}                  @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+       sub     $inp, $inp, #0x50
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+       sub     $inp, $inp, #0x40
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+       sub     $inp, $inp, #0x30
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+       sub     $inp, $inp, #0x20
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]}, [$inp]!              @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[15]}, [$inp]!             @ reload input
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+       sub     $inp, $inp, #0x10
+       mov     $rounds, $out                   @ save original out pointer
+       mov     $out, $fp                       @ use the iv scratch space as out buffer
+       mov     r2, $key
+       vmov    @XMM[4],@XMM[15]                @ just in case ensure that IV
+       vmov    @XMM[5],@XMM[0]                 @ and input are preserved
+       bl      AES_decrypt
+       vld1.8  {@XMM[0]}, [$fp,:64]            @ load result
+       veor    @XMM[0], @XMM[0], @XMM[4]       @ ^= IV
+       vmov    @XMM[15], @XMM[5]               @ @XMM[5] holds input
+       vst1.8  {@XMM[0]}, [$rounds]            @ write output
+
+.Lcbc_dec_done:
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+.Lcbc_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          $keysched!, {q0-q1}
+       cmp             $keysched, $fp
+       bne             .Lcbc_dec_bzero
+#endif
+
+       mov     sp, $fp
+       add     sp, #0x10                       @ add sp,$fp,#0x10 is no good for thumb
+       vst1.8  {@XMM[15]}, [$ivp]              @ return IV
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}
+.size  bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";      # shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern        AES_encrypt
+.global        bsaes_ctr32_encrypt_blocks
+.type  bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+       cmp     $len, #8                        @ use plain AES for
+       blo     .Lctr_enc_short                 @ small sizes
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     $ctr, [ip]                      @ ctr is 1st arg on the stack
+       sub     sp, sp, #0x10                   @ scratch space to carry over the ctr
+       mov     $fp, sp                         @ save sp
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       add     r12, #`128-32`                  @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12                         @ sp is $keysched
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+       vld1.8  {@XMM[0]}, [$ctr]               @ load counter
+       add     $ctr, $const, #.LREVM0SR-.LM0   @ borrow $ctr
+       vldmia  $keysched, {@XMM[4]}            @ load round0 key
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+.align 2
+0:     add     r12, $key, #248
+       vld1.8  {@XMM[0]}, [$ctr]               @ load counter
+       adrl    $ctr, .LREVM0SR                 @ borrow $ctr
+       vldmia  r12, {@XMM[4]}                  @ load round0 key
+       sub     sp, #0x10                       @ place for adjusted round0 key
+#endif
+
+       vmov.i32        @XMM[8],#1              @ compose 1<<96
+       veor            @XMM[9],@XMM[9],@XMM[9]
+       vrev32.8        @XMM[0],@XMM[0]
+       vext.8          @XMM[8],@XMM[9],@XMM[8],#4
+       vrev32.8        @XMM[4],@XMM[4]
+       vadd.u32        @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+       vstmia  $keysched, {@XMM[4]}            @ save adjusted round0 key
+       b       .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+       vadd.u32        @XMM[10], @XMM[8], @XMM[9]      @ compose 3<<96
+       vadd.u32        @XMM[1], @XMM[0], @XMM[8]       @ +1
+       vadd.u32        @XMM[2], @XMM[0], @XMM[9]       @ +2
+       vadd.u32        @XMM[3], @XMM[0], @XMM[10]      @ +3
+       vadd.u32        @XMM[4], @XMM[1], @XMM[10]
+       vadd.u32        @XMM[5], @XMM[2], @XMM[10]
+       vadd.u32        @XMM[6], @XMM[3], @XMM[10]
+       vadd.u32        @XMM[7], @XMM[4], @XMM[10]
+       vadd.u32        @XMM[10], @XMM[5], @XMM[10]     @ next counter
+
+       @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+       @ to flip byte order in 32-bit counter
+
+       vldmia          $keysched, {@XMM[9]}            @ load round0 key
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, $keysched, #0x10            @ pass next round key
+#else
+       add             r4, $key, #`248+16`
+#endif
+       vldmia          $ctr, {@XMM[8]}                 @ .LREVM0SR
+       mov             r5, $rounds                     @ pass rounds
+       vstmia          $fp, {@XMM[10]}                 @ save next counter
+       sub             $const, $ctr, #.LREVM0SR-.LSR   @ pass constants
+
+       bl              _bsaes_encrypt8_alt
+
+       subs            $len, $len, #8
+       blo             .Lctr_enc_loop_done
+
+       vld1.8          {@XMM[8]-@XMM[9]}, [$inp]!      @ load input
+       vld1.8          {@XMM[10]-@XMM[11]}, [$inp]!
+       veor            @XMM[0], @XMM[8]
+       veor            @XMM[1], @XMM[9]
+       vld1.8          {@XMM[12]-@XMM[13]}, [$inp]!
+       veor            @XMM[4], @XMM[10]
+       veor            @XMM[6], @XMM[11]
+       vld1.8          {@XMM[14]-@XMM[15]}, [$inp]!
+       veor            @XMM[3], @XMM[12]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor            @XMM[7], @XMM[13]
+       veor            @XMM[2], @XMM[14]
+       vst1.8          {@XMM[4]}, [$out]!
+       veor            @XMM[5], @XMM[15]
+       vst1.8          {@XMM[6]}, [$out]!
+       vmov.i32        @XMM[8], #1                     @ compose 1<<96
+       vst1.8          {@XMM[3]}, [$out]!
+       veor            @XMM[9], @XMM[9], @XMM[9]
+       vst1.8          {@XMM[7]}, [$out]!
+       vext.8          @XMM[8], @XMM[9], @XMM[8], #4
+       vst1.8          {@XMM[2]}, [$out]!
+       vadd.u32        @XMM[9],@XMM[8],@XMM[8]         @ compose 2<<96
+       vst1.8          {@XMM[5]}, [$out]!
+       vldmia          $fp, {@XMM[0]}                  @ load counter
+
+       bne             .Lctr_enc_loop
+       b               .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+       add             $len, $len, #8
+       vld1.8          {@XMM[8]}, [$inp]!      @ load input
+       veor            @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!      @ write output
+       cmp             $len, #2
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[9]}, [$inp]!
+       veor            @XMM[1], @XMM[9]
+       vst1.8          {@XMM[1]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[10]}, [$inp]!
+       veor            @XMM[4], @XMM[10]
+       vst1.8          {@XMM[4]}, [$out]!
+       cmp             $len, #4
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[11]}, [$inp]!
+       veor            @XMM[6], @XMM[11]
+       vst1.8          {@XMM[6]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[12]}, [$inp]!
+       veor            @XMM[3], @XMM[12]
+       vst1.8          {@XMM[3]}, [$out]!
+       cmp             $len, #6
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[13]}, [$inp]!
+       veor            @XMM[7], @XMM[13]
+       vst1.8          {@XMM[7]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[14]}, [$inp]
+       veor            @XMM[2], @XMM[14]
+       vst1.8          {@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifndef        BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:                       @ wipe key schedule [if any]
+       vstmia          $keysched!, {q0-q1}
+       cmp             $keysched, $fp
+       bne             .Lctr_enc_bzero
+#else
+       vstmia          $keysched, {q0-q1}
+#endif
+
+       mov     sp, $fp
+       add     sp, #0x10               @ add sp,$fp,#0x10 is no good for thumb
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}       @ return
+
+.align 4
+.Lctr_enc_short:
+       ldr     ip, [sp]                @ ctr pointer is passed on stack
+       stmdb   sp!, {r4-r8, lr}
+
+       mov     r4, $inp                @ copy arguments
+       mov     r5, $out
+       mov     r6, $len
+       mov     r7, $key
+       ldr     r8, [ip, #12]           @ load counter LSW
+       vld1.8  {@XMM[1]}, [ip]         @ load whole counter value
+#ifdef __ARMEL__
+       rev     r8, r8
+#endif
+       sub     sp, sp, #0x10
+       vst1.8  {@XMM[1]}, [sp,:64]     @ copy counter value
+       sub     sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+       add     r0, sp, #0x10           @ input counter value
+       mov     r1, sp                  @ output on the stack
+       mov     r2, r7                  @ key
+
+       bl      AES_encrypt
+
+       vld1.8  {@XMM[0]}, [r4]!        @ load input
+       vld1.8  {@XMM[1]}, [sp,:64]     @ load encrypted counter
+       add     r8, r8, #1
+#ifdef __ARMEL__
+       rev     r0, r8
+       str     r0, [sp, #0x1c]         @ next counter value
+#else
+       str     r8, [sp, #0x1c]         @ next counter value
+#endif
+       veor    @XMM[0],@XMM[0],@XMM[1]
+       vst1.8  {@XMM[0]}, [r5]!        @ store output
+       subs    r6, r6, #1
+       bne     .Lctr_enc_short_loop
+
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+       vstmia          sp!, {q0-q1}
+
+       ldmia   sp!, {r4-r8, pc}
+.size  bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#      const AES_KEY *key1, const AES_KEY *key2,
+#      const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";                # returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type  bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future $fp
+
+       mov     $inp, r0
+       mov     $out, r1
+       mov     $len, r2
+       mov     $key, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0,sp                           @ pointer to initial tweak
+#endif
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+       mov     $fp, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       @ add   r12, #`128-32`                  @ size of bit-sliced key schedule
+       sub     r12, #`32+16`                   @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7], @XMM[7], @XMM[15]      @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7], @XMM[7], @XMM[15]      @ fix up last round key
+       vstmia  r12, {@XMM[7]}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+
+       vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
+       adr     $magic, .Lxts_magic
+
+       subs    $len, #0x80
+       blo     .Lxts_enc_short
+       b       .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       vadd.u64        @XMM[8], @XMM[15], @XMM[15]
+       vst1.64         {@XMM[15]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       veor            @XMM[8], @XMM[8], @T[0]
+       vst1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       vld1.8          {@XMM[6]-@XMM[7]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       veor            @XMM[7], @XMM[7], @XMM[15]
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vld1.64         {@XMM[14]-@XMM[15]}, [r0,:128]!
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[2], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       veor            @XMM[13], @XMM[5], @XMM[15]
+       vst1.8          {@XMM[12]-@XMM[13]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       subs            $len, #0x80
+       bpl             .Lxts_enc_loop
+
+.Lxts_enc_short:
+       adds            $len, #0x70
+       bmi             .Lxts_enc_done
+
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+       subs            $len, #0x10
+       bmi             .Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       sub             $len, #0x10
+       vst1.64         {@XMM[15]}, [r0,:128]           @ next round tweak
+
+       vld1.8          {@XMM[6]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vld1.64         {@XMM[14]}, [r0,:128]!
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[2], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       vst1.8          {@XMM[12]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+       vst1.64         {@XMM[14]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[4], @XMM[4], @XMM[12]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[5], @XMM[5], @XMM[13]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+       .quad   1, 0x87
+
+.align 5
+.Lxts_enc_5:
+       vst1.64         {@XMM[13]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[3], @XMM[3], @XMM[11]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[4], @XMM[4], @XMM[12]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       vst1.8          {@XMM[10]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+       vst1.64         {@XMM[12]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[2], @XMM[2], @XMM[10]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[3], @XMM[3], @XMM[11]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+       vst1.64         {@XMM[11]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[1], @XMM[1], @XMM[9]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[2], @XMM[2], @XMM[10]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       vld1.64         {@XMM[10]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       vst1.8          {@XMM[8]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+       vst1.64         {@XMM[10]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[0], @XMM[0], @XMM[8]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[1], @XMM[1], @XMM[9]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                         @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!
+       mov             $fp, r4
+
+       vmov            @XMM[8], @XMM[9]                @ next round tweak
+
+.Lxts_enc_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            $len, #0x10
+       beq             .Lxts_enc_ret
+       sub             r6, $out, #0x10
+
+.Lxts_enc_steal:
+       ldrb            r0, [$inp], #1
+       ldrb            r1, [$out, #-0x10]
+       strb            r0, [$out, #-0x10]
+       strb            r1, [$out], #1
+
+       subs            $len, #1
+       bhi             .Lxts_enc_steal
+
+       vld1.8          {@XMM[0]}, [r6]
+       mov             r0, sp
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                 @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [r6]
+       mov             $fp, r4
+#endif
+
+.Lxts_enc_ret:
+       bic             r0, $fp, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [$fp, #0x20+VFP_ABI_FRAME]  @ chain tweak
+#endif
+.Lxts_enc_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_enc_bzero
+
+       mov             sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {@XMM[8]}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type  bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future $fp
+
+       mov     $inp, r0
+       mov     $out, r1
+       mov     $len, r2
+       mov     $key, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0, sp                          @ pointer to initial tweak
+#endif
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+       mov     $fp, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       @ add   r12, #`128-32`                  @ size of bit-sliced key schedule
+       sub     r12, #`32+16`                   @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, sp, #0x90
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, $key, #248
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+       vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
+       adr     $magic, .Lxts_magic
+
+       tst     $len, #0xf                      @ if not multiple of 16
+       it      ne                              @ Thumb2 thing, sanity check in ARM
+       subne   $len, #0x10                     @ subtract another 16 bytes
+       subs    $len, #0x80
+
+       blo     .Lxts_dec_short
+       b       .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       vadd.u64        @XMM[8], @XMM[15], @XMM[15]
+       vst1.64         {@XMM[15]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       veor            @XMM[8], @XMM[8], @T[0]
+       vst1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       vld1.8          {@XMM[6]-@XMM[7]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       veor            @XMM[7], @XMM[7], @XMM[15]
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vld1.64         {@XMM[14]-@XMM[15]}, [r0,:128]!
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[3], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       veor            @XMM[13], @XMM[5], @XMM[15]
+       vst1.8          {@XMM[12]-@XMM[13]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       subs            $len, #0x80
+       bpl             .Lxts_dec_loop
+
+.Lxts_dec_short:
+       adds            $len, #0x70
+       bmi             .Lxts_dec_done
+
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+       subs            $len, #0x10
+       bmi             .Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       sub             $len, #0x10
+       vst1.64         {@XMM[15]}, [r0,:128]           @ next round tweak
+
+       vld1.8          {@XMM[6]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vld1.64         {@XMM[14]}, [r0,:128]!
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[3], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       vst1.8          {@XMM[12]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+       vst1.64         {@XMM[14]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[4], @XMM[4], @XMM[12]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[5], @XMM[5], @XMM[13]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+       vst1.64         {@XMM[13]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[3], @XMM[3], @XMM[11]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[4], @XMM[4], @XMM[12]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       vst1.8          {@XMM[10]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+       vst1.64         {@XMM[12]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[2], @XMM[2], @XMM[10]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[3], @XMM[3], @XMM[11]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+       vst1.64         {@XMM[11]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[1], @XMM[1], @XMM[9]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[2], @XMM[2], @XMM[10]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       vld1.64         {@XMM[10]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       vst1.8          {@XMM[8]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+       vst1.64         {@XMM[10]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[0], @XMM[0], @XMM[8]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[1], @XMM[1], @XMM[9]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                         @ preserve fp
+       mov             r5, $magic                      @ preserve magic
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!
+       mov             $fp, r4
+       mov             $magic, r5
+
+       vmov            @XMM[8], @XMM[9]                @ next round tweak
+
+.Lxts_dec_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            $len, #0x10
+       beq             .Lxts_dec_ret
+
+       @ calculate one round of extra tweak for the stolen ciphertext
+       vldmia          $magic, {$twmask}
+       vshr.s64        @XMM[6], @XMM[8], #63
+       vand            @XMM[6], @XMM[6], $twmask
+       vadd.u64        @XMM[9], @XMM[8], @XMM[8]
+       vswp            `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+       veor            @XMM[9], @XMM[9], @XMM[6]
+
+       @ perform the final decryption with the last tweak value
+       vld1.8          {@XMM[0]}, [$inp]!
+       mov             r0, sp
+       veor            @XMM[0], @XMM[0], @XMM[9]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                 @ preserve fp
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[9]
+       vst1.8          {@XMM[0]}, [$out]
+
+       mov             r6, $out
+.Lxts_dec_steal:
+       ldrb            r1, [$out]
+       ldrb            r0, [$inp], #1
+       strb            r1, [$out, #0x10]
+       strb            r0, [$out], #1
+
+       subs            $len, #1
+       bhi             .Lxts_dec_steal
+
+       vld1.8          {@XMM[0]}, [r6]
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [r6]
+       mov             $fp, r4
+#endif
+
+.Lxts_dec_ret:
+       bic             r0, $fp, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [$fp, #0x20+VFP_ABI_FRAME]  @ chain tweak
+#endif
+.Lxts_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_dec_bzero
+
+       mov             sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {@XMM[8]}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+       next if (/^#!/);
+        last if (!s/^#/@/ and !/^$/);
+        print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
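
The vadd.u64/vshr.s64/vswp/veor sequences in the tweak loops above multiply the 128-bit XTS tweak by x in GF(2^128), folding the carry back in with the 0x87 constant stored at .Lxts_magic, while the .Lxts_enc_steal/.Lxts_dec_steal loops handle ciphertext stealing for a final partial block. A rough C sketch of the per-block tweak update, purely for illustration and not part of this patch:

    /* Multiply the little-endian 128-bit tweak by x in GF(2^128),
     * reducing with the .Lxts_magic constant 0x87. */
    static void xts_tweak_mul_alpha(unsigned char t[16])
    {
            unsigned int carry = 0;
            int i;

            for (i = 0; i < 16; i++) {
                    unsigned int c = t[i] >> 7;

                    t[i] = (unsigned char)((t[i] << 1) | carry);
                    carry = c;
            }
            if (carry)
                    t[0] ^= 0x87;       /* x^128 = x^7 + x^2 + x + 1 */
    }

The NEON code performs the same update on two 64-bit lanes at once, which is why the high and low halves are swapped with vswp before the conditional XOR.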
index d3db39860b9cc5eb83053f99f9430b926443609c..a6395c0277152f74645b8fceb572abc9b1c5a2db 100644 (file)
@@ -24,6 +24,7 @@ generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += siginfo.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
@@ -31,5 +32,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += types.h
 generic-y += unaligned.h
index da1c77d39327963ab10e633aeb8809aac7da2dec..55ffc3b850f43e17b4337c83af16c1c0bdf81afc 100644 (file)
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/prefetch.h>
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
        unsigned long tmp;
        int result;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_add\n"
 "1:    ldrex   %0, [%3]\n"
 "      add     %0, %0, %4\n"
@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
        unsigned long tmp;
        int result;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_sub\n"
 "1:    ldrex   %0, [%3]\n"
 "      sub     %0, %0, %4\n"
@@ -138,6 +141,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
        unsigned long tmp, tmp2;
 
+       prefetchw(addr);
        __asm__ __volatile__("@ atomic_clear_mask\n"
 "1:    ldrex   %0, [%3]\n"
 "      bic     %0, %0, %4\n"
@@ -283,6 +287,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 {
        u64 tmp;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
 "1:    ldrexd  %0, %H0, [%2]\n"
 "      strexd  %0, %3, %H3, [%2]\n"
@@ -299,6 +304,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
        u64 result;
        unsigned long tmp;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_add\n"
 "1:    ldrexd  %0, %H0, [%3]\n"
 "      adds    %0, %0, %4\n"
@@ -339,6 +345,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
        u64 result;
        unsigned long tmp;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_sub\n"
 "1:    ldrexd  %0, %H0, [%3]\n"
 "      subs    %0, %0, %4\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644 (file)
index 0000000..1714800
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+                        bL_switch_completion_handler completer,
+                        void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+       return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE   0
+#define BL_NOTIFY_POST_ENABLE  1
+#define BL_NOTIFY_PRE_DISABLE  2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled.  Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled().  These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
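
The notifier and get/put_enabled interfaces declared above are meant for client code that must not race with the switcher being turned on or off. A hedged sketch of such a client follows; the function and variable names are made up for illustration and do not come from this series:

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <asm/bL_switcher.h>

    static int my_bl_notify(struct notifier_block *nb, unsigned long event,
                            void *data)
    {
            /* Called with the activation lock held; must not call
             * bL_switcher_{get,put}_enabled() from here. */
            if (event == BL_NOTIFY_PRE_DISABLE)
                    pr_info("big.LITTLE switcher about to be disabled\n");
            return NOTIFY_OK;
    }

    static struct notifier_block my_bl_nb = {
            .notifier_call = my_bl_notify,
    };

    static int __init my_bl_client_init(void)
    {
            int ret = bL_switcher_register_notifier(&my_bl_nb);

            if (ret)
                    return ret;

            if (bL_switcher_get_enabled()) {
                    /* switcher state is pinned until the matching put */
                    bL_switcher_put_enabled();
            }
            return 0;
    }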
index 4f009c10540dff2a2e7efd08b0671c2369547b90..df2fbba7efc80d57074a6053704a9c70119aae03 100644 (file)
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        return ret;
 }
 
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+                                            unsigned long long old,
+                                            unsigned long long new)
+{
+       unsigned long long oldval;
+       unsigned long res;
+
+       __asm__ __volatile__(
+"1:    ldrexd          %1, %H1, [%3]\n"
+"      teq             %1, %4\n"
+"      teqeq           %H1, %H4\n"
+"      bne             2f\n"
+"      strexd          %0, %5, %H5, [%3]\n"
+"      teq             %0, #0\n"
+"      bne             1b\n"
+"2:"
+       : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+       : "r" (ptr), "r" (old), "r" (new)
+       : "cc");
+
+       return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+                                               unsigned long long old,
+                                               unsigned long long new)
+{
+       unsigned long long ret;
+
+       smp_mb();
+       ret = __cmpxchg64(ptr, old, new);
+       smp_mb();
+
+       return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)                                         \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),                     \
                                       (unsigned long)(o),              \
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                       sizeof(*(ptr))))
 
 #define cmpxchg64(ptr, o, n)                                           \
-       ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),       \
-                                               atomic64_t,             \
-                                               counter),               \
-                                             (unsigned long long)(o),  \
-                                             (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n)                                     \
-       ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),        \
-                                               local64_t,              \
-                                               a),                     \
-                                            (unsigned long long)(o),   \
-                                            (unsigned long long)(n)))
+       ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),                      \
+                                       (unsigned long long)(o),        \
+                                       (unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n)                                   \
+       ((__typeof__(*(ptr)))__cmpxchg64((ptr),                         \
+                                       (unsigned long long)(o),        \
+                                       (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)     cmpxchg64_relaxed((ptr), (o), (n))
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
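
The new __cmpxchg64()/cmpxchg64() above give ARMv6+ a native 64-bit compare-and-swap built on ldrexd/strexd instead of routing through atomic64_t. A minimal sketch of the usual retry loop a caller would build on it; the helper name is illustrative:

    static void counter_add64(u64 *counter, u64 delta)
    {
            u64 old, new;

            /* Standard compare-and-swap loop: retry until no other CPU
             * updated the counter between the load and the cmpxchg64(). */
            do {
                    old = ACCESS_ONCE(*counter);
                    new = old + delta;
            } while (cmpxchg64(counter, old, new) != old);
    }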
 
index 9672e978d50df67d94c3dd86d23f3bcdd187c54d..acdde76b39bbae3064034fff78b9dd2b95bbd39c 100644 (file)
@@ -10,6 +10,7 @@
 #define CPUID_TLBTYPE  3
 #define CPUID_MPUIR    4
 #define CPUID_MPIDR    5
+#define CPUID_REVIDR   6
 
 #ifdef CONFIG_CPU_V7M
 #define CPUID_EXT_PFR0 0x40
index 5b579b951503e51a436683d6fb0be964d983bf67..863cd84eb1a24955e9af8253dcfa8f0b710c95ae 100644 (file)
@@ -64,6 +64,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 {
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
+
 #else
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
@@ -86,6 +87,13 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+       return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
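
dma_max_pfn() above reports the highest page frame the device can reach through DMA. A hedged sketch of the intended kind of consumer, capping block-layer bounce buffering at that limit; the driver helper name is illustrative:

    static void my_set_bounce_limit(struct request_queue *q, struct device *dev)
    {
            u64 limit = (u64)dma_max_pfn(dev) << PAGE_SHIFT;

            /* Requests above this address get bounced to lower memory. */
            blk_queue_bounce_limit(q, limit);
    }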
index 2740c2a2df639361617f6fe484ead14f8625eaf2..3d7351c844aac0ae2392d441796ce9904dcaf717 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 6
+#define NR_IPI 7
 
 typedef struct {
        unsigned int __softirq_pending;
index bfc198c759130109d44b73b4506ea853d01fba28..863c892b4aaa7403c3bde97b7de3b2cd9a75519c 100644 (file)
@@ -16,7 +16,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:\n\t"
+       asm_volatile_goto("1:\n\t"
                 JUMP_LABEL_NOP "\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".word 1b, %l[l_yes], %c0\n\t"
index 402a2bc6aa687b94b09af6efa039c58fb80436d2..17a3fa2979e8ae5c5a56f88eda448f635c3e132c 100644 (file)
@@ -49,6 +49,7 @@ struct machine_desc {
        bool                    (*smp_init)(void);
        void                    (*fixup)(struct tag *, char **,
                                         struct meminfo *);
+       void                    (*init_meminfo)(void);
        void                    (*reserve)(void);/* reserve mem blocks  */
        void                    (*map_io)(void);/* IO mapping function  */
        void                    (*init_early)(void);
index 0f7b7620e9a554b0b4a0ba4939a60c905e0a435c..5506618119f923b3fa6e0484b3552c6d0dbc1e5d 100644 (file)
@@ -41,6 +41,14 @@ extern void mcpm_entry_point(void);
  */
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
 
+/*
+ * This sets an early poke i.e a value to be poked into some address
+ * from very early assembly code before the CPU is ungated.  The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+                        unsigned long poke_phys_addr, unsigned long poke_val);
+
 /*
  * CPU/cluster power operations API for higher subsystems to use.
  */
@@ -76,8 +84,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously, in which case the caller should take appropriate action.
  */
 void mcpm_cpu_power_down(void);
 
@@ -98,8 +109,11 @@ void mcpm_cpu_power_down(void);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously, in which case the caller should take appropriate action.
  */
 void mcpm_cpu_suspend(u64 expected_residency);
 
index e750a938fd3ce283ccd351cb73ed9c3d8df84e75..6748d6295a1a07ec23738d660dafb88489f99595 100644 (file)
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24        0x81000000
+#define __PV_BITS_7_0  0x81
+
+extern phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
+extern u64 __pv_phys_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
 
-extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET __pv_phys_offset
 
 #define __pv_stub(from,to,instr,type)                  \
@@ -185,22 +191,58 @@ extern unsigned long __pv_phys_offset;
        : "=r" (to)                                     \
        : "r" (from), "I" (type))
 
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t)                            \
+       __asm__ volatile("@ __pv_stub_mov\n"            \
+       "1:     mov     %R0, %1\n"                      \
+       "       .pushsection .pv_table,\"a\"\n"         \
+       "       .long   1b\n"                           \
+       "       .popsection\n"                          \
+       : "=r" (t)                                      \
+       : "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y)                      \
+       __asm__ volatile("@ __pv_add_carry_stub\n"      \
+       "1:     adds    %Q0, %1, %2\n"                  \
+       "       adc     %R0, %R0, #0\n"                 \
+       "       .pushsection .pv_table,\"a\"\n"         \
+       "       .long   1b\n"                           \
+       "       .popsection\n"                          \
+       : "+r" (y)                                      \
+       : "r" (x), "I" (__PV_BITS_31_24)                \
+       : "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
-       unsigned long t;
-       __pv_stub(x, t, "add", __PV_BITS_31_24);
+       phys_addr_t t;
+
+       if (sizeof(phys_addr_t) == 4) {
+               __pv_stub(x, t, "add", __PV_BITS_31_24);
+       } else {
+               __pv_stub_mov_hi(t);
+               __pv_add_carry_stub(x, t);
+       }
        return t;
 }
 
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
 {
        unsigned long t;
        __pv_stub(x, t, "sub", __PV_BITS_31_24);
        return t;
 }
+
 #else
-#define __virt_to_phys(x)      ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+       return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+       return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
 #endif
 #endif
 #endif /* __ASSEMBLY__ */
@@ -238,16 +280,31 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
 
 static inline void *phys_to_virt(phys_addr_t x)
 {
-       return (void *)(__phys_to_virt((unsigned long)(x)));
+       return (void *)__phys_to_virt(x);
 }
 
 /*
  * Drivers should NOT use these either.
  */
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
-#define __va(x)                        ((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
+/*
+ * These are for systems that have a hardware interconnect supported alias of
+ * physical memory for idmap purposes.  Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+       if (arch_virt_to_idmap)
+               return arch_virt_to_idmap(x);
+       else
+               return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x)       __virt_to_idmap((unsigned long)(x))
+
 /*
  * Virtual <-> DMA view memory address translations
  * Again, these are *only* valid on the kernel direct mapped RAM
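
With phys_addr_t now 64-bit capable, the patched stubs split __pv_offset into a high word (the mov/mvn immediate from __pv_stub_mov_hi) and a 16MiB-aligned low word (the adds immediate), folding the carry with adc. A rough C model of that arithmetic, purely illustrative, where pv_offset stands in for the boot-time patched immediates:

    static inline u64 model_virt_to_phys(unsigned long x, u64 pv_offset)
    {
            u32 lo = (u32)pv_offset;          /* bits 31..24 only (16MiB aligned) */
            u32 hi = (u32)(pv_offset >> 32);  /* patched mov/mvn high word */
            u32 sum = (u32)x + lo;            /* adds: may set the carry flag */
            u32 carry = sum < lo;             /* adc %R0, %R0, #0 folds it in */

            return ((u64)(hi + carry) << 32) | sum;
    }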
index f97ee02386ee063ba12b78786c9a6cd8a4106676..86a659a19526c75a2ba3b91ce839925cc52d26de 100644 (file)
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
+/*
+ * We don't have huge page support for short descriptors; for the moment,
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd)   (0)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_2LEVEL_H */
index 5689c18c85f5ebafb95a9bb2cc99fda227992976..39c54cfa03e9b103ef39982a43bac74d5436b791 100644 (file)
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         (!(pmd_val(pmd) & PMD_SECT_RDONLY))
 
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)    (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
index 413f3876341cd6fd2e7bc4b1c6a71873cadaa887..c3d5fc124a054c6309ffacdb2845ff22fd5bfa56 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
+#include <asm/unified.h>
 
 #ifdef __KERNEL__
 #define STACK_TOP      ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  task_pt_regs(tsk)->ARM_pc
 #define KSTK_ESP(tsk)  task_pt_regs(tsk)->ARM_sp
 
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up)                                         \
+       "9998:  " smp "\n"                                              \
+       "       .pushsection \".alt.smp.init\", \"a\"\n"                \
+       "       .long   9998b\n"                                        \
+       "       " up "\n"                                               \
+       "       .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
 /*
  * Prefetching support - only ARMv5.
  */
@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
 {
        __asm__ __volatile__(
                "pld\t%a0"
-               :
-               : "p" (ptr)
-               : "cc");
+               :: "p" (ptr));
 }
 
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 #define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr) prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+       __asm__ __volatile__(
+               ".arch_extension        mp\n"
+               __ALT_SMP_ASM(
+                       WASM(pldw)              "\t%a0",
+                       WASM(pld)               "\t%a0"
+               )
+               :: "p" (ptr));
+}
+#endif
 #endif
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
index a8cae71caceb3fb89c1ec949063b7a0d621dbdca..22a3b9b5d4a16fd4ece50bdfc83859f6ea38352f 100644 (file)
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
 struct smp_operations {
 #ifdef CONFIG_SMP
        /*
index 4f2c28060c9aa227c47e73ac6557c45add91128f..ef3c6072aa45345ae4594f22aebbe9a9ebc538f1 100644 (file)
@@ -5,21 +5,13 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#include <asm/processor.h>
+#include <linux/prefetch.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
  */
-#define ALT_SMP(smp, up)                                       \
-       "9998:  " smp "\n"                                      \
-       "       .pushsection \".alt.smp.init\", \"a\"\n"        \
-       "       .long   9998b\n"                                \
-       "       " up "\n"                                       \
-       "       .popsection\n"
-
 #ifdef CONFIG_THUMB2_KERNEL
-#define SEV            ALT_SMP("sev.w", "nop.w")
 /*
  * For Thumb-2, special care is needed to ensure that the conditional WFE
  * instruction really does assemble to exactly 4 bytes (as required by
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
-#define WFE(cond)      ALT_SMP(                \
+#define WFE(cond)      __ALT_SMP_ASM(          \
        "it " cond "\n\t"                       \
        "wfe" cond ".n",                        \
                                                \
        "nop.w"                                 \
 )
 #else
-#define SEV            ALT_SMP("sev", "nop")
-#define WFE(cond)      ALT_SMP("wfe" cond, "nop")
+#define WFE(cond)      __ALT_SMP_ASM("wfe" cond, "nop")
 #endif
 
+#define SEV            __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        u32 newval;
        arch_spinlock_t lockval;
 
+       prefetchw(&lock->slock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%3]\n"
 "      add     %1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
        unsigned long contended, res;
        u32 slock;
 
+       prefetchw(&lock->slock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%3]\n"
@@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+       return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-       return tickets.owner != tickets.next;
+       return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%1]\n"
 "      teq     %0, #0\n"
@@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned long contended, res;
 
+       prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
@@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)         ((x)->lock == 0)
+#define arch_write_can_lock(x)         (ACCESS_ONCE((x)->lock) == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%2]\n"
 "      adds    %0, %0, #1\n"
@@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
        smp_mb();
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%2]\n"
 "      sub     %0, %0, #1\n"
@@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned long contended, res;
 
+       prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
@@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)          ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)          (ACCESS_ONCE((x)->lock) < 0x80000000)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
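
arch_spin_value_unlocked() takes a lock value rather than a pointer so that code which has already loaded a snapshot of the lock word can test it without touching memory again; the lockref cmpxchg fast path is the motivating user. A loose, hedged sketch of that pattern, modelled on lib/lockref.c rather than taken from this patch:

    struct my_lockref {
            union {
                    u64 lock_count;
                    struct {
                            arch_spinlock_t lock;
                            unsigned int count;
                    };
            };
    };

    static bool my_lockref_get_fast(struct my_lockref *lr)
    {
            struct my_lockref old, new;

            old.lock_count = ACCESS_ONCE(lr->lock_count);
            if (!arch_spin_value_unlocked(old.lock))
                    return false;           /* contended: take the lock instead */

            new.lock_count = old.lock_count;
            new.count++;
            return cmpxchg64(&lr->lock_count, old.lock_count,
                             new.lock_count) == old.lock_count;
    }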
index b262d2f8b4784eba5b6805d431468c285c434b88..47663fcb10ad7aad7e3bc87f31636a7a77342e36 100644 (file)
@@ -25,7 +25,7 @@ typedef struct {
 #define __ARCH_SPIN_LOCK_UNLOCKED      { { 0 } }
 
 typedef struct {
-       volatile unsigned int lock;
+       u32 lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED                { 0 }
index f1d96d4e8092a652aeb84f5825082a8f9064cd20..73ddd7239b33aa77d178ae1341c0c46c736a08e5 100644 (file)
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
                unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                pr_warning("%s called with max args %d, handling only %d\n",
                           __func__, i + n, SYSCALL_MAX_ARGS);
index 38960264040cd989068b7e897979194bd5d30bc4..def9e570199f90a0c42dc7da0f8998fba6a0ab39 100644 (file)
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
                asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }
 
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
-       unsigned int midr = read_cpuid_id();
-
-       /* Cortex-A15 r0p0..r3p2 affected */
-       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-               return 0;
-       return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-       /*
-        * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
-        */
-       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-       dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
-       return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
 /*
  *     flush_pmd_entry
  *
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
 
 #endif
 
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+       if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+               erratum_a15_798181_handler))
+               return erratum_a15_798181_handler();
+       return false;
+}
+#endif
+
 #endif
index 7e1f76027f666e252c35bd320d4518e110548c47..72abdc541f38f6e892a24050d1992fc37098f5e3 100644 (file)
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
 
index f5989f46b4d2d450f18b24faa946de750394ec13..b88beaba6b4a5cf9c0ccded73723c4ea41539167 100644 (file)
@@ -38,6 +38,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)       instr.w
 #define BSYM(sym)      sym + 1
+#else
+#define WASM(instr)    #instr ".w"
 #endif
 
 #else  /* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)       instr
 #define BSYM(sym)      sym
+#else
+#define WASM(instr)    #instr
 #endif
 
 #endif /* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644 (file)
index 0000000..2265a19
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD              0x000c
+#define UARTn_CMD_TXEN                 0x0004
+
+#define        UARTn_STATUS            0x0010
+#define        UARTn_STATUS_TXC                0x0020
+#define        UARTn_STATUS_TXBL               0x0040
+
+#define        UARTn_TXDATA            0x0034
+
+               .macro  addruart, rx, tmp
+               ldr     \rx, =(CONFIG_DEBUG_UART_PHYS)
+
+               /*
+                * Enable TX. The driver might disable it to save energy. We
+                * don't bother disabling it again at the end, since power
+                * consumption isn't that important while debugging.
+                */
+               ldr     \tmp, =(UARTn_CMD_TXEN)
+               str     \tmp, [\rx, #UARTn_CMD]
+               .endm
+
+               .macro  senduart,rd,rx
+               strb    \rd, [\rx, #UARTn_TXDATA]
+               .endm
+
+               .macro  waituart,rd,rx
+1001:          ldr     \rd, [\rx, #UARTn_STATUS]
+               tst     \rd, #UARTn_STATUS_TXBL
+               beq     1001b
+               .endm
+
+               .macro  busyuart,rd,rx
+1001:          ldr     \rd, [\rx, #UARTn_STATUS]
+               tst     \rd, #UARTn_STATUS_TXC
+               bne     1001b
+               .endm
index 18d76fd5a2afb2bf27b91980f29c77094e6638c0..70a1c9da30ca39d4d4d79e8e73c3b8aec0d607e3 100644 (file)
@@ -7,6 +7,7 @@ header-y += hwcap.h
 header-y += ioctls.h
 header-y += kvm_para.h
 header-y += mman.h
+header-y += perf_regs.h
 header-y += posix_types.h
 header-y += ptrace.h
 header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..ce59448
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+       PERF_REG_ARM_R0,
+       PERF_REG_ARM_R1,
+       PERF_REG_ARM_R2,
+       PERF_REG_ARM_R3,
+       PERF_REG_ARM_R4,
+       PERF_REG_ARM_R5,
+       PERF_REG_ARM_R6,
+       PERF_REG_ARM_R7,
+       PERF_REG_ARM_R8,
+       PERF_REG_ARM_R9,
+       PERF_REG_ARM_R10,
+       PERF_REG_ARM_FP,
+       PERF_REG_ARM_IP,
+       PERF_REG_ARM_SP,
+       PERF_REG_ARM_LR,
+       PERF_REG_ARM_PC,
+       PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
index 5140df5f23aa485214914a8dfbfdf31dc04a5691..9b818ca3610bce4d61cf149ec62c0ebd2de8673c 100644 (file)
@@ -78,6 +78,7 @@ obj-$(CONFIG_CPU_XSC3)                += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)       += xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)          += pj4-cp0.o
 obj-$(CONFIG_IWMMXT)           += iwmmxt.o
+obj-$(CONFIG_PERF_EVENTS)      += perf_regs.o
 obj-$(CONFIG_HW_PERF_EVENTS)   += perf_event.o perf_event_cpu.o
 AFLAGS_iwmmxt.o                        := -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
index 60d3b738d4200987e76c75b2e9da513920087a89..1f031ddd0667a3e842317a90c59db3acc2284894 100644 (file)
@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
 
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_offset);
 #endif
index 74ad15d1a065fba97aac1d4ac43ff33a9e994470..bc6bd9683ba4555d9713e1693b258e6204fc90a5 100644 (file)
@@ -442,10 +442,10 @@ local_restart:
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
 
        add     r1, sp, #S_OFF
-       cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2:     cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
-2:     mov     why, #0                         @ no longer a real syscall
+       mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
index de23a9beed1333d86d1143a7b31613ff7376440d..39f89fbd5111ee9f0a96d6712e1f0f2ba308147d 100644 (file)
 #ifdef CONFIG_CONTEXT_TRACKING
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
-       bl      user_exit
+       bl      context_tracking_user_exit
        ldmia   sp!, {r0-r3, ip, lr}
        .else
-       bl      user_exit
+       bl      context_tracking_user_exit
        .endif
 #endif
        .endm
 #ifdef CONFIG_CONTEXT_TRACKING
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
-       bl      user_enter
+       bl      context_tracking_user_enter
        ldmia   sp!, {r0-r3, ip, lr}
        .else
-       bl      user_enter
+       bl      context_tracking_user_enter
        .endif
 #endif
        .endm
index 2c7cc1e03473aee9463e86d7dbedfc999f6e51f7..0f6c6d1fe447918dd5d98193b53a95a6e0a0fc1e 100644 (file)
@@ -487,7 +487,26 @@ __fixup_smp:
        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
        and     r0, r0, #0xc0000000     @ multiprocessing extensions and
        teq     r0, #0x80000000         @ not part of a uniprocessor system?
-       moveq   pc, lr                  @ yes, assume SMP
+       bne    __fixup_smp_on_up        @ no, assume UP
+
+       @ Core indicates it is SMP. Check for Aegis SOC where a single
+       @ Cortex-A9 CPU is present but SMP operations fault.
+       mov     r4, #0x41000000
+       orr     r4, r4, #0x0000c000
+       orr     r4, r4, #0x00000090
+       teq     r3, r4                  @ Check for ARM Cortex-A9
+       movne   pc, lr                  @ Not ARM Cortex-A9,
+
+       @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
+       @ below address check will need to be #ifdef'd or equivalent
+       @ for the Aegis platform.
+       mrc     p15, 4, r0, c15, c0     @ get SCU base address
+       teq     r0, #0x0                @ '0' on actual UP A9 hardware
+       beq     __fixup_smp_on_up       @ So it's an A9 UP
+       ldr     r0, [r0, #4]            @ read SCU Config
+       and     r0, r0, #0x3            @ number of CPUs
+       teq     r0, #0x0                @ is 1?
+       movne   pc, lr
 
 __fixup_smp_on_up:
        adr     r0, 1f
@@ -536,6 +555,14 @@ ENTRY(fixup_smp)
        ldmfd   sp!, {r4 - r6, pc}
 ENDPROC(fixup_smp)
 
+#ifdef __ARMEB__
+#define LOW_OFFSET     0x4
+#define HIGH_OFFSET    0x0
+#else
+#define LOW_OFFSET     0x0
+#define HIGH_OFFSET    0x4
+#endif
+
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 
 /* __fixup_pv_table - patch the stub instructions with the delta between
@@ -546,17 +573,20 @@ ENDPROC(fixup_smp)
        __HEAD
 __fixup_pv_table:
        adr     r0, 1f
-       ldmia   r0, {r3-r5, r7}
-       sub     r3, r0, r3      @ PHYS_OFFSET - PAGE_OFFSET
+       ldmia   r0, {r3-r7}
+       mvn     ip, #0
+       subs    r3, r0, r3      @ PHYS_OFFSET - PAGE_OFFSET
        add     r4, r4, r3      @ adjust table start address
        add     r5, r5, r3      @ adjust table end address
-       add     r7, r7, r3      @ adjust __pv_phys_offset address
-       str     r8, [r7]        @ save computed PHYS_OFFSET to __pv_phys_offset
+       add     r6, r6, r3      @ adjust __pv_phys_offset address
+       add     r7, r7, r3      @ adjust __pv_offset address
+       str     r8, [r6, #LOW_OFFSET]   @ save computed PHYS_OFFSET to __pv_phys_offset
+       strcc   ip, [r7, #HIGH_OFFSET]  @ save to __pv_offset high bits
        mov     r6, r3, lsr #24 @ constant for add/sub instructions
        teq     r3, r6, lsl #24 @ must be 16MiB aligned
 THUMB( it      ne              @ cross section branch )
        bne     __error
-       str     r6, [r7, #4]    @ save to __pv_offset
+       str     r3, [r7, #LOW_OFFSET]   @ save to __pv_offset low bits
        b       __fixup_a_pv_table
 ENDPROC(__fixup_pv_table)
 
@@ -565,10 +595,19 @@ ENDPROC(__fixup_pv_table)
        .long   __pv_table_begin
        .long   __pv_table_end
 2:     .long   __pv_phys_offset
+       .long   __pv_offset
 
        .text
 __fixup_a_pv_table:
+       adr     r0, 3f
+       ldr     r6, [r0]
+       add     r6, r6, r3
+       ldr     r0, [r6, #HIGH_OFFSET]  @ pv_offset high word
+       ldr     r6, [r6, #LOW_OFFSET]   @ pv_offset low word
+       mov     r6, r6, lsr #24
+       cmn     r0, #1
 #ifdef CONFIG_THUMB2_KERNEL
+       moveq   r0, #0x200000   @ set bit 21, mov to mvn instruction
        lsls    r6, #24
        beq     2f
        clz     r7, r6
@@ -582,18 +621,28 @@ __fixup_a_pv_table:
        b       2f
 1:     add     r7, r3
        ldrh    ip, [r7, #2]
-       and     ip, 0x8f00
-       orr     ip, r6  @ mask in offset bits 31-24
+       tst     ip, #0x4000
+       and     ip, #0x8f00
+       orrne   ip, r6  @ mask in offset bits 31-24
+       orreq   ip, r0  @ mask in offset bits 7-0
        strh    ip, [r7, #2]
+       ldrheq  ip, [r7]
+       biceq   ip, #0x20
+       orreq   ip, ip, r0, lsr #16
+       strheq  ip, [r7]
 2:     cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
        bcc     1b
        bx      lr
 #else
+       moveq   r0, #0x400000   @ set bit 22, mov to mvn instruction
        b       2f
 1:     ldr     ip, [r7, r3]
        bic     ip, ip, #0x000000ff
-       orr     ip, ip, r6      @ mask in offset bits 31-24
+       tst     ip, #0xf00      @ check the rotation field
+       orrne   ip, ip, r6      @ mask in offset bits 31-24
+       biceq   ip, ip, #0x400000       @ clear bit 22
+       orreq   ip, ip, r0      @ mask in offset bits 7-0
        str     ip, [r7, r3]
 2:     cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
@@ -602,28 +651,29 @@ __fixup_a_pv_table:
 #endif
 ENDPROC(__fixup_a_pv_table)
 
+3:     .long __pv_offset
+
 ENTRY(fixup_pv_table)
        stmfd   sp!, {r4 - r7, lr}
-       ldr     r2, 2f                  @ get address of __pv_phys_offset
        mov     r3, #0                  @ no offset
        mov     r4, r0                  @ r0 = table start
        add     r5, r0, r1              @ r1 = table size
-       ldr     r6, [r2, #4]            @ get __pv_offset
        bl      __fixup_a_pv_table
        ldmfd   sp!, {r4 - r7, pc}
 ENDPROC(fixup_pv_table)
 
-       .align
-2:     .long   __pv_phys_offset
-
        .data
        .globl  __pv_phys_offset
        .type   __pv_phys_offset, %object
 __pv_phys_offset:
-       .long   0
-       .size   __pv_phys_offset, . - __pv_phys_offset
+       .quad   0
+       .size   __pv_phys_offset, . -__pv_phys_offset
+
+       .globl  __pv_offset
+       .type   __pv_offset, %object
 __pv_offset:
-       .long   0
+       .quad   0
+       .size   __pv_offset, . -__pv_offset
 #endif
 
 #include "head-common.S"
index e186ee1e63f6c85261f96844a594080e719c2e07..bc3f2efa0d86b4ff55d6b19833eae688b111fd27 100644 (file)
@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct pmu *leader_pmu = event->group_leader->pmu;
 
        if (is_software_event(event))
                return 1;
 
-       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+       if (event->state < PERF_EVENT_STATE_OFF)
                return 1;
 
        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
new file mode 100644 (file)
index 0000000..6e4379c
--- /dev/null
@@ -0,0 +1,30 @@
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+       if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
+               return 0;
+
+       return regs->uregs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+       if (!mask || mask & REG_RESERVED)
+               return -EINVAL;
+
+       return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+       return PERF_SAMPLE_REGS_ABI_32;
+}
index 0e1e2b3afa45864b5776c177ceb5001402e31681..6b4ce802ac4ea3e2d88bbe04adce6713651fb066 100644 (file)
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
 #endif
 
 extern void paging_init(const struct machine_desc *desc);
+extern void early_paging_init(const struct machine_desc *,
+                             struct proc_info_list *);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
@@ -599,6 +601,8 @@ static void __init setup_processor(void)
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
+       erratum_a15_798181_init();
+
        feat_v6_fixup();
 
        cacheid_init();
@@ -878,6 +882,8 @@ void __init setup_arch(char **cmdline_p)
        parse_early_param();
 
        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+       early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);
 
index db1536b8b30b497fe11923dfe9f67a72f86f7954..62246020191138e7d3bcdc90cf5d7c645b7ec1b9 100644 (file)
@@ -55,6 +55,7 @@
  * specific registers and some other data for resume.
  *  r0 = suspend function arg0
  *  r1 = suspend function
+ *  r2 = MPIDR value the resuming CPU will use
  */
 ENTRY(__cpu_suspend)
        stmfd   sp!, {r4 - r11, lr}
@@ -67,23 +68,18 @@ ENTRY(__cpu_suspend)
        mov     r5, sp                  @ current virtual SP
        add     r4, r4, #12             @ Space for pgd, virt sp, phys resume fn
        sub     sp, sp, r4              @ allocate CPU state on stack
-       stmfd   sp!, {r0, r1}           @ save suspend func arg and pointer
-       add     r0, sp, #8              @ save pointer to save block
-       mov     r1, r4                  @ size of save block
-       mov     r2, r5                  @ virtual SP
        ldr     r3, =sleep_save_sp
+       stmfd   sp!, {r0, r1}           @ save suspend func arg and pointer
        ldr     r3, [r3, #SLEEP_SAVE_SP_VIRT]
-       ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
-        ALT_UP_B(1f)
-       ldr     r8, =mpidr_hash
-       /*
-        * This ldmia relies on the memory layout of the mpidr_hash
-        * struct mpidr_hash.
-        */
-       ldmia   r8, {r4-r7}     @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
-       compute_mpidr_hash      lr, r5, r6, r7, r9, r4
-       add     r3, r3, lr, lsl #2
-1:
+       ALT_SMP(ldr r0, =mpidr_hash)
+       ALT_UP_B(1f)
+       /* This ldmia relies on the memory layout of the mpidr_hash struct */
+       ldmia   r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
+       compute_mpidr_hash      r0, r6, r7, r8, r2, r1
+       add     r3, r3, r0, lsl #2
+1:     mov     r2, r5                  @ virtual SP
+       mov     r1, r4                  @ size of save block
+       add     r0, sp, #8              @ pointer to save block
        bl      __cpu_suspend_save
        adr     lr, BSYM(cpu_suspend_abort)
        ldmfd   sp!, {r0, pc}           @ call suspend fn
index 72024ea8a3a6c07038103e153527cb2454e4552f..5c820cbcf918140873650f9a4a61376d4c951bcf 100644 (file)
@@ -66,6 +66,7 @@ enum ipi_msg_type {
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_COMPLETION,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -80,7 +81,7 @@ void __init smp_set_ops(struct smp_operations *ops)
 
 static unsigned long get_arch_pgd(pgd_t *pgd)
 {
-       phys_addr_t pgdir = virt_to_phys(pgd);
+       phys_addr_t pgdir = virt_to_idmap(pgd);
        BUG_ON(pgdir & ARCH_PGD_MASK);
        return pgdir >> ARCH_PGD_SHIFT;
 }
@@ -456,6 +457,7 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_COMPLETION, "completion interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -515,6 +517,19 @@ static void ipi_cpu_stop(unsigned int cpu)
                cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+       per_cpu(cpu_completion, cpu) = completion;
+       return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+       complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -565,6 +580,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+       case IPI_COMPLETION:
+               irq_enter();
+               ipi_complete(cpu);
+               irq_exit();
+               break;
+
        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
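The IPI_COMPLETION plumbing added above lets one CPU block until another CPU signals it with an IPI. A minimal, hypothetical caller is sketched below; register_ipi_completion() and ipi_complete() come from the hunks above, while the way the remote CPU actually raises the returned IPI number is platform-specific and left as a placeholder.

#include <linux/completion.h>
#include <linux/smp.h>

/* Hypothetical helper: block the CPU 'cpu' (the CPU this code runs on,
 * assumed pinned) until some other CPU raises IPI_COMPLETION at it. */
static void wait_for_ipi_completion(int cpu)
{
	struct completion done;
	int ipi;

	init_completion(&done);
	ipi = register_ipi_completion(&done, cpu);

	/* ... hand 'ipi' and 'cpu' to the other CPU by platform-specific
	 * means so it can raise the IPI ... */

	wait_for_completion(&done);	/* completed by ipi_complete() */
}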
index 83ccca303df83c4a1f40dce35f324153d979ad16..95d063620b76a6f706bccc23635537ed4bceb01a 100644 (file)
@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
        local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+       dsb(ish);
+       return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+       dsb(ish);
+       return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+       unsigned int midr = read_cpuid_id();
+       unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+       /* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+           (revidr & 0x210) == 0x210) {
+               return;
+       }
+       if (revidr & 0x10)
+               erratum_a15_798181_handler = erratum_a15_798181_partial;
+       else
+               erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
        dmb();
@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
        if (!erratum_a15_798181())
                return;
 
-       dummy_flush_tlb_a15_erratum();
        smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
        if (!erratum_a15_798181())
                return;
 
-       dummy_flush_tlb_a15_erratum();
        this_cpu = get_cpu();
        a15_erratum_get_cpumask(this_cpu, mm, &mask);
        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
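The erratum_a15_798181() predicate tested in the two hunks above is defined in a header not shown in this excerpt; with the handler pointer introduced here it presumably reduces to something like the sketch below (an assumption based on the code added above, not a quote of the header).

extern bool (*erratum_a15_798181_handler)(void);

/* Sketch: run the handler installed by erratum_a15_798181_init(), if any,
 * and only request the IPI broadcast when it returns true. */
static inline bool erratum_a15_798181(void)
{
	if (IS_ENABLED(CONFIG_ARM_ERRATA_798181) && erratum_a15_798181_handler)
		return erratum_a15_798181_handler();

	return false;
}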
index 41cf3cbf756de473be3bb1ebb268445aeacadc5d..2835d35234ca459f4d7086a6f811ff35652a6a11 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
 
 #ifdef CONFIG_MMU
@@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void);
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
        struct mm_struct *mm = current->active_mm;
+       u32 __mpidr = cpu_logical_map(smp_processor_id());
        int ret;
 
        if (!idmap_pgd)
@@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         * resume (indicated by a zero return code), we need to switch
         * back to the correct page tables.
         */
-       ret = __cpu_suspend(arg, fn);
+       ret = __cpu_suspend(arg, fn, __mpidr);
        if (ret == 0) {
                cpu_switch_mm(mm->pgd, mm);
                local_flush_bp_all();
@@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 #else
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-       return __cpu_suspend(arg, fn);
+       u32 __mpidr = cpu_logical_map(smp_processor_id());
+       return __cpu_suspend(arg, fn, __mpidr);
 }
 #define        idmap_pgd       NULL
 #endif
index 71e08baee209387f899e14a32fa32e02682b0ae8..c02ba4af599f417113fdb2c260270ae7162575e6 100644 (file)
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-       struct kvm_regs *cpu_reset;
+       struct kvm_regs *reset_regs;
        const struct kvm_irq_level *cpu_vtimer_irq;
 
        switch (vcpu->arch.target) {
        case KVM_ARM_TARGET_CORTEX_A15:
                if (vcpu->vcpu_id > a15_max_cpu_idx)
                        return -EINVAL;
-               cpu_reset = &a15_regs_reset;
+               reset_regs = &a15_regs_reset;
                vcpu->arch.midr = read_cpuid_id();
                cpu_vtimer_irq = &a15_vtimer_irq;
                break;
@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        }
 
        /* Reset core registers */
-       memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+       memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
 
        /* Reset CP15 registers */
        kvm_reset_coprocs(vcpu);
index d6408d1ee543fe5e3ceabbcda01b25efb07676ba..e0c68d5bb7dc25dd3fa93dc0fa1b3899f5b09019 100644 (file)
@@ -10,6 +10,11 @@ UNWIND(      .fnstart        )
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
+#if __LINUX_ARM_ARCH__ >= 7
+       .arch_extension mp
+       ALT_SMP(W(pldw) [r1])
+       ALT_UP(W(nop))
+#endif
        mov     r3, r2, lsl r3
 1:     ldrex   r2, [r1]
        \instr  r2, r2, r3
index 025f742dd4df6bf79b279babd264980d851f01d5..3e58d710013c3ad9b377fc76e6dad58f377e88a7 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/hardirq.h> /* for in_atomic() */
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/hugetlb.h>
 #include <asm/current.h>
 #include <asm/page.h>
 
@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
                return 0;
 
        pmd = pmd_offset(pud, addr);
-       if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+       if (unlikely(pmd_none(*pmd)))
+               return 0;
+
+       /*
+        * A pmd can be bad if it refers to a HugeTLB or THP page.
+        *
+        * Both THP and HugeTLB pages have the same pmd layout
+        * and should not be manipulated by the pte functions.
+        *
+        * Lock the page table for the destination and check
+        * to see that it's still huge and whether or not we will
+        * need to fault on write, or if we have a splitting THP.
+        */
+       if (unlikely(pmd_thp_or_huge(*pmd))) {
+               ptl = &current->mm->page_table_lock;
+               spin_lock(ptl);
+               if (unlikely(!pmd_thp_or_huge(*pmd)
+                       || pmd_hugewillfault(*pmd)
+                       || pmd_trans_splitting(*pmd))) {
+                       spin_unlock(ptl);
+                       return 0;
+               }
+
+               *ptep = NULL;
+               *ptlp = ptl;
+               return 1;
+       }
+
+       if (unlikely(pmd_bad(*pmd)))
                return 0;
 
        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
                from += tocopy;
                n -= tocopy;
 
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap_unlock(pte, ptl);
+               else
+                       spin_unlock(ptl);
        }
        if (!atomic)
                up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
                addr += tocopy;
                n -= tocopy;
 
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap_unlock(pte, ptl);
+               else
+                       spin_unlock(ptl);
        }
        up_read(&current->mm->mmap_sem);
 
index 3b0a9538093c168cb4cd80f4ff559b0458d0dfb3..c1b737097c9543faf67c4ed78f2617e3add0606f 100644 (file)
@@ -98,7 +98,6 @@ obj-y                         += leds.o
 # Power Management
 obj-$(CONFIG_PM)               += pm.o
 obj-$(CONFIG_AT91_SLOW_CLOCK)  += pm_slowclock.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 
 ifeq ($(CONFIG_PM_DEBUG),y)
 CFLAGS_pm.o += -DDEBUG
index 4aad93d54d6f059cc275ac819d7c55df196df4b1..25805f2f6010f3d7b98035f9c8e3b1e09524a4e3 100644 (file)
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -327,6 +328,7 @@ static void __init at91rm9200_ioremap_registers(void)
 {
        at91rm9200_ioremap_st(AT91RM9200_BASE_ST);
        at91_ioremap_ramc(0, AT91RM9200_BASE_MC, 256);
+       at91_pm_set_standby(at91rm9200_standby);
 }
 
 static void __init at91rm9200_initialize(void)
index 180b3024bec3ab36cc2d7cdb62e92d7b2298d297..f607deb40f4da6a88a0778b203cdcfbbc8518ecc 100644 (file)
@@ -93,7 +93,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
 
 static struct irqaction at91rm9200_timer_irq = {
        .name           = "at91_tick",
-       .flags          = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .flags          = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = at91rm9200_timer_interrupt,
        .irq            = NR_IRQS_LEGACY + AT91_ID_SYS,
 };
index 5de6074b4f4f3681d308cfb34a8554a05550b2f2..f8629a3fa2452791b6215c19f77124813e1d3f5f 100644 (file)
@@ -28,6 +28,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -342,6 +343,7 @@ static void __init at91sam9260_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9260_initialize(void)
index 0e0793241ab7e1938dc4ec3fd961557344342369..1f3867a17a289beee4bb1a44ae64ca3bc96b044d 100644 (file)
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -284,6 +285,7 @@ static void __init at91sam9261_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9261_initialize(void)
index 6ce7d18508934a428ea2ac94ab51162b683b43d1..90d455d294a1814e7e72722b5614b4d15548c298 100644 (file)
@@ -26,6 +26,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -321,6 +322,7 @@ static void __init at91sam9263_ioremap_registers(void)
        at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
        at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
        at91_ioremap_matrix(AT91SAM9263_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9263_initialize(void)
index 3a4bc2e1a65e73d3b2187f90f6b84a43e7ec6468..bb392320a0dd39d978bd5f1d3e861d2fce14b14c 100644 (file)
@@ -171,7 +171,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
 
 static struct irqaction at91sam926x_pit_irq = {
        .name           = "at91_tick",
-       .flags          = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .flags          = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = at91sam926x_pit_interrupt,
        .irq            = NR_IRQS_LEGACY + AT91_ID_SYS,
 };
index 474ee04d24b93729bdf32e2e4d64a7db0ea9bed8..e9bf0b8f40eb745d317e2d1d50c500eea1491aa4 100644 (file)
@@ -26,6 +26,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -370,6 +371,7 @@ static void __init at91sam9g45_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9G45_BASE_MATRIX);
+       at91_pm_set_standby(at91_ddr_standby);
 }
 
 static void __init at91sam9g45_initialize(void)
index 721a1a34dd1d86027161e66b9cfaee7161ebfd03..c40c1e2ef80fa9d1485c06669afad3caf0ea17fc 100644 (file)
 #include "at91_rstc.h"
                        .arm
 
+/*
+ * at91_ramc_base is an array of void *; the second entry is initialized
+ * to NULL if only one DDR controller is present (with or without DT)
+ */
                        .globl  at91sam9g45_restart
 
 at91sam9g45_restart:
                        ldr     r5, =at91_ramc_base             @ preload constants
                        ldr     r0, [r5]
+                       ldr     r5, [r5, #4]                    @ ddr1
+                       cmp     r5, #0
                        ldr     r4, =at91_rstc_base
                        ldr     r1, [r4]
 
@@ -30,6 +36,8 @@ at91sam9g45_restart:
 
                        .balign 32                              @ align to cache line
 
+                       strne   r2, [r5, #AT91_DDRSDRC_RTR]     @ disable DDR1 access
+                       strne   r3, [r5, #AT91_DDRSDRC_LPR]     @ power down DDR1
                        str     r2, [r0, #AT91_DDRSDRC_RTR]     @ disable DDR0 access
                        str     r3, [r0, #AT91_DDRSDRC_LPR]     @ power down DDR0
                        str     r4, [r1, #AT91_RSTC_CR]         @ reset processor
index d4ec0d9a9872660e22f2e3c95fc2e8c154cf38e6..88995af09c043abf6198124d1c651d60ac03b0ff 100644 (file)
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -287,6 +288,7 @@ static void __init at91sam9rl_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9RL_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9rl_initialize(void)
index 2919eba41ff4e908cd85f4d7eb3cc76a0e5fef56..c0e637adf65d2555adddaa1897730fc4021d04d7 100644 (file)
@@ -57,7 +57,7 @@ static irqreturn_t at91x40_timer_interrupt(int irq, void *dev_id)
 
 static struct irqaction at91x40_timer_irq = {
        .name           = "at91_tick",
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
+       .flags          = IRQF_TIMER,
        .handler        = at91x40_timer_interrupt
 };
 
index 0b153c87521d8e73feaf6b9bc53291431bc6b7a8..f4f8735315dafd8247f686e756423d8b9608a013 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/at73c213.h>
 #include <linux/clk.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 
index 3284df05df14be82b8bf7acce98ef090ccf09b56..947e134ac4c3996700a6b075d24ab5afbeb2dc3f 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/fb.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
index 15afb5d9271f2237e3c61acd920b2f9c6d6e0a78..9986542e8060119fcad4eee37564fdf3d8d164a6 100644 (file)
@@ -39,6 +39,8 @@
 #include "at91_rstc.h"
 #include "at91_shdwc.h"
 
+static void (*at91_pm_standby)(void);
+
 static void __init show_reset_status(void)
 {
        static char reset[] __initdata = "reset";
@@ -266,14 +268,8 @@ static int at91_pm_enter(suspend_state_t state)
                         * For ARM 926 based chips, this requirement is weaker
                         * as at91sam9 can access a RAM in self-refresh mode.
                         */
-                       if (cpu_is_at91rm9200())
-                               at91rm9200_standby();
-                       else if (cpu_is_at91sam9g45())
-                               at91sam9g45_standby();
-                       else if (cpu_is_at91sam9263())
-                               at91sam9263_standby();
-                       else
-                               at91sam9_standby();
+                       if (at91_pm_standby)
+                               at91_pm_standby();
                        break;
 
                case PM_SUSPEND_ON:
@@ -314,6 +310,18 @@ static const struct platform_suspend_ops at91_pm_ops = {
        .end    = at91_pm_end,
 };
 
+static struct platform_device at91_cpuidle_device = {
+       .name = "cpuidle-at91",
+};
+
+void at91_pm_set_standby(void (*at91_standby)(void))
+{
+       if (at91_standby) {
+               at91_cpuidle_device.dev.platform_data = at91_standby;
+               at91_pm_standby = at91_standby;
+       }
+}
+
 static int __init at91_pm_init(void)
 {
 #ifdef CONFIG_AT91_SLOW_CLOCK
@@ -325,6 +333,9 @@ static int __init at91_pm_init(void)
        /* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. */
        if (cpu_is_at91rm9200())
                at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0);
+
+       if (at91_cpuidle_device.dev.platform_data)
+               platform_device_register(&at91_cpuidle_device);
 
        suspend_set_ops(&at91_pm_ops);
 
index 2f5908f0b8c5ec6dc9481f85bcba758d98f00d88..3ed190ce062bd5add5a426e07b1cf6c5d1e5b89a 100644 (file)
 #ifndef __ARCH_ARM_MACH_AT91_PM
 #define __ARCH_ARM_MACH_AT91_PM
 
+#include <asm/proc-fns.h>
+
 #include <mach/at91_ramc.h>
 #include <mach/at91rm9200_sdramc.h>
 
+extern void at91_pm_set_standby(void (*at91_standby)(void));
+
 /*
  * The AT91RM9200 goes into self-refresh mode with this command, and will
  * terminate self-refresh automatically on the next SDRAM access.
@@ -45,16 +49,18 @@ static inline void at91rm9200_standby(void)
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
-static inline void at91sam9g45_standby(void)
+static inline void at91_ddr_standby(void)
 {
        /* Those two values allow us to delay self-refresh activation
         * to the maximum. */
-       u32 lpr0, lpr1;
-       u32 saved_lpr0, saved_lpr1;
+       u32 lpr0, lpr1 = 0;
+       u32 saved_lpr0, saved_lpr1 = 0;
 
-       saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
-       lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
-       lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+       if (at91_ramc_base[1]) {
+               saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
+               lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
+               lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+       }
 
        saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
        lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
@@ -62,25 +68,29 @@ static inline void at91sam9g45_standby(void)
 
        /* self-refresh mode now */
        at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
-       at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
 
        cpu_do_idle();
 
        at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
-       at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
-static inline void at91sam9263_standby(void)
+static inline void at91sam9_sdram_standby(void)
 {
-       u32 lpr0, lpr1;
-       u32 saved_lpr0, saved_lpr1;
+       u32 lpr0, lpr1 = 0;
+       u32 saved_lpr0, saved_lpr1 = 0;
 
-       saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
-       lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
-       lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+       if (at91_ramc_base[1]) {
+               saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
+               lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
+               lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+       }
 
        saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
        lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
@@ -88,27 +98,14 @@ static inline void at91sam9263_standby(void)
 
        /* self-refresh mode now */
        at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
-       at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
 
        cpu_do_idle();
 
        at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
-       at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
-}
-
-static inline void at91sam9_standby(void)
-{
-       u32 saved_lpr, lpr;
-
-       saved_lpr = at91_ramc_read(0, AT91_SDRAMC_LPR);
-
-       lpr = saved_lpr & ~AT91_SDRAMC_LPCB;
-       at91_ramc_write(0, AT91_SDRAMC_LPR, lpr |
-                       AT91_SDRAMC_LPCB_SELF_REFRESH);
-
-       cpu_do_idle();
-
-       at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 }
 
 #endif
index b17fbcf4d9e8f34dbd876ea96af334dae4780efc..094b3459c288e37700c42ea85a57ced905323eda 100644 (file)
@@ -23,6 +23,7 @@
 #include "at91_shdwc.h"
 #include "soc.h"
 #include "generic.h"
+#include "pm.h"
 
 struct at91_init_soc __initdata at91_boot_soc;
 
@@ -376,15 +377,16 @@ static void at91_dt_rstc(void)
 }
 
 static struct of_device_id ramc_ids[] = {
-       { .compatible = "atmel,at91rm9200-sdramc" },
-       { .compatible = "atmel,at91sam9260-sdramc" },
-       { .compatible = "atmel,at91sam9g45-ddramc" },
+       { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
+       { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
+       { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
        { /*sentinel*/ }
 };
 
 static void at91_dt_ramc(void)
 {
        struct device_node *np;
+       const struct of_device_id *of_id;
 
        np = of_find_matching_node(NULL, ramc_ids);
        if (!np)
@@ -396,6 +398,12 @@ static void at91_dt_ramc(void)
        /* the controller may have 2 banks */
        at91_ramc_base[1] = of_iomap(np, 1);
 
+       of_id = of_match_node(ramc_ids, np);
+       if (!of_id)
+               pr_warn("AT91: ramc no standby function available\n");
+       else
+               at91_pm_set_standby(of_id->data);
+
        of_node_put(np);
 }
 
index e026b19b23eaf1d8546f4996a387c13e1b8ed670..a075b3e0c5c7a5229aa87163e732d9c6b49a7a50 100644 (file)
@@ -40,7 +40,6 @@ config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
        select ARCH_DAVINCI_DA8XX
        select ARCH_HAS_CPUFREQ
-       select CPU_FREQ_TABLE
        select CP_INTC
 
 config ARCH_DAVINCI_DA8XX
index c4bdc0a1c36e7795a21862f8a69a7e8ead71a43b..66b5b3cb53768630812e0ef61429644ecdb7373e 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/spi/spi.h>
index dd1fb24521aa85b16baa9d200b40d28774591cfc..f25a569b000989120d758245f01929c1cb7ff461 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/platform_data/pca953x.h>
 #include <linux/input.h>
 #include <linux/input/tps6507x-ts.h>
index 92b7f770615a83aaf59d5b81ef3526732185df86..7f260b77157a3749851350cbca30272efecb0f5d 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/leds.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -176,7 +176,7 @@ static struct at24_platform_data eeprom_info = {
        .context        = (void *)0x7f00,
 };
 
-static struct snd_platform_data dm365_evm_snd_data = {
+static struct snd_platform_data dm365_evm_snd_data __maybe_unused = {
        .asp_chan_q = EVENTQ_3,
 };
 
index 40bb9b5b87e829c2d4b741625141a58732921d9f..f21fde9dce007a6dcc249d0e3458572b8b16977c 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
index 2bc3651d56cc8f52757b3703c14daadf2360abeb..db2df32da6a887d4042d810c525b1cf9e59c8753 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/i2c/pcf857x.h>
 
 #include <media/tvp514x.h>
index cd0f58730c2ba63234fb5e673156c10214300a80..7aa105b1fd0f7553170134c9a8dae654c931784d 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/etherdevice.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
index d84360148100831265561696b1e661f16a035361..41c7c961579133d9a80070a2c46ce56c07f1d8d9 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
index 52b8571b2e702a2e33b689983ca6fcc8632469bb..ce402cd21fa0a6f809963d71ca3cac3c6f650131 100644 (file)
@@ -15,8 +15,6 @@
 
 #include <mach/hardware.h>
 
-#include <linux/platform_device.h>
-
 #define DAVINCI_UART0_BASE     (IO_PHYS + 0x20000)
 #define DAVINCI_UART1_BASE     (IO_PHYS + 0x20400)
 #define DAVINCI_UART2_BASE     (IO_PHYS + 0x20800)
@@ -39,6 +37,8 @@
 #define UART_DM646X_SCR_TX_WATERMARK   0x08
 
 #ifndef __ASSEMBLY__
+#include <linux/platform_device.h>
+
 extern int davinci_serial_init(struct platform_device *);
 #endif
 
index ba95e5db25011a0801c2ef1f7c27270b713280ee..c17407b16d7c687292fa6e064978d9ca1699ea0a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/clk-provider.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/platform_device.h>
 
 #include <asm/proc-fns.h>
 #include <asm/exception.h>
@@ -294,6 +295,16 @@ void exynos5_restart(enum reboot_mode mode, const char *cmd)
        __raw_writel(val, addr);
 }
 
+static struct platform_device exynos_cpuidle = {
+       .name           = "exynos_cpuidle",
+       .id             = -1,
+};
+
+void __init exynos_cpuidle_init(void)
+{
+       platform_device_register(&exynos_cpuidle);
+}
+
 void __init exynos_init_late(void)
 {
        if (of_machine_is_compatible("samsung,exynos5440"))
index 8646a141ae467b8175aed00d0acf39ae73f02097..b2ac1885d381d6581717a111a92f6cec25723abe 100644 (file)
@@ -22,6 +22,7 @@ struct map_desc;
 void exynos_init_io(void);
 void exynos4_restart(enum reboot_mode mode, const char *cmd);
 void exynos5_restart(enum reboot_mode mode, const char *cmd);
+void exynos_cpuidle_init(void);
 void exynos_init_late(void);
 
 void exynos_firmware_init(void);
index ac139226d63c1d1aefe86a7e25087ad7e6da36da..1bde6ad07d93e5a0fe6ed69d7ecc840268be2586 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/export.h>
 #include <linux/time.h>
+#include <linux/platform_device.h>
 
 #include <asm/proc-fns.h>
 #include <asm/smp_scu.h>
@@ -192,7 +193,7 @@ static void __init exynos5_core_down_clk(void)
        __raw_writel(tmp, EXYNOS5_PWR_CTRL2);
 }
 
-static int __init exynos4_init_cpuidle(void)
+static int __init exynos_cpuidle_probe(struct platform_device *pdev)
 {
        int cpu_id, ret;
        struct cpuidle_device *device;
@@ -226,4 +227,13 @@ static int __init exynos4_init_cpuidle(void)
 
        return 0;
 }
-device_initcall(exynos4_init_cpuidle);
+
+static struct platform_driver exynos_cpuidle_driver = {
+       .probe  = exynos_cpuidle_probe,
+       .driver = {
+               .name = "exynos_cpuidle",
+               .owner = THIS_MODULE,
+       },
+};
+
+module_platform_driver(exynos_cpuidle_driver);
index 0099c6c13bbaa9504dccd6fa760c21273de41d3f..53a3dc37a7303b0a5bbdbdc202fbe211e6b0dd9e 100644 (file)
@@ -25,6 +25,8 @@
 
 static void __init exynos4_dt_machine_init(void)
 {
+       exynos_cpuidle_init();
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index f874b773ca134e231df3e21d3c5c5f65caac33a9..c9f7dd1cdc8f9fd0f7a9c75fd4bb625ad42366f7 100644 (file)
@@ -47,6 +47,8 @@ static void __init exynos5_dt_machine_init(void)
                }
        }
 
+       exynos_cpuidle_init();
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index 8e8437dea3ce7742da2cd8cae56df82efa2dfe93..e2ca238cf0ea7fef2d22d027eb04550356cbba26 100644 (file)
@@ -8,7 +8,7 @@ config ARCH_HIGHBANK
        select ARM_AMBA
        select ARM_ERRATA_764369
        select ARM_ERRATA_775420
-       select ARM_ERRATA_798181
+       select ARM_ERRATA_798181 if SMP
        select ARM_GIC
        select ARM_TIMER_SP804
        select CACHE_L2X0
index deb4b8093b30487821b5e43663adc594ae763958..0d40b35c557cba39980ad2644e6d4a97902f9555 100644 (file)
@@ -90,6 +90,7 @@ struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
        init.ops = &clk_fixup_mux_ops;
        init.parent_names = parents;
        init.num_parents = num_parents;
+       init.flags = 0;
 
        fixup_mux->mux.reg = reg;
        fixup_mux->mux.shift = shift;
index c3cfa4116dc09ffe22f2227c47130be0d900945e..c6b40f3867863c7de1a52e5a1ab72acdc23f758f 100644 (file)
@@ -285,7 +285,7 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
        clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc");
        clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
-       clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0");
+       clk_register_clkdev(clk[cpu_div], NULL, "cpu0");
        clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
 
        mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
index 1a56a33199976ed66907a80bce0336a03f40dcf2..7c0dc4540aa4785270784e3f62ce2b643fd95664 100644 (file)
@@ -328,7 +328,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
        clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
        clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-       clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0");
+       clk_register_clkdev(clk[cpu_podf], NULL, "cpu0");
        clk_register_clkdev(clk[iim_gate], "iim", NULL);
        clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
        clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
@@ -397,7 +397,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
                                mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
        clk[spdif1_sel] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
                                spdif_sel, ARRAY_SIZE(spdif_sel));
-       clk[spdif1_pred] = imx_clk_divider("spdif1_podf", "spdif1_sel", MXC_CCM_CDCDR, 16, 3);
+       clk[spdif1_pred] = imx_clk_divider("spdif1_pred", "spdif1_sel", MXC_CCM_CDCDR, 16, 3);
        clk[spdif1_podf] = imx_clk_divider("spdif1_podf", "spdif1_pred", MXC_CCM_CDCDR, 9, 6);
        clk[spdif1_com_sel] = imx_clk_mux("spdif1_com_sel", MXC_CCM_CSCMR2, 5, 1,
                                mx51_spdif1_com_sel, ARRAY_SIZE(mx51_spdif1_com_sel));
index 85a1b51346c8db12845d3123dd7758af034e3357..699aabe296e1e62b91f7ca1b0a65c5e635ed8515 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/phy.h>
 #include <linux/reboot.h>
 #include <linux/regmap.h>
@@ -226,17 +226,22 @@ static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
        val = readl_relaxed(base + OCOTP_CFG3);
        val >>= OCOTP_CFG3_SPEED_SHIFT;
        if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
-               if (opp_disable(cpu_dev, 1200000000))
+               if (dev_pm_opp_disable(cpu_dev, 1200000000))
                        pr_warn("failed to disable 1.2 GHz OPP\n");
 
 put_node:
        of_node_put(np);
 }
 
-static void __init imx6q_opp_init(struct device *cpu_dev)
+static void __init imx6q_opp_init(void)
 {
        struct device_node *np;
+       struct device *cpu_dev = get_cpu_device(0);
 
+       if (!cpu_dev) {
+               pr_warn("failed to get cpu0 device\n");
+               return;
+       }
        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                pr_warn("failed to find cpu0 node\n");
@@ -268,7 +273,7 @@ static void __init imx6q_init_late(void)
                imx6q_cpuidle_init();
 
        if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
-               imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
+               imx6q_opp_init();
                platform_device_register(&imx6q_cpufreq_pdev);
        }
 }
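This file, and the OMAP opp/pm changes further down, mechanically rename the OPP helpers from opp_*() to dev_pm_opp_*() and struct opp to struct dev_pm_opp; the semantics are unchanged. For orientation, a small, hypothetical lookup using the renamed API (mirroring the pattern in the OMAP pm.c hunks below):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Find the ceiling OPP for a target rate and read its voltage. The OPP
 * table is still RCU protected at this point in history, hence the
 * rcu_read_lock() around the lookup. */
static unsigned long opp_voltage_for(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned long volt = 0;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return volt;
}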
index 19bb6441a7d4aaddb106afa71634606acfae7466..c5f95674e9b72c8caa61052a25c7e90521b3ce42 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/dma-mapping.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/eeprom.h>
index bc0261e99d398f1632986cccf9785a58519c809c..20cc53f4cee1f1372c619925d0fe74e47eafd075 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/smsc911x.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
index e805ac273e9c6cc952e400a4f542f9727881650c..592ddbe031ac714bf7d2d0f0519ba38a3957d0ee 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/io.h>
 #include <linux/mtd/plat-ram.h>
 #include <linux/mtd/physmap.h>
index b726cb1c5fdd638326148cb4ce66783f69dd7659..ac504b67326b12b51ba9c1ccea470b85e74b1dd3 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 
index 0910761e8280894918927554598c1f4d1d1f4b79..8825d1217d189e7d026dff24f7b2e41af54fde4a 100644 (file)
@@ -29,7 +29,7 @@
 #include <asm/mach/time.h>
 
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mfd/mc13xxx.h>
 
 #include "common.h"
index 64ff37ea72b17455a1b5be7600ff7a1c16ceab16..80c177c36c5f25665ada2fbf93cf2ac4b007eee6 100644 (file)
@@ -117,6 +117,17 @@ void __init imx_init_l2cache(void)
        /* Configure the L2 PREFETCH and POWER registers */
        val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
        val |= 0x70800000;
+       /*
+        * The L2 cache controller (PL310) on the i.MX6D/Q is r3p1-50rel0;
+        * on the i.MX6DL/SOLO/SL it is r3p2.
+        * ARM PL310 erratum 752271 ("Double linefill feature can cause data
+        * corruption") is present in r3p0, r3p1 and r3p1-50rel0, and fixed
+        * in r3p2. The only workaround is to disable the double linefill
+        * feature, which is the default behavior.
+        */
+       if (cpu_is_imx6q())
+               val &= ~(1 << 30 | 1 << 23);
        writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
        val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
        writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
index 755fd29fed4a5f407ed3107abb4154637bdf5228..06a9e2e7d007b847f650b8efbf0856a1f47978ae 100644 (file)
@@ -1,2 +1,9 @@
 /* Simple oneliner include to the PCIv3 early init */
+#ifdef CONFIG_PCI
 extern int pci_v3_early_init(void);
+#else
+static inline int pci_v3_early_init(void)
+{
+       return 0;
+}
+#endif
index 489495976fcd794e211d10c82e59e087bd40b79c..8e3e4331c380dabd5c3a818626b4eabc7922776b 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/gpio.h>
 #include <asm/mach/time.h>
 #include <mach/kirkwood.h>
index 4c24303ec4816bfb6f1c4bc0695f57450a624517..58adf2fd9cfc98ea03f6b1f8cfe037ceb01bd78b 100644 (file)
@@ -140,6 +140,7 @@ int __init coherency_init(void)
                coherency_base = of_iomap(np, 0);
                coherency_cpu_base = of_iomap(np, 1);
                set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
+               of_node_put(np);
        }
 
        return 0;
@@ -147,9 +148,14 @@ int __init coherency_init(void)
 
 static int __init coherency_late_init(void)
 {
-       if (of_find_matching_node(NULL, of_coherency_table))
+       struct device_node *np;
+
+       np = of_find_matching_node(NULL, of_coherency_table);
+       if (np) {
                bus_register_notifier(&platform_bus_type,
                                      &mvebu_hwcc_platform_nb);
+               of_node_put(np);
+       }
        return 0;
 }
 
index 3cc4bef6401c160f2b8beb5c378889740424c546..27fc4f049474ed94b07cef00dfe3304b1165369c 100644 (file)
@@ -67,6 +67,7 @@ int __init armada_370_xp_pmsu_init(void)
                pr_info("Initializing Power Management Service Unit\n");
                pmsu_mp_base = of_iomap(np, 0);
                pmsu_reset_base = of_iomap(np, 1);
+               of_node_put(np);
        }
 
        return 0;
index f875124ff4f9e558ff9be36c08d6bbc9ab3fdf13..5175083cdb34650802288789c55a82aee8c20d08 100644 (file)
@@ -98,6 +98,7 @@ static int __init mvebu_system_controller_init(void)
                BUG_ON(!match);
                system_controller_base = of_iomap(np, 0);
                mvebu_sc = (struct mvebu_system_controller *)match->data;
+               of_node_put(np);
        }
 
        return 0;
index a7ce69286688434e0e195e3c8a2e24728ff6dc60..d68909b095f1c06b135bac8e07dee46408dd5d40 100644 (file)
@@ -300,7 +300,7 @@ static struct omap_lcd_config osk_lcd_config __initdata = {
 #ifdef CONFIG_OMAP_OSK_MISTRAL
 
 #include <linux/input.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 
index 33d159e2386e6a86801d7decced3522125c3b8fb..8dd0ec858cf1cc71372cb1e9d97d23232f7ed0de 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_data/gpio-omap.h>
 
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/i2c/twl.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
index 39c78387ddecb1b287ebfe732d7de0f3c62ca4db..87162e1b94a59104a2bca07114da5d8a02816fba 100644 (file)
@@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
        .restart        = omap3xxx_restart,
 MACHINE_END
 
+static const char *omap36xx_boards_compat[] __initdata = {
+       "ti,omap36xx",
+       NULL,
+};
+
+DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
+       .reserve        = omap_reserve,
+       .map_io         = omap3_map_io,
+       .init_early     = omap3630_init_early,
+       .init_irq       = omap_intc_of_init,
+       .handle_irq     = omap3_intc_handle_irq,
+       .init_machine   = omap_generic_init,
+       .init_late      = omap3_init_late,
+       .init_time      = omap3_sync32k_timer_init,
+       .dt_compat      = omap36xx_boards_compat,
+       .restart        = omap3xxx_restart,
+MACHINE_END
+
 static const char *omap3_gp_boards_compat[] __initdata = {
        "ti,omap3-beagle",
        "timll,omap3-devkit8000",
index 87e41a8b8d4666059c43a49c2f4ea953b3327fb2..f7808349a7346642a58994c2b5a7b43f3dddb5da 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/delay.h>
 #include <linux/workqueue.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/input.h>
 #include <linux/err.h>
 #include <linux/clk.h>
index f26918467efcf42cc13639317965ebdb6d34bf14..6432ab8d92078ac4779627d9ae143761bd78ebef 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 
 #include <linux/mtd/mtd.h>
@@ -522,11 +522,11 @@ static int __init beagle_opp_init(void)
                        return -ENODEV;
                }
                /* Enable MPU 1GHz and lower opps */
-               r = opp_enable(mpu_dev, 800000000);
+               r = dev_pm_opp_enable(mpu_dev, 800000000);
                /* TODO: MPU 1GHz needs SR and ABB */
 
                /* Enable IVA 800MHz and lower opps */
-               r |= opp_enable(iva_dev, 660000000);
+               r |= dev_pm_opp_enable(iva_dev, 660000000);
                /* TODO: DSP 800MHz needs SR and ABB */
                if (r) {
                        pr_err("%s: failed to enable higher opp %d\n",
@@ -535,8 +535,8 @@ static int __init beagle_opp_init(void)
                         * Cleanup - disable the higher freqs - we dont care
                         * about the results
                         */
-                       opp_disable(mpu_dev, 800000000);
-                       opp_disable(iva_dev, 660000000);
+                       dev_pm_opp_disable(mpu_dev, 800000000);
+                       dev_pm_opp_disable(iva_dev, 660000000);
                }
        }
        return 0;
index ba8342fef799ee10c3248bc75188bad47b5971bf..119efaf5808ab3df5291969f311e555704e84ee2 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
 #include <linux/smsc911x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/usb/phy.h>
 
 #include <asm/mach-types.h>
index c3270c0f1fce47724b0aba71d8f1ea388b94f445..f6fe388af9895ef8c8b2859f9177a146a30fb965 100644 (file)
@@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
                .name           = "lp5523:kb1",
                .chan_nr        = 0,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb2",
                .chan_nr        = 1,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb3",
                .chan_nr        = 2,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb4",
                .chan_nr        = 3,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:b",
                .chan_nr        = 4,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:g",
                .chan_nr        = 5,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:r",
                .chan_nr        = 6,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb5",
                .chan_nr        = 7,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb6",
                .chan_nr        = 8,
                .led_current    = 50,
+               .max_current    = 100,
        }
 };
 
index 1d5b5290d2af3db91f5d57b0a9c945a2ccaa2e28..b237950eb8a319f9fb879d051245cc7c6fea6968 100644 (file)
@@ -1632,7 +1632,7 @@ static struct omap_clk omap44xx_clks[] = {
        CLK(NULL,       "auxclk5_src_ck",               &auxclk5_src_ck),
        CLK(NULL,       "auxclk5_ck",                   &auxclk5_ck),
        CLK(NULL,       "auxclkreq5_ck",                &auxclkreq5_ck),
-       CLK("omap-gpmc",        "fck",                  &dummy_ck),
+       CLK("50000000.gpmc",    "fck",                  &dummy_ck),
        CLK("omap_i2c.1",       "ick",                  &dummy_ck),
        CLK("omap_i2c.2",       "ick",                  &dummy_ck),
        CLK("omap_i2c.3",       "ick",                  &dummy_ck),
index c443f2e97e103702531c79b70dccebc11a85218d..4c8982ae95295f508c1fc6610f8c47d1e1c8137d 100644 (file)
@@ -143,7 +143,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
         * Call idle CPU cluster PM exit notifier chain
         * to restore GIC and wakeupgen context.
         */
-       if ((cx->mpu_state == PWRDM_POWER_RET) &&
+       if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) &&
                (cx->mpu_logic_state == PWRDM_POWER_OFF))
                cpu_cluster_pm_exit();
 
index 64b5a83469822ad53693c2ae8ffdd3b227fa2a7b..8b6876c98ce1a320c793e7485c35d23bd3a5b923 100644 (file)
@@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
        struct gpmc_timings t;
        int ret;
 
-       if (gpmc_onenand_data->of_node)
+       if (gpmc_onenand_data->of_node) {
                gpmc_read_settings_dt(gpmc_onenand_data->of_node,
                                      &onenand_async);
+               if (onenand_async.sync_read || onenand_async.sync_write) {
+                       if (onenand_async.sync_write)
+                               gpmc_onenand_data->flags |=
+                                       ONENAND_SYNC_READWRITE;
+                       else
+                               gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
+                       onenand_async.sync_read = false;
+                       onenand_async.sync_write = false;
+               }
+       }
 
        omap2_onenand_set_async_mode(onenand_base);
 
index 9f4795aff48aae5eedcf874284bb88b9b3b2d777..579697adaae7dfd373285979eebeb815658f4a1d 100644 (file)
@@ -1491,8 +1491,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
         */
        ret = gpmc_cs_remap(cs, res.start);
        if (ret < 0) {
-               dev_err(&pdev->dev, "cannot remap GPMC CS %d to 0x%x\n",
-                       cs, res.start);
+               dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
+                       cs, &res.start);
                goto err;
        }
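In the dev_err() change above, res.start has type resource_size_t, which is 32 or 64 bits depending on LPAE, so it is printed by reference with %pa rather than with a fixed-width %x. A tiny, illustrative example of the idiom (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/ioport.h>

/* %pa dereferences a pointer to phys_addr_t/resource_size_t, so the same
 * format string works for both 32-bit and LPAE physical addresses. */
static void report_cs_base(struct device *dev, int cs, struct resource *res)
{
	dev_err(dev, "cannot remap GPMC CS %d to %pa\n", cs, &res->start);
}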
 
index 5d2080ef7923585c1313f598a40961c3b241d1ce..16f78a990d04cafbd7dd1fcaa81b7d7dd061e979 100644 (file)
@@ -28,7 +28,7 @@
 #define OMAP_PULL_UP                   (1 << 4)
 #define OMAP_ALTELECTRICALSEL          (1 << 5)
 
-/* 34xx specific mux bit defines */
+/* omap3/4/5 specific mux bit defines */
 #define OMAP_INPUT_EN                  (1 << 8)
 #define OMAP_OFF_EN                    (1 << 9)
 #define OMAP_OFFOUT_EN                 (1 << 10)
@@ -36,8 +36,6 @@
 #define OMAP_OFF_PULL_EN               (1 << 12)
 #define OMAP_OFF_PULL_UP               (1 << 13)
 #define OMAP_WAKEUP_EN                 (1 << 14)
-
-/* 44xx specific mux bit defines */
 #define OMAP_WAKEUP_EVENT              (1 << 15)
 
 /* Active pin states */
index c53609f4629485b46277aad1e0bdb5634f244c84..be271f1d585bfb5825e22edb93157dde999a6234 100644 (file)
@@ -620,7 +620,7 @@ static struct omap_mux __initdata omap3_muxmodes[] = {
                "uart1_rts", "ssi1_flag_tx", NULL, NULL,
                "gpio_149", NULL, NULL, "safe_mode"),
        _OMAP3_MUXENTRY(UART1_RX, 151,
-               "uart1_rx", "ss1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
+               "uart1_rx", "ssi1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
                "gpio_151", NULL, NULL, "safe_mode"),
        _OMAP3_MUXENTRY(UART1_TX, 148,
                "uart1_tx", "ssi1_dat_tx", NULL, NULL,
index 67faa7b8fe92fd932889b65d55adbe0c5a97e5de..1d777e63e05c94ccf754bab89a65ca681638c67f 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/device.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 
 /*
  * agent_id values for use with omap_pm_set_min_bus_tput():
index 8708b2a9da453e91be9bc16590d48e9652b35508..89121109329533b917561f4da6ce4b335872f4c4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * OMAP4 SMP source file. It contains platform specific fucntions
+ * OMAP4 SMP source file. It contains platform specific functions
  * needed for the linux smp kernel.
  *
  * Copyright (C) 2009 Texas Instruments, Inc.
index f99f68e1e85bd3748c5965a0bd27c47d0f6d538e..b69dd9abb50aeb0003998b24d7df9063a6a41cb1 100644 (file)
@@ -158,7 +158,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
        }
 
        od = omap_device_alloc(pdev, hwmods, oh_cnt);
-       if (!od) {
+       if (IS_ERR(od)) {
                dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
                        oh_name);
                ret = PTR_ERR(od);
index bd41d59a7cab08ecbca342a665f5f944aa67b6bc..ec21e6eb03e133be1c8b1ff2a4733d0f34052abc 100644 (file)
@@ -17,7 +17,7 @@
  * GNU General Public License for more details.
  */
 #include <linux/module.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 
 #include "omap_device.h"
@@ -81,14 +81,14 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
                        dev = &oh->od->pdev->dev;
                }
 
-               r = opp_add(dev, opp_def->freq, opp_def->u_volt);
+               r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt);
                if (r) {
                        dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n",
                                __func__, opp_def->freq,
                                opp_def->hwmod_name, i, r);
                } else {
                        if (!opp_def->default_available)
-                               r = opp_disable(dev, opp_def->freq);
+                               r = dev_pm_opp_disable(dev, opp_def->freq);
                        if (r)
                                dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n",
                                        __func__, opp_def->freq,
index e742118fcfd2b69adc367057e6f17670a5136903..2f569b3c3092dbdaf4d3bbc2ec6a85253402db74 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/err.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/export.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
@@ -131,7 +131,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
 {
        struct voltagedomain *voltdm;
        struct clk *clk;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq, bootup_volt;
        struct device *dev;
 
@@ -172,7 +172,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
        clk_put(clk);
 
        rcu_read_lock();
-       opp = opp_find_freq_ceil(dev, &freq);
+       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                pr_err("%s: unable to find boot up OPP for vdd_%s\n",
@@ -180,7 +180,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
                goto exit;
        }
 
-       bootup_volt = opp_get_voltage(opp);
+       bootup_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        if (!bootup_volt) {
                pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
index fa74a0625da1a033335ea5fb76d6738889a9a1aa..ead48fa5715e16fb197dfa4fac56b0f71069bd20 100644 (file)
@@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
 #endif /* CONFIG_HAVE_ARM_TWD */
 #endif /* CONFIG_ARCH_OMAP4 */
 
-#ifdef CONFIG_SOC_OMAP5
+#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
 void __init omap5_realtime_timer_init(void)
 {
        omap4_sync32k_timer_init();
@@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
 
        clocksource_of_init();
 }
-#endif /* CONFIG_SOC_OMAP5 */
+#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
 
 /**
  * omap_timer_init - build and register timer device with an
index a8427115ee07b9a986d62dceac6006f8c9cecafd..96100dbf5a2e8353e9fad34ded41ec5e83dda7ad 100644 (file)
@@ -615,14 +615,12 @@ endmenu
 config PXA25x
        bool
        select CPU_XSCALE
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Select code specific to PXA21x/25x/26x variants
 
 config PXA27x
        bool
        select CPU_XSCALE
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Select code specific to PXA27x variants
 
@@ -635,7 +633,6 @@ config CPU_PXA26x
 config PXA3xx
        bool
        select CPU_XSC3
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Select code specific to PXA3xx variants
 
index 62aea3e835f315266ba887367310ec249503cd21..01de542432a6107b896277da2c5c392c65e9aff6 100644 (file)
@@ -27,7 +27,7 @@
 
 #include <linux/i2c/pxa-i2c.h>
 #include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/smc91x.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
index a83db46320bc6d1e8345ebee8aefd3845f9b347f..4a18d49a63e0b5d7e84dc0018f9473d8f12f164a 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/io.h>
 #include <linux/serial_core.h>
 #include <linux/dm9000.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/platform_device.h>
 #include <linux/gpio_keys.h>
 #include <linux/i2c.h>
index e838ba27e443c1dd0b54042c3e4afd882ae22265..c9808c6841526204144e36a273596e6955696dc8 100644 (file)
@@ -512,6 +512,9 @@ static void __init assabet_map_io(void)
         * Its called GPCLKR0 in my SA1110 manual.
         */
        Ser1SDCR0 |= SDCR0_SUS;
+       MSC1 = (MSC1 & ~0xffff) |
+               MSC_NonBrst | MSC_32BitStMem |
+               MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
 
        if (!machine_has_neponset())
                sa1100_register_uart_fns(&assabet_port_fns);
index 612a45689770752bc9e1319d2eeb430ae36a449c..7fb96ebdc0fbc74d37b1122fe0462026031a8889 100644 (file)
@@ -289,7 +289,7 @@ static void collie_flash_exit(void)
 }
 
 static struct flash_platform_data collie_flash_data = {
-       .map_name       = "cfi_probe",
+       .map_name       = "jedec_probe",
        .init           = collie_flash_init,
        .set_vpp        = collie_set_vpp,
        .exit           = collie_flash_exit,
index f25b6119e028cc42ee564712fb3ef746961e0574..cb4b2ca3cf6b9e3f4238b13b0aa678c8f8fed7b5 100644 (file)
@@ -42,23 +42,24 @@ EXPORT_SYMBOL(reset_status);
 /*
  * This table is setup for a 3.6864MHz Crystal.
  */
-static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
-        590,   /*  59.0 MHz */
-        737,   /*  73.7 MHz */
-        885,   /*  88.5 MHz */
-       1032,   /* 103.2 MHz */
-       1180,   /* 118.0 MHz */
-       1327,   /* 132.7 MHz */
-       1475,   /* 147.5 MHz */
-       1622,   /* 162.2 MHz */
-       1769,   /* 176.9 MHz */
-       1917,   /* 191.7 MHz */
-       2064,   /* 206.4 MHz */
-       2212,   /* 221.2 MHz */
-       2359,   /* 235.9 MHz */
-       2507,   /* 250.7 MHz */
-       2654,   /* 265.4 MHz */
-       2802    /* 280.2 MHz */
+struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
+       { .frequency = 59000,   /*  59.0 MHz */},
+       { .frequency = 73700,   /*  73.7 MHz */},
+       { .frequency = 88500,   /*  88.5 MHz */},
+       { .frequency = 103200,  /* 103.2 MHz */},
+       { .frequency = 118000,  /* 118.0 MHz */},
+       { .frequency = 132700,  /* 132.7 MHz */},
+       { .frequency = 147500,  /* 147.5 MHz */},
+       { .frequency = 162200,  /* 162.2 MHz */},
+       { .frequency = 176900,  /* 176.9 MHz */},
+       { .frequency = 191700,  /* 191.7 MHz */},
+       { .frequency = 206400,  /* 206.4 MHz */},
+       { .frequency = 221200,  /* 221.2 MHz */},
+       { .frequency = 235900,  /* 235.9 MHz */},
+       { .frequency = 250700,  /* 250.7 MHz */},
+       { .frequency = 265400,  /* 265.4 MHz */},
+       { .frequency = 280200,  /* 280.2 MHz */},
+       { .frequency = CPUFREQ_TABLE_END, },
 };
 
 /* rounds up(!)  */
@@ -66,10 +67,8 @@ unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
 {
        int i;
 
-       khz /= 100;
-
        for (i = 0; i < NR_FREQS; i++)
-               if (cclk_frequency_100khz[i] >= khz)
+               if (sa11x0_freq_table[i].frequency >= khz)
                        break;
 
        return i;
@@ -79,37 +78,15 @@ unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
 {
        unsigned int freq = 0;
        if (idx < NR_FREQS)
-               freq = cclk_frequency_100khz[idx] * 100;
+               freq = sa11x0_freq_table[idx].frequency;
        return freq;
 }
 
-
-/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
- * this platform, anyway.
- */
-int sa11x0_verify_speed(struct cpufreq_policy *policy)
-{
-       unsigned int tmp;
-       if (policy->cpu)
-               return -EINVAL;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
-       /* make sure that at least one frequency is within the policy */
-       tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
-       if (tmp > policy->max)
-               policy->max = tmp;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
-       return 0;
-}
-
 unsigned int sa11x0_getspeed(unsigned int cpu)
 {
        if (cpu)
                return 0;
-       return cclk_frequency_100khz[PPCR & 0xf] * 100;
+       return sa11x0_freq_table[PPCR & 0xf].frequency;
 }
 
 /*
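
The replacement table follows the standard cpufreq convention: frequencies in kHz, terminated by a CPUFREQ_TABLE_END sentinel, which is why the /100 scaling and the old verify helper can go away. A minimal sketch of walking such a table, mirroring the rounds-up behaviour of sa11x0_freq_to_ppcr() (placeholder names, not part of this patch):

#include <linux/cpufreq.h>

/* Return the index of the first entry at or above target_khz,
 * clamping to the last valid entry if none is high enough. */
static int example_ceil_index(const struct cpufreq_frequency_table *table,
			      unsigned int target_khz)
{
	int i;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++)
		if (table[i].frequency >= target_khz)
			return i;

	return i - 1;
}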
index 9a33695c9492d238879a43dd64002a88d878df23..cbdfae744dc522183fb3b45918e5461a04aeab67 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Author: Nicolas Pitre
  */
+#include <linux/cpufreq.h>
 #include <linux/reboot.h>
 
 extern void sa1100_timer_init(void);
@@ -19,10 +20,8 @@ extern void sa11x0_init_late(void);
 extern void sa1110_mb_enable(void);
 extern void sa1110_mb_disable(void);
 
-struct cpufreq_policy;
-
+extern struct cpufreq_frequency_table sa11x0_freq_table[];
 extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz);
-extern int sa11x0_verify_speed(struct cpufreq_policy *policy);
 extern unsigned int sa11x0_getspeed(unsigned int cpu);
 extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx);
 
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h
deleted file mode 100644 (file)
index 6a9eecf..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/gpio.h
- *
- * SA1100 GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_SA1100_GPIO_H
-#define __ASM_ARCH_SA1100_GPIO_H
-
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_get_value(unsigned gpio)
-{
-       if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
-               return GPLR & GPIO_GPIO(gpio);
-       else
-               return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-       if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
-               if (value)
-                       GPSR = GPIO_GPIO(gpio);
-               else
-                       GPCR = GPIO_GPIO(gpio);
-       else
-               __gpio_set_value(gpio, value);
-}
-
-#define gpio_cansleep  __gpio_cansleep
-
-#endif
index 7d9df16f04a2276ccceba5b8315cf296988a2334..c810620db53d60235916e8ecf25c6402f2b984b1 100644 (file)
@@ -13,6 +13,8 @@
 #ifndef _INCLUDE_H3XXX_H_
 #define _INCLUDE_H3XXX_H_
 
+#include "hardware.h" /* Gives GPIO_MAX */
+
 /* Physical memory regions corresponding to chip selects */
 #define H3600_EGPIO_PHYS       (SA1100_CS5_PHYS + 0x01000000)
 #define H3600_BANK_2_PHYS      SA1100_CS2_PHYS
index 5bd1479d3deb7e98a0d38c3c192abd2824b2825d..7f8f6076d3609e82382bb98f116df3bfc59049ed 100644 (file)
@@ -1108,9 +1108,9 @@ static const struct pinctrl_map eva_pinctrl_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740",
                                  "fsib_mclk_in", "fsib"),
        /* GETHER */
-       PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740",
+       PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
                                  "gether_mii", "gether"),
-       PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740",
+       PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
                                  "gether_int", "gether"),
        /* HDMI */
        PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740",
index ffb6f0ac760643b79075441fe23f048d69d293c1..5930af8d434fb90c4a79fd8b0f8c225be9b93b58 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/gpio-rcar.h>
 #include <linux/platform_device.h>
+#include <linux/phy.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
 #include <linux/sh_eth.h>
@@ -155,6 +156,30 @@ static void __init lager_add_standard_devices(void)
                                          &ether_pdata, sizeof(ether_pdata));
 }
 
+/*
+ * Ether LEDs on the Lager board are named LINK and ACTIVE which corresponds
+ * to non-default 01 setting of the Micrel KSZ8041 PHY control register 1 bits
+ * 14-15. We have to set them back to 01 from the default 00 value each time
+ * the PHY is reset. It's also important because the PHY's LED0 signal is
+ * connected to SoC's ETH_LINK signal and in the PHY's default mode it will
+ * bounce on and off after each packet, which we apparently want to avoid.
+ */
+static int lager_ksz8041_fixup(struct phy_device *phydev)
+{
+       u16 phyctrl1 = phy_read(phydev, 0x1e);
+
+       phyctrl1 &= ~0xc000;
+       phyctrl1 |= 0x4000;
+       return phy_write(phydev, 0x1e, phyctrl1);
+}
+
+static void __init lager_init(void)
+{
+       lager_add_standard_devices();
+
+       phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+}
+
 static const char *lager_boards_compat_dt[] __initdata = {
        "renesas,lager",
        NULL,
@@ -163,6 +188,6 @@ static const char *lager_boards_compat_dt[] __initdata = {
 DT_MACHINE_START(LAGER_DT, "lager")
        .init_early     = r8a7790_init_delay,
        .init_time      = r8a7790_timer_init,
-       .init_machine   = lager_add_standard_devices,
+       .init_machine   = lager_init,
        .dt_compat      = lager_boards_compat_dt,
 MACHINE_END
index 8ea5ef6c79ccbed859318cc69745488c0d273290..5bd2e851e3c7f2d03cd7ae4dd647ec1cc7d9c537 100644 (file)
@@ -555,7 +555,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_CON_ID("pll2h",                  &pll2h_clk),
 
        /* CPU clock */
-       CLKDEV_DEV_ID("cpufreq-cpu0",           &z_clk),
+       CLKDEV_DEV_ID("cpu0",                   &z_clk),
 
        /* DIV6 */
        CLKDEV_CON_ID("zb",                     &div6_clks[DIV6_ZB]),
index 1942eaef518134804110ed14cc5a61c94550af27..c92c023f0d27c1de8778665e372d1abfba82db42 100644 (file)
@@ -616,7 +616,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
 
        /* DIV4 clocks */
-       CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]),
+       CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
 
        /* DIV6 clocks */
        CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
index 67a76f2dfb9f62b99351e0e7ecb4d8c877b08d4b..f26428d8b62a18828a08cd0ff42bc08f35e5b206 100644 (file)
@@ -54,7 +54,7 @@ config ARCH_TEGRA_3x_SOC
 config ARCH_TEGRA_114_SOC
        bool "Enable support for Tegra114 family"
        select HAVE_ARM_ARCH_TIMER
-       select ARM_ERRATA_798181
+       select ARM_ERRATA_798181 if SMP
        select ARM_L1_CACHE_SHIFT_6
        select PINCTRL_TEGRA114
        help
index a85adcd00882e4ad193cd3b177179a5ad8162e6a..a1659863bfd5cb650338d6d8919ab0b145da1d57 100644 (file)
@@ -1,7 +1,3 @@
-menu "ST-Ericsson AB U300/U335 Platform"
-
-comment "ST-Ericsson Mobile Platform Products"
-
 config ARCH_U300
        bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5
        depends on MMU
@@ -25,7 +21,9 @@ config ARCH_U300
        help
          Support for ST-Ericsson U300 series mobile platforms.
 
-comment "ST-Ericsson U300/U335 Feature Selections"
+if ARCH_U300
+
+menu "ST-Ericsson AB U300/U335 Platform"
 
 config MACH_U300
        depends on ARCH_U300
@@ -53,3 +51,5 @@ config MACH_U300_SPIDUMMY
                SPI framework and ARM PL022 support.
 
 endmenu
+
+endif
index 99a28d62829742d9103764c47abcb5ff24894fbf..7a3fc1af6944b2cd9175a46f7c1749c83a090220 100644 (file)
@@ -34,7 +34,6 @@ config UX500_SOC_COMMON
 
 config UX500_SOC_DB8500
        bool
-       select CPU_FREQ_TABLE if CPU_FREQ
        select MFD_DB8500_PRCMU
        select PINCTRL_DB8500
        select PINCTRL_DB8540
index 82ccf1d98735520ef4727c615398e478fccb8b02..264f894c0e3d228ca62280deb0aaec8b350a2fa1 100644 (file)
@@ -69,6 +69,7 @@ static int __init ux500_l2x0_init(void)
         * some SMI service available.
         */
        outer_cache.disable = NULL;
+       outer_cache.set_debug = NULL;
 
        return 0;
 }
index 7aeb5d60e484642d08ed119ecfd083b3c4074a46..e6eb4819291241f30b51d5e7b58c14d1d07c0d32 100644 (file)
@@ -131,6 +131,16 @@ static void tc2_pm_down(u64 residency)
        } else
                BUG();
 
+       /*
+        * If the CPU is committed to power down, make sure
+        * the power controller will be in charge of waking it
+        * up upon IRQ, ie IRQ lines are cut from GIC CPU IF
+        * to the CPU by disabling the GIC CPU IF to prevent wfi
+        * from completing execution behind power controller back
+        */
+       if (!skip_wfi)
+               gic_cpu_if_down();
+
        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&tc2_pm_lock);
 
@@ -231,7 +241,6 @@ static void tc2_pm_suspend(u64 residency)
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
-       gic_cpu_if_down();
        tc2_pm_down(residency);
 }
 
index 5f252569c68987d908546b55bb921d5f7693c7af..9a7bd137c8fd27d60f42a6b78c6dbf3cc234d348 100644 (file)
@@ -44,6 +44,10 @@ static struct of_device_id zynq_of_bus_ids[] __initdata = {
        {}
 };
 
+static struct platform_device zynq_cpuidle_device = {
+       .name = "cpuidle-zynq",
+};
+
 /**
  * zynq_init_machine - System specific initialization, intended to be
  *                    called from board specific initialization.
@@ -56,6 +60,8 @@ static void __init zynq_init_machine(void)
        l2x0_of_init(0x02060000, 0xF0F0FFFF);
 
        of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL);
+
+       platform_device_register(&zynq_cpuidle_device);
 }
 
 static void __init zynq_timer_init(void)
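
Registering the resource-less device above is enough because the platform bus matches devices to drivers by name; the cpuidle driver only has to declare the same string. A hypothetical sketch of the matching driver side (illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_cpuidle_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed via platform bus name match\n");
	return 0;
}

static struct platform_driver example_cpuidle_driver = {
	.probe	= example_cpuidle_probe,
	.driver	= {
		.name = "cpuidle-zynq",	/* must match the device name above */
	},
};
module_platform_driver(example_cpuidle_driver);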
index f5e1a8471714cd421e2ab9dee54ae88f144316d1..644d91f73b00f3c5663b9e273d91d51a50218dd5 100644 (file)
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-       u64 mask = (u64)arm_dma_limit;
+       u64 mask = (u64)DMA_BIT_MASK(32);
 
        if (dev) {
                mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
                        return 0;
                }
 
-               if ((~mask) & (u64)arm_dma_limit) {
-                       dev_warn(dev, "coherent DMA mask %#llx is smaller "
-                                "than system GFP_DMA mask %#llx\n",
-                                mask, (u64)arm_dma_limit);
+               /*
+                * If the mask allows for more memory than we can address,
+                * and we actually have that much memory, then fail the
+                * allocation.
+                */
+               if (sizeof(mask) != sizeof(dma_addr_t) &&
+                   mask > (dma_addr_t)~0 &&
+                   dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+                       dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+                                mask);
+                       dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+                       return 0;
+               }
+
+               /*
+                * Now check that the mask, when translated to a PFN,
+                * fits within the allowable addresses which we can
+                * allocate.
+                */
+               if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+                       dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+                                mask,
+                                dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+                                arm_dma_pfn_limit + 1);
                        return 0;
                }
        }
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-       if (mask < (u64)arm_dma_limit)
+       unsigned long limit;
+
+       /*
+        * If the mask allows for more memory than we can address,
+        * and we actually have that much memory, then we must
+        * indicate that DMA to this device is not supported.
+        */
+       if (sizeof(mask) != sizeof(dma_addr_t) &&
+           mask > (dma_addr_t)~0 &&
+           dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
                return 0;
+
+       /*
+        * Translate the device's DMA mask to a PFN limit.  This
+        * PFN number includes the page which we can DMA to.
+        */
+       limit = dma_to_pfn(dev, mask);
+
+       if (limit < arm_dma_pfn_limit)
+               return 0;
+
        return 1;
 }
 EXPORT_SYMBOL(dma_supported);
@@ -1232,7 +1271,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
                                break;
 
                len = (j - i) << PAGE_SHIFT;
-               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               ret = iommu_map(mapping->domain, iova, phys, len,
+                               IOMMU_READ|IOMMU_WRITE);
                if (ret < 0)
                        goto fail;
                iova += len;
@@ -1431,6 +1471,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                         GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+       int prot;
+
+       switch (dir) {
+       case DMA_BIDIRECTIONAL:
+               prot = IOMMU_READ | IOMMU_WRITE;
+               break;
+       case DMA_TO_DEVICE:
+               prot = IOMMU_READ;
+               break;
+       case DMA_FROM_DEVICE:
+               prot = IOMMU_WRITE;
+               break;
+       default:
+               prot = 0;
+       }
+
+       return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1505,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
        int ret = 0;
        unsigned int count;
        struct scatterlist *s;
+       int prot;
 
        size = PAGE_ALIGN(size);
        *handle = DMA_ERROR_CODE;
@@ -1460,7 +1522,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                        !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               prot = __dma_direction_to_prot(dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, prot);
                if (ret < 0)
                        goto fail;
                count += len >> PAGE_SHIFT;
@@ -1665,19 +1729,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       switch (dir) {
-       case DMA_BIDIRECTIONAL:
-               prot = IOMMU_READ | IOMMU_WRITE;
-               break;
-       case DMA_TO_DEVICE:
-               prot = IOMMU_READ;
-               break;
-       case DMA_FROM_DEVICE:
-               prot = IOMMU_WRITE;
-               break;
-       default:
-               prot = 0;
-       }
+       prot = __dma_direction_to_prot(dir);
 
        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
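
The new mask checks target drivers that ignore what the DMA mask setters return, as the added warning text says. A minimal sketch of the driver-side pattern the core now expects, with a generic placeholder device and mask (not from this patch):

#include <linux/dma-mapping.h>

static int example_setup_dma(struct device *dev)
{
	int ret;

	/* Ask for 32-bit coherent DMA and honour the answer instead of
	 * assuming the platform can satisfy it. */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "no usable coherent DMA mask\n");
		return ret;
	}

	return 0;
}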
index 83cb3ac27095146f3f60c04047c6b212856a73b2..8e0e52eb76b57d7f9d4208fafbcdf024be369c75 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/system_info.h>
 
 pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
        unsigned long addr, end;
        unsigned long next;
 
-       addr = virt_to_phys(text_start);
-       end = virt_to_phys(text_end);
+       addr = virt_to_idmap(text_start);
+       end = virt_to_idmap(text_end);
+       pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
 
        prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
@@ -90,8 +92,6 @@ static int __init init_static_idmap(void)
        if (!idmap_pgd)
                return -ENOMEM;
 
-       pr_info("Setting up static identity map for 0x%p - 0x%p\n",
-               __idmap_text_start, __idmap_text_end);
        identity_mapping_add(idmap_pgd, __idmap_text_start,
                             __idmap_text_end, 0);
 
index febaee7ca57be76487290a2ac45c6cf74e53759c..2a3fa425c52c4062addaf897951d3b8265819764 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
@@ -218,6 +217,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * so a successful GFP_DMA allocation will always satisfy this.
  */
 phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
@@ -240,6 +240,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
+       arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
 }
 
@@ -379,8 +380,6 @@ void __init arm_memblock_init(struct meminfo *mi,
        if (mdesc->reserve)
                mdesc->reserve();
 
-       early_init_dt_scan_reserved_mem();
-
        /*
         * reserve memory for DMA contigouos allocations,
         * must come from DMA area inside low memory
@@ -424,12 +423,10 @@ void __init bootmem_init(void)
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
-        *
-        * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-        * the system, not the maximum PFN.
         */
-       max_low_pfn = max_low - PHYS_PFN_OFFSET;
-       max_pfn = max_high - PHYS_PFN_OFFSET;
+       min_low_pfn = min;
+       max_low_pfn = max_low;
+       max_pfn = max_high;
 }
 
 /*
@@ -535,7 +532,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
 static void __init free_highpages(void)
 {
 #ifdef CONFIG_HIGHMEM
-       unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+       unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;
 
        /* set highmem page free */
index d5a4e9ad8f0f68549947100081e3b9f8ac2e1b3a..d5a982d15a88e2a37a524267f31633f497aa0d4b 100644 (file)
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
 
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
 #else
 #define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
index 0c6356255fe31122f2527a0a2947439e69b0e953..d27158c38eb0b190b869e028b93d8265fb90969e 100644 (file)
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
 }
 
 /*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
  */
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
-       return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+       return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
 }
 
 #ifdef CONFIG_STRICT_DEVMEM
index b1d17eeb59b895cd429e762d082d6c5c56c3ff57..78eeeca78f5ab331707fcd73b4956c503c2d880b 100644 (file)
@@ -28,6 +28,8 @@
 #include <asm/highmem.h>
 #include <asm/system_info.h>
 #include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
        }
 }
 
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+                             struct proc_info_list *procinfo)
+{
+       pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+       unsigned long map_start, map_end;
+       pgd_t *pgd0, *pgdk;
+       pud_t *pud0, *pudk, *pud_start;
+       pmd_t *pmd0, *pmdk;
+       phys_addr_t phys;
+       int i;
+
+       if (!(mdesc->init_meminfo))
+               return;
+
+       /* remap kernel code and data */
+       map_start = init_mm.start_code;
+       map_end   = init_mm.brk;
+
+       /* get a handle on things... */
+       pgd0 = pgd_offset_k(0);
+       pud_start = pud0 = pud_offset(pgd0, 0);
+       pmd0 = pmd_offset(pud0, 0);
+
+       pgdk = pgd_offset_k(map_start);
+       pudk = pud_offset(pgdk, map_start);
+       pmdk = pmd_offset(pudk, map_start);
+
+       mdesc->init_meminfo();
+
+       /* Run the patch stub to update the constants */
+       fixup_pv_table(&__pv_table_begin,
+               (&__pv_table_end - &__pv_table_begin) << 2);
+
+       /*
+        * Cache cleaning operations for self-modifying code
+        * We should clean the entries by MVA but running a
+        * for loop over every pv_table entry pointer would
+        * just complicate the code.
+        */
+       flush_cache_louis();
+       dsb();
+       isb();
+
+       /* remap level 1 table */
+       for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+               set_pud(pud0,
+                       __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+               pmd0 += PTRS_PER_PMD;
+       }
+
+       /* remap pmds for kernel mapping */
+       phys = __pa(map_start) & PMD_MASK;
+       do {
+               *pmdk++ = __pmd(phys | pmdprot);
+               phys += PMD_SIZE;
+       } while (phys < map_end);
+
+       flush_cache_all();
+       cpu_switch_mm(pgd0, &init_mm);
+       cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+       local_flush_bp_all();
+       local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+                             struct proc_info_list *procinfo)
+{
+       if (mdesc->init_meminfo)
+               mdesc->init_meminfo();
+}
+
+#endif
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
index f50d223a0bd31271ed73beabd60679c137ccb440..99b44e0e8d866983dd60d4b8324bf29010679e77 100644 (file)
@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index 1a6bfe954d4926de3a8dcee10fdbe0b0698279b3..835c559786bda923fe0cb8616b3031932a86bb21 100644 (file)
@@ -6,13 +6,6 @@ config FRAME_POINTER
        bool
        default y
 
-config DEBUG_STACK_USAGE
-       bool "Enable stack utilization instrumentation"
-       depends on DEBUG_KERNEL
-       help
-         Enables the display of the minimum amount of free stack which each
-         task has ever had available in the sysrq-T output.
-
 config EARLY_PRINTK
        bool "Early printk support"
        default y
index 5b3e83217b03b1e255c23347fcc4d06859cba9c1..31c81e9b792e21b5078ada9f4299fc55e3071f86 100644 (file)
@@ -42,7 +42,7 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-# CONFIG_BLK_DEV is not set
+CONFIG_BLK_DEV=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -72,6 +72,7 @@ CONFIG_LOGO=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
 CONFIG_FUSE_FS=y
@@ -90,3 +91,5 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_FTRACE is not set
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_BLK=y
index 6d4482fa35bcbc183bf64d9239a4a5a87bb86705..e2950b098e76f68803559ca264d7bed94ffa4789 100644 (file)
@@ -43,6 +43,6 @@
                                 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
                                 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
 
-extern unsigned int elf_hwcap;
+extern unsigned long elf_hwcap;
 #endif
 #endif
index edb3d5c73a3232c35532bd5e489add1cabf7201c..7ecc2b23882e44c62055ba2a57d49833ae119850 100644 (file)
@@ -166,9 +166,10 @@ do {                                                                       \
 
 #define get_user(x, ptr)                                               \
 ({                                                                     \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
-       access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?                 \
-               __get_user((x), (ptr)) :                                \
+       access_ok(VERIFY_READ, __p, sizeof(*__p)) ?                     \
+               __get_user((x), __p) :                                  \
                ((x) = 0, -EFAULT);                                     \
 })
 
@@ -227,9 +228,10 @@ do {                                                                       \
 
 #define put_user(x, ptr)                                               \
 ({                                                                     \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
-       access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
-               __put_user((x), (ptr)) :                                \
+       access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?                    \
+               __put_user((x), __p) :                                  \
                -EFAULT;                                                \
 })
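
The point of the new __p temporary is that the caller-supplied ptr expression is now evaluated exactly once. A small illustration of the hazard this removes, using a hypothetical user-pointer walk (not from this patch): with the old macro, "uptr++" was expanded both into access_ok() and into __get_user(), so the pointer advanced twice per call and the range check and the load used different addresses.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy n ints from userspace; safe with the single-evaluation get_user(). */
static long example_copy_ints(int *dst, const int __user *uptr, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (get_user(dst[i], uptr++))
			return -EFAULT;

	return 0;
}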
 
index 1f2e4d5a5c0fd65ec86d5f487256dc7de9c4584e..bb785d23dbde8764d1a53df79ef4a7b96dc48966 100644 (file)
@@ -80,8 +80,10 @@ void fpsimd_thread_switch(struct task_struct *next)
 
 void fpsimd_flush_thread(void)
 {
+       preempt_disable();
        memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
        fpsimd_load_state(&current->thread.fpsimd_state);
+       preempt_enable();
 }
 
 #ifdef CONFIG_KERNEL_MODE_NEON
index 57fb55c44c901c19c7264f02ff44d6c481fadc56..7ae8a1f00c3c82bf256f716aee79aaf24fac8f1b 100644 (file)
@@ -143,15 +143,26 @@ void machine_restart(char *cmd)
 
 void __show_regs(struct pt_regs *regs)
 {
-       int i;
+       int i, top_reg;
+       u64 lr, sp;
+
+       if (compat_user_mode(regs)) {
+               lr = regs->compat_lr;
+               sp = regs->compat_sp;
+               top_reg = 12;
+       } else {
+               lr = regs->regs[30];
+               sp = regs->sp;
+               top_reg = 29;
+       }
 
        show_regs_print_info(KERN_DEFAULT);
        print_symbol("PC is at %s\n", instruction_pointer(regs));
-       print_symbol("LR is at %s\n", regs->regs[30]);
+       print_symbol("LR is at %s\n", lr);
        printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
-              regs->pc, regs->regs[30], regs->pstate);
-       printk("sp : %016llx\n", regs->sp);
-       for (i = 29; i >= 0; i--) {
+              regs->pc, lr, regs->pstate);
+       printk("sp : %016llx\n", sp);
+       for (i = top_reg; i >= 0; i--) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
                if (i % 2 == 0)
                        printk("\n");
index 12ad8f3d0cfd32c4f5409a0b8f29b499b7f5cf80..055cfb80e05c5d8d352880cb6db85d2cdea38241 100644 (file)
@@ -57,7 +57,7 @@
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
 
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
 
 static const char *cpu_name;
index 6d6acf153bffb276b5fa9affd87e48a1cb17a318..c23751b0612033f56533955788eabefa6c5fd39e 100644 (file)
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
        force_sig_info(sig, &si, tsk);
 }
 
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;
index 8ae80a18e8ecbf18acd29a0aed49c70d5ecbdbea..19da91e0cd278911ec8400449d199325e528f00b 100644 (file)
@@ -35,7 +35,7 @@
  */
 ENTRY(__cpu_flush_user_tlb_range)
        vma_vm_mm x3, x2                        // get vma->vm_mm
-       mmid    x3, x3                          // get vm_mm->context.id
+       mmid    w3, x3                          // get vm_mm->context.id
        dsb     sy
        lsr     x0, x0, #12                     // align address
        lsr     x1, x1, #12
index d22af851f3f638b09fdc4394a05e2cebf8a10201..fd7980743890b2ebcf5ff9b5ca40fc4ef83de220 100644 (file)
@@ -1,5 +1,19 @@
 
 generic-y      += clkdev.h
+generic-y       += cputime.h
+generic-y       += delay.h
+generic-y       += device.h
+generic-y       += div64.h
+generic-y       += emergency-restart.h
 generic-y      += exec.h
-generic-y      += trace_clock.h
+generic-y       += futex.h
+generic-y       += irq_regs.h
 generic-y      += param.h
+generic-y       += local.h
+generic-y       += local64.h
+generic-y       += percpu.h
+generic-y       += scatterlist.h
+generic-y       += sections.h
+generic-y       += topology.h
+generic-y      += trace_clock.h
+generic-y       += xor.h
diff --git a/arch/avr32/include/asm/cputime.h b/arch/avr32/include/asm/cputime.h
deleted file mode 100644 (file)
index e87e0f8..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_CPUTIME_H
-#define __ASM_AVR32_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ASM_AVR32_CPUTIME_H */
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
deleted file mode 100644 (file)
index 9670e12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/avr32/include/asm/device.h b/arch/avr32/include/asm/device.h
deleted file mode 100644 (file)
index d8f9872..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/avr32/include/asm/div64.h b/arch/avr32/include/asm/div64.h
deleted file mode 100644 (file)
index d7ddd4f..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_DIV64_H
-#define __ASM_AVR32_DIV64_H
-
-#include <asm-generic/div64.h>
-
-#endif /* __ASM_AVR32_DIV64_H */
diff --git a/arch/avr32/include/asm/emergency-restart.h b/arch/avr32/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 3e7e014..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
-#define __ASM_AVR32_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
diff --git a/arch/avr32/include/asm/futex.h b/arch/avr32/include/asm/futex.h
deleted file mode 100644 (file)
index 10419f1..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_FUTEX_H
-#define __ASM_AVR32_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif /* __ASM_AVR32_FUTEX_H */
diff --git a/arch/avr32/include/asm/irq_regs.h b/arch/avr32/include/asm/irq_regs.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/avr32/include/asm/local.h b/arch/avr32/include/asm/local.h
deleted file mode 100644 (file)
index 1c16196..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_LOCAL_H
-#define __ASM_AVR32_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_AVR32_LOCAL_H */
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/avr32/include/asm/percpu.h b/arch/avr32/include/asm/percpu.h
deleted file mode 100644 (file)
index 69227b4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_PERCPU_H
-#define __ASM_AVR32_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_AVR32_PERCPU_H */
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
deleted file mode 100644 (file)
index a5902d9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_SCATTERLIST_H
-#define __ASM_AVR32_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_AVR32_SCATTERLIST_H */
diff --git a/arch/avr32/include/asm/sections.h b/arch/avr32/include/asm/sections.h
deleted file mode 100644 (file)
index aa14252..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_SECTIONS_H
-#define __ASM_AVR32_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* __ASM_AVR32_SECTIONS_H */
diff --git a/arch/avr32/include/asm/topology.h b/arch/avr32/include/asm/topology.h
deleted file mode 100644 (file)
index 5b766cb..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_TOPOLOGY_H
-#define __ASM_AVR32_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* __ASM_AVR32_TOPOLOGY_H */
diff --git a/arch/avr32/include/asm/xor.h b/arch/avr32/include/asm/xor.h
deleted file mode 100644 (file)
index 99c87aa..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XOR_H
-#define _ASM_XOR_H
-
-#include <asm-generic/xor.h>
-
-#endif
index 11c4259c62fb146b8139b7045958f68d6adc3765..4399364214349674999c872ca4779c7089c22160 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* __ASM_AVR32_SOCKET_H */
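
SO_MAX_PACING_RATE is a userspace-visible option that caps a socket's transmit pacing rate in bytes per second. A minimal userspace sketch of setting it (illustrative only; the fallback define simply matches the value added above):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif

int example_set_pacing(int sockfd)
{
	unsigned int rate = 1000000;	/* cap at roughly 1 MB/s */

	if (setsockopt(sockfd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &rate, sizeof(rate)) < 0) {
		perror("setsockopt(SO_MAX_PACING_RATE)");
		return -1;
	}
	return 0;
}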
index c2731003edef556c18cd137f4a0b1aac5e770cc3..42a53e740a7ee1c93044a01742e93250dc919346 100644 (file)
@@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                memset(childregs, 0, sizeof(struct pt_regs));
                p->thread.cpu_context.r0 = arg;
                p->thread.cpu_context.r1 = usp; /* fn */
-               p->thread.cpu_context.r2 = syscall_return;
+               p->thread.cpu_context.r2 = (unsigned long)syscall_return;
                p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
                childregs->sr = MODE_SUPERVISOR;
        } else {
index 869a1c6ffeee944a64608d996877a78ee3499245..12f828ad5058d09158f8d3e2007b76a7a42cbb4a 100644 (file)
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
        case CLOCK_EVT_MODE_SHUTDOWN:
                sysreg_write(COMPARE, 0);
                pr_debug("%s: stop\n", evdev->name);
-               cpu_idle_poll_ctrl(false);
+               if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
+                   evdev->mode == CLOCK_EVT_MODE_RESUME) {
+                       /*
+                        * Only disable idle poll if we have forced that
+                        * in a previous call.
+                        */
+                       cpu_idle_poll_ctrl(false);
+               }
                break;
        default:
                BUG();
index f78c9a2c7e281f2d441a40719e6735226231ceaa..eb382aedd9a232c9d85173356bf4ed44d33fb6b5 100644 (file)
@@ -1429,7 +1429,6 @@ source "drivers/cpufreq/Kconfig"
 config BFIN_CPU_FREQ
        bool
        depends on CPU_FREQ
-       select CPU_FREQ_TABLE
        default y
 
 config CPU_VOLTAGE
index 957dd00ea561ce3881a0af87394d349fab04f449..b4f77258caccf5fa9de1231e6ab4b1716ddc5ac6 100644 (file)
@@ -105,10 +105,6 @@ menu "Processor type and features"
 
 source "arch/c6x/platforms/Kconfig"
 
-config TMS320C6X_CACHES_ON
-       bool "L2 cache support"
-       default y
-
 config KERNEL_RAM_BASE_ADDRESS
        hex "Virtual address of memory base"
        default 0xe0000000 if SOC_TMS320C6455
index 02380bed189c6fda4920412a18f1baab6203d1c1..9c957c81c6885acd595c42d1c470a1390cef6feb 100644 (file)
@@ -130,13 +130,11 @@ config SVINTO_SIM
 
 config ETRAXFS
        bool "ETRAX-FS-V32"
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Support CRIS V32.
 
 config CRIS_MACH_ARTPEC3
         bool "ARTPEC-3"
-       select CPU_FREQ_TABLE if CPU_FREQ
         help
           Support Axis ARTPEC-3.
 
index eb723e51554e559ef73d1196a455a96583a5cf51..13829aaaeec565b8e53726e95fcf4e7fde638c25 100644 (file)
@@ -78,6 +78,8 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
 
 
index f0cb1c3411638e7b33bee915739881635c59c9bc..5d4299762426b108057a33faf71b85efdfe4f74b 100644 (file)
@@ -76,5 +76,7 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
deleted file mode 100644 (file)
index 24b1dc2..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-config H8300
-       bool
-       default y
-       select HAVE_IDE
-       select GENERIC_ATOMIC64
-       select HAVE_UID16
-       select VIRT_TO_BUS
-       select ARCH_WANT_IPC_PARSE_VERSION
-       select GENERIC_IRQ_SHOW
-       select GENERIC_CPU_DEVICES
-       select MODULES_USE_ELF_RELA
-       select OLD_SIGSUSPEND3
-       select OLD_SIGACTION
-       select HAVE_UNDERSCORE_SYMBOL_PREFIX
-
-config MMU
-       bool
-       default n
-
-config SWAP
-       bool
-       default n
-
-config ZONE_DMA
-       bool
-       default y
-
-config FPU
-       bool
-       default n
-
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       default y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default n
-
-config ARCH_HAS_ILOG2_U32
-       bool
-       default n
-
-config ARCH_HAS_ILOG2_U64
-       bool
-       default n
-
-config GENERIC_HWEIGHT
-       bool
-       default y
-
-config GENERIC_CALIBRATE_DELAY
-       bool
-       default y
-
-config GENERIC_BUG
-        bool
-        depends on BUG
-
-config TIME_LOW_RES
-       bool
-       default y
-
-config NO_IOPORT
-       def_bool y
-
-config NO_DMA
-       def_bool y
-
-config ISA
-       bool
-       default y
-
-config PCI
-       bool
-       default n
-
-config HZ
-       int
-       default 100
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-source "arch/h8300/Kconfig.cpu"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "arch/h8300/Kconfig.ide"
-
-source "fs/Kconfig"
-
-source "arch/h8300/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
deleted file mode 100644 (file)
index cdee771..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-menu "Processor type and features"
-
-choice
-       prompt "H8/300 platform"
-       default H8300H_GENERIC
-
-config H8300H_GENERIC
-       bool "H8/300H Generic"
-       help
-         H8/300H CPU Generic Hardware Support
-
-config H8300H_AKI3068NET
-       bool "AE-3068/69"
-       select H83068
-       help
-         AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support
-         More Information. (Japanese Only)
-         <http://akizukidenshi.com/catalog/default.aspx>
-         AE-3068/69 Evaluation Board Support
-         More Information.
-         <http://www.microtronique.com/ae3069lan.htm>
-
-config H8300H_H8MAX
-       bool "H8MAX"
-       select H83068
-       help
-         H8MAX Evaluation Board Support
-         More Information. (Japanese Only)
-         <http://strawberry-linux.com/h8/index.html>
-
-config H8300H_SIM
-       bool "H8/300H Simulator"
-       select H83007
-       help
-         GDB Simulator Support
-         More Information.
-         <http://sourceware.org/sid/>
-
-config H8S_GENERIC
-       bool "H8S Generic"
-       help
-         H8S CPU Generic Hardware Support
-
-config H8S_EDOSK2674
-       bool "EDOSK-2674"
-       select H8S2678
-       help
-         Renesas EDOSK-2674 Evaluation Board Support
-         More Information.
-         <http://www.azpower.com/H8-uClinux/index.html>
-         <http://www.renesas.eu/products/tools/introductory_evaluation_tools/evaluation_development_os_kits/edosk2674r/edosk2674r_software_tools_root.jsp>
-
-config H8S_SIM
-       bool "H8S Simulator"
-       help
-         GDB Simulator Support
-         More Information.
-         <http://sourceware.org/sid/>
-
-endchoice
-
-choice
-       prompt "CPU Selection"
-
-config H83002
-       bool "H8/3001,3002,3003"
-       depends on BROKEN
-       select CPU_H8300H
-
-config H83007
-       bool "H8/3006,3007"
-       select CPU_H8300H
-
-config H83048
-       bool "H8/3044,3045,3046,3047,3048,3052"
-       depends on BROKEN
-       select CPU_H8300H
-
-config H83068
-       bool "H8/3065,3066,3067,3068,3069"
-       select CPU_H8300H
-
-config H8S2678
-       bool "H8S/2670,2673,2674R,2675,2676"
-       select CPU_H8S
-
-endchoice
-
-config CPU_CLOCK
-       int "CPU Clock Frequency (/1KHz)"
-       default "20000"
-       help
-         CPU Clock Frequency divide to 1000
-
-choice
-       prompt "Kernel executes from"
-       ---help---
-         Choose the memory type that the kernel will be running in.
-
-config RAMKERNEL
-       bool "RAM"
-       help
-         The kernel will be resident in RAM when running.
-
-config ROMKERNEL
-       bool "ROM"
-       help
-         The kernel will be resident in FLASH/ROM when running.
-endchoice
-
-
-config CPU_H8300H
-       bool
-       depends on (H83002 || H83007 || H83048 || H83068)
-       default y
-
-config CPU_H8S
-       bool
-       depends on H8S2678
-       default y
-
-choice
-       prompt "Timer"
-config H8300_TIMER8
-       bool "8bit timer (2ch cascade)"
-       depends on (H83007 || H83068 || H8S2678)
-
-config H8300_TIMER16
-       bool "16bit timer"
-       depends on (H83007 || H83068)
-
-config H8300_ITU
-       bool "ITU"
-       depends on (H83002 || H83048)
-
-config H8300_TPU
-       bool "TPU"
-       depends on H8S2678
-endchoice
-
-if H8300_TIMER8
-choice
-       prompt "Timer Channel"
-config H8300_TIMER8_CH0
-       bool "Channel 0"
-config H8300_TIMER8_CH2
-       bool "Channel 2"
-       depends on CPU_H8300H
-endchoice
-endif
-
-config H8300_TIMER16_CH
-       int "16bit timer channel (0 - 2)"
-       depends on H8300_TIMER16
-       range 0 2
-
-config H8300_ITU_CH
-       int "ITU channel"
-       depends on H8300_ITU
-       range 0 4
-
-config H8300_TPU_CH
-       int "TPU channel"
-       depends on H8300_TPU
-       range 0 4
-
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
-endmenu
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
deleted file mode 100644 (file)
index e8d1b23..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-config FULLDEBUG
-       bool "Full Symbolic/Source Debugging support"
-       help
-         Enable debugging symbols on kernel build.
-
-config HIGHPROFILE
-       bool "Use fast second timer for profiling"
-       help
-         Use a fast secondary clock to produce profiling information.
-
-config NO_KERNEL_MSG
-       bool "Suppress Kernel BUG Messages"
-       help
-         Do not output any debug BUG messages within the kernel.
-
-config GDB_MAGICPRINT
-       bool "Message Output for GDB MagicPrint service"
-       depends on (H8300H_SIM || H8S_SIM)
-       help
-         kernel messages output using MagicPrint service from GDB
-
-config SYSCALL_PRINT
-       bool "SystemCall trace print"
-       help
-         output history of systemcall
-
-config GDB_DEBUG
-       bool "Use gdb stub"
-       depends on (!H8300H_SIM && !H8S_SIM)
-       help
-         gdb stub exception support
-
-config SH_STANDARD_BIOS
-       bool "Use gdb protocol serial console"
-       depends on (!H8300H_SIM && !H8S_SIM)
-       help
-         serial console output using GDB protocol.
-         Require eCos/RedBoot
-
-config DEFAULT_CMDLINE
-       bool "Use builtin commandline"
-       default n
-       help
-         builtin kernel commandline enabled.
-
-config KERNEL_COMMAND
-       string "Buildin command string"
-       depends on DEFAULT_CMDLINE
-       help
-         builtin kernel commandline strings.
-
-config BLKDEV_RESERVE
-       bool "BLKDEV Reserved Memory"
-       default n
-       help
-         Reserved BLKDEV area.
-
-config BLKDEV_RESERVE_ADDRESS
-       hex 'start address'
-       depends on BLKDEV_RESERVE
-       help
-         BLKDEV start address.
-
-endmenu
diff --git a/arch/h8300/Kconfig.ide b/arch/h8300/Kconfig.ide
deleted file mode 100644 (file)
index a38a630..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# uClinux H8/300 Target Board Selection Menu (IDE)
-
-if (H8300H_AKI3068NET)
-menu "IDE Extra configuration"
-
-config H8300_IDE_BASE
-       hex "IDE register base address"
-       depends on IDE
-       default 0
-       help
-         IDE registers base address
-
-config H8300_IDE_ALT
-       hex "IDE register alternate address"
-       depends on IDE
-       default 0
-       help
-         IDE alternate registers address
-
-config H8300_IDE_IRQ
-       int "IDE IRQ no"
-       depends on IDE
-       default 0
-       help
-         IDE use IRQ no
-endmenu
-endif
-
-if (H8300H_H8MAX)
-config H8300_IDE_BASE
-       hex
-       depends on IDE
-       default 0x200000
-
-config H8300_IDE_ALT
-       hex
-       depends on IDE
-       default 0x60000c
-
-config H8300_IDE_IRQ
-       int
-       depends on IDE
-       default 5
-endif
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
deleted file mode 100644 (file)
index a556447..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# arch/h8300/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# (C) Copyright 2002,2003 Yoshinori Sato <ysato@users.sourceforge.jp>
-#
-
-platform-$(CONFIG_CPU_H8300H)  := h8300h
-platform-$(CONFIG_CPU_H8S)     := h8s
-PLATFORM := $(platform-y)
-
-board-$(CONFIG_H8300H_GENERIC)         := generic
-board-$(CONFIG_H8300H_AKI3068NET)      := aki3068net
-board-$(CONFIG_H8300H_H8MAX)           := h8max
-board-$(CONFIG_H8300H_SIM)             := generic
-board-$(CONFIG_H8S_GENERIC)            := generic
-board-$(CONFIG_H8S_EDOSK2674)          := edosk2674
-board-$(CONFIG_H8S_SIM)                        := generic
-BOARD := $(board-y)
-
-model-$(CONFIG_RAMKERNEL)      := ram
-model-$(CONFIG_ROMKERNEL)      := rom
-MODEL := $(model-y)
-
-cflags-$(CONFIG_CPU_H8300H)    := -mh
-ldflags-$(CONFIG_CPU_H8300H)   := -mh8300helf
-cflags-$(CONFIG_CPU_H8S)       := -ms
-ldflags-$(CONFIG_CPU_H8S)      := -mh8300self
-
-KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CFLAGS += -mint32 -fno-builtin
-KBUILD_CFLAGS += -g
-KBUILD_CFLAGS += -D__linux__
-KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
-KBUILD_AFLAGS += -DPLATFORM=$(PLATFORM) -DMODEL=$(MODEL) $(cflags-y)
-LDFLAGS += $(ldflags-y)
-
-CROSS_COMPILE = h8300-elf-
-LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
-head-y := arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/crt0_$(MODEL).o
-
-core-y += arch/$(ARCH)/kernel/ \
-          arch/$(ARCH)/mm/
-ifdef PLATFORM
-core-y += arch/$(ARCH)/platform/$(PLATFORM)/ \
-          arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/
-endif
-
-libs-y += arch/$(ARCH)/lib/ $(LIBGCC)
-
-boot := arch/h8300/boot
-
-export MODEL PLATFORM BOARD
-
-archmrproper:
-
-archclean:
-       $(Q)$(MAKE) $(clean)=$(boot)
-
-vmlinux.srec vmlinux.bin zImage: vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-
-define archhelp
-  @echo  'vmlinux.bin  - Create raw binary'
-  @echo  'vmlinux.srec - Create srec binary'
-  @echo  'zImage       - Compressed kernel image'
-endef
diff --git a/arch/h8300/README b/arch/h8300/README
deleted file mode 100644 (file)
index efa805f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-linux-2.6 for H8/300 README
-Yoshinori Sato <ysato@users.sourceforge.jp>
-
-* Supported CPU
-H8/300H and H8S
-
-* Supported Target
-1.simulator of GDB
-  require patches.
-
-2.AE 3068/AE 3069
-  more information 
-  MICROTRONIQUE <http://www.microtronique.com/>
-  Akizuki Denshi Tsusho Ltd. <http://akizukidenshi.com/> (Japanese Only)
-
-3.H8MAX 
-  see http://ip-sol.jp/h8max/ (Japanese Only)
-
-4.EDOSK2674
-  see http://www.eu.renesas.com/products/mpumcu/tool/edk/support/edosk2674.html
-      http://www.uclinux.org/pub/uClinux/ports/h8/HITACHI-EDOSK2674-HOWTO
-      http://www.azpower.com/H8-uClinux/
-
-* Toolchain Version
-gcc-3.1 or higher and patch
-see arch/h8300/tools_patch/README
-binutils-2.12 or higher
-gdb-5.2 or higher
-The environment that can compile a h8300-elf binary is necessary.
-
-* Userland Develop environment
-used h8300-elf toolchains.
-see http://www.uclinux.org/pub/uClinux/ports/h8/
-
-* A few words of thanks
-Porting to H8/300 serieses is support of Information-technology Promotion Agency, Japan.
-I thank support.
-and All developer/user.
diff --git a/arch/h8300/boot/Makefile b/arch/h8300/boot/Makefile
deleted file mode 100644 (file)
index 0bb62e0..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# arch/h8300/boot/Makefile
-
-targets := vmlinux.srec vmlinux.bin zImage
-subdir- := compressed
-
-OBJCOPYFLAGS_vmlinux.srec := -Osrec
-OBJCOPYFLAGS_vmlinux.bin  := -Obinary
-OBJCOPYFLAGS_zImage := -O binary -R .note -R .comment -R .stab -R .stabstr -S
-
-$(obj)/vmlinux.srec $(obj)/vmlinux.bin:  vmlinux FORCE
-       $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready'
-
-$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
-       $(call if_changed,objcopy)
-       @echo 'Kernel: $@ is ready'
-
-$(obj)/compressed/vmlinux: FORCE
-       $(Q)$(MAKE) $(build)=$(obj)/compressed $@
-
-CLEAN_FILES += arch/$(ARCH)/vmlinux.bin arch/$(ARCH)/vmlinux.srec
-
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
deleted file mode 100644 (file)
index a6c98fe..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# linux/arch/sh/boot/compressed/Makefile
-#
-# create a compressed vmlinux image from the original vmlinux
-#
-
-targets                := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
-asflags-y      := -traditional
-
-OBJECTS = $(obj)/head.o $(obj)/misc.o
-
-#
-# IMAGE_OFFSET is the load offset of the compression loader
-# Assign dummy values if these 2 variables are not defined,
-# in order to suppress error message.
-#
-CONFIG_MEMORY_START     ?= 0x00400000
-CONFIG_BOOT_LINK_OFFSET ?= 0x00140000
-IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET))))
-
-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup $(obj)/vmlinux.lds
-
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
-       $(call if_changed,ld)
-       @:
-
-$(obj)/vmlinux.bin: vmlinux FORCE
-       $(call if_changed,objcopy)
-
-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-       $(call if_changed,gzip)
-
-LDFLAGS_piggy.o := -r --format binary --oformat elf32-h8300 -T
-OBJCOPYFLAGS := -O binary
-
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
-       $(call if_changed,ld)
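
The IMAGE_OFFSET above is simply the RAM base plus the boot link offset, computed at build time with a shell printf. A standalone C sketch of the same arithmetic, using the fallback values shown in the Makefile (they are only defaults, supplied when the Kconfig symbols are undefined):

#include <stdio.h>

int main(void)
{
    unsigned long memory_start     = 0x00400000UL; /* CONFIG_MEMORY_START fallback     */
    unsigned long boot_link_offset = 0x00140000UL; /* CONFIG_BOOT_LINK_OFFSET fallback */

    /* the decompressor is linked to run at RAM base + link offset */
    printf("IMAGE_OFFSET = 0x%08lx\n", memory_start + boot_link_offset);
    return 0; /* prints IMAGE_OFFSET = 0x00540000 */
}
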
diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S
deleted file mode 100644 (file)
index 10e9a2d..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *  linux/arch/h8300/boot/compressed/head.S
- *
- *  Copyright (C) 2006 Yoshinori Sato
- */
-
-       .h8300h
-#include <linux/linkage.h>
-
-#define SRAM_START 0xff4000
-
-       .section        .text..startup
-       .global startup
-startup:
-       mov.l   #SRAM_START+0x8000, sp
-       mov.l   #__sbss, er0
-       mov.l   #__ebss, er1
-       sub.l   er0, er1
-       shlr    er1
-       shlr    er1
-       sub.l   er2, er2
-1:
-       mov.l   er2, @er0
-       adds    #4, er0
-       dec.l   #1, er1
-       bne     1b
-       jsr     @_decompress_kernel
-       jmp     @0x400000
-
-       .align  9
-fake_headers_as_bzImage:
-       .word   0
-       .ascii  "HdrS"          ; header signature
-       .word   0x0202          ; header version number (>= 0x0105,
-                               ; or else old loadlin-1.5 will fail)
-       .word   0               ; default_switch
-       .word   0               ; SETUPSEG
-       .word   0x1000
-       .word   0               ; pointing to kernel version string
-       .byte   0               ; = 0, old one (LILO, Loadlin,
-                               ; 0xTV: T=0 for LILO
-                               ;       V = version
-       .byte   1               ; Load flags bzImage=1
-       .word   0x8000          ; size to move, when setup is not
-       .long   0x100000        ; 0x100000 = default for big kernel
-       .long   0               ; address of loaded ramdisk image
-       .long   0               ; its size in bytes
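
The startup code above clears .bss before jumping into the decompressor: the byte count __ebss - __sbss is shifted right twice to turn it into a longword count, then zeros are stored one 32-bit word at a time. A rough C equivalent (a local array stands in for the real __sbss/__ebss symbols):

#include <stdint.h>
#include <stdio.h>

static void clear_bss(uint32_t *sbss, uint32_t *ebss)
{
    /* byte length shifted right twice = number of 32-bit words (the two shlr's) */
    unsigned long words = (unsigned long)((char *)ebss - (char *)sbss) >> 2;

    while (words--)
        *sbss++ = 0;            /* the mov.l er2,@er0 / adds #4 loop */
}

int main(void)
{
    uint32_t fake_bss[4] = { 1, 2, 3, 4 };      /* stand-in for __sbss..__ebss */

    clear_bss(fake_bss, fake_bss + 4);
    printf("%u %u %u %u\n", (unsigned)fake_bss[0], (unsigned)fake_bss[1],
           (unsigned)fake_bss[2], (unsigned)fake_bss[3]);   /* 0 0 0 0 */
    return 0;
}
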
diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c
deleted file mode 100644 (file)
index 4a1e3dd..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * arch/h8300/boot/compressed/misc.c
- *
- * This is a collection of several routines from gzip-1.0.3
- * adapted for Linux.
- *
- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
- *
- * Adapted for h8300 by Yoshinori Sato 2006
- */
-
-#include <asm/uaccess.h>
-
-/*
- * gzip declarations
- */
-
-#define OF(args)  args
-#define STATIC static
-
-#undef memset
-#undef memcpy
-#define memzero(s, n)     memset ((s), 0, (n))
-
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
-
-#define WSIZE 0x8000           /* Window size must be at least 32k, */
-                               /* and a power of two */
-
-static uch *inbuf;          /* input buffer */
-static uch window[WSIZE];    /* Sliding window buffer */
-
-static unsigned insize = 0;  /* valid bytes in inbuf */
-static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0;  /* bytes in output buffer */
-
-/* gzip flag byte */
-#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
-#define COMMENT      0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
-#define RESERVED     0xC0 /* bit 6,7:   reserved */
-
-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-#  define Assert(cond,msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c,x)
-#  define Tracecv(c,x)
-#endif
-
-static int  fill_inbuf(void);
-static void flush_window(void);
-static void error(char *m);
-
-extern char input_data[];
-extern int input_len;
-
-static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
-
-static void error(char *m);
-
-int puts(const char *);
-
-extern int _end;
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#define HEAP_SIZE             0x10000
-
-#include "../../../../lib/inflate.c"
-
-#define SCR *((volatile unsigned char *)0xffff8a)
-#define TDR *((volatile unsigned char *)0xffff8b)
-#define SSR *((volatile unsigned char *)0xffff8c)
-
-int puts(const char *s)
-{
-       return 0;
-}
-
-void* memset(void* s, int c, size_t n)
-{
-       int i;
-       char *ss = (char*)s;
-
-       for (i=0;i<n;i++) ss[i] = c;
-       return s;
-}
-
-void* memcpy(void* __dest, __const void* __src,
-                           size_t __n)
-{
-       int i;
-       char *d = (char *)__dest, *s = (char *)__src;
-
-       for (i=0;i<__n;i++) d[i] = s[i];
-       return __dest;
-}
-
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- */
-static int fill_inbuf(void)
-{
-       if (insize != 0) {
-               error("ran out of input data");
-       }
-
-       inbuf = input_data;
-       insize = input_len;
-       inptr = 1;
-       return inbuf[0];
-}
-
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void flush_window(void)
-{
-    ulg c = crc;         /* temporary variable */
-    unsigned n;
-    uch *in, *out, ch;
-
-    in = window;
-    out = &output_data[output_ptr];
-    for (n = 0; n < outcnt; n++) {
-           ch = *out++ = *in++;
-           c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    output_ptr += (ulg)outcnt;
-    outcnt = 0;
-}
-
-static void error(char *x)
-{
-       puts("\n\n");
-       puts(x);
-       puts("\n\n -- System halted");
-
-       while(1);       /* Halt */
-}
-
-#define STACK_SIZE (4096)
-long user_stack [STACK_SIZE];
-long* stack_start = &user_stack[STACK_SIZE];
-
-void decompress_kernel(void)
-{
-       output_data = 0;
-       output_ptr = (unsigned long)0x400000;
-       free_mem_ptr = (unsigned long)&_end;
-       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-
-       makecrc();
-       puts("Uncompressing Linux... ");
-       gunzip();
-       puts("Ok, booting the kernel.\n");
-}
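
misc.c pulls compressed bytes through get_byte(), refilling the buffer exactly once from the input_data/input_len pair that piggy.o links in around the gzipped image. A small self-contained sketch of that refill-once pattern (the four-byte array here is a dummy payload, not real gzip data):

#include <stdio.h>
#include <stdlib.h>

static const unsigned char input_data[] = { 0x1f, 0x8b, 0x08, 0x00 }; /* dummy payload */
static const int input_len = sizeof(input_data);

static const unsigned char *inbuf;
static unsigned insize, inptr;

static int fill_inbuf(void)
{
    if (insize != 0) {                  /* already refilled once: nothing left */
        fprintf(stderr, "ran out of input data\n");
        exit(1);
    }
    inbuf  = input_data;
    insize = input_len;
    inptr  = 1;
    return inbuf[0];
}

#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())

int main(void)
{
    for (int i = 0; i < input_len; i++)
        printf("%02x ", get_byte());    /* prints: 1f 8b 08 00 */
    printf("\n");
    return 0;
}
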
diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds
deleted file mode 100644 (file)
index a0a3a0e..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-SECTIONS
-{
-        .text :
-        {
-        __stext = . ;
-       __text = .;
-              *(.text..startup)
-              *(.text)
-        __etext = . ;
-        }
-
-       .rodata :
-       {
-               *(.rodata)
-       }
-        .data :
-
-        {
-        __sdata = . ;
-        ___data_start = . ;
-                *(.data.*)
-       }
-        .bss :
-        {
-        . = ALIGN(0x4) ;
-        __sbss = . ;
-                *(.bss*)
-        . = ALIGN(0x4) ;
-        __ebss = . ;
-        __end = . ;
-        }
-}
diff --git a/arch/h8300/boot/compressed/vmlinux.scr b/arch/h8300/boot/compressed/vmlinux.scr
deleted file mode 100644 (file)
index a0f6962..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-SECTIONS
-{
-  .data : {
-       _input_len = .;
-       LONG(_input_data_end - _input_data) _input_data = .;
-       *(.data)
-       _input_data_end = .;
-       }
-}
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
deleted file mode 100644 (file)
index 042425a..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-# CONFIG_UID16 is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
-# CONFIG_BASE_FULL is not set
-# CONFIG_FUTEX is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_TIMERFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_H83007=y
-CONFIG_BINFMT_FLAT=y
-CONFIG_BINFMT_ZFLAT=y
-CONFIG_BINFMT_MISC=y
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_RAM=y
-CONFIG_MTD_ROM=y
-CONFIG_MTD_UCLINUX=y
-# CONFIG_BLK_DEV is not set
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_ROMFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
deleted file mode 100644 (file)
index 8ada3cf..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-
-generic-y += clkdev.h
-generic-y += exec.h
-generic-y += linkage.h
-generic-y += mmu.h
-generic-y += module.h
-generic-y += trace_clock.h
-generic-y += xor.h
diff --git a/arch/h8300/include/asm/asm-offsets.h b/arch/h8300/include/asm/asm-offsets.h
deleted file mode 100644 (file)
index d370ee3..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644 (file)
index 40901e3..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v)         (*(volatile int *)&(v)->counter)
-#define atomic_set(v, i)       (((v)->counter) = i)
-
-#include <linux/kernel.h>
-
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       ret = v->counter += i;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       ret = v->counter -= i;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       v->counter++;
-       ret = v->counter;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       --v->counter;
-       ret = v->counter;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       --v->counter;
-       ret = v->counter;
-       local_irq_restore(flags);
-       return ret == 0;
-}
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       int ret;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       ret = v->counter;
-       if (likely(ret == old))
-               v->counter = new;
-       local_irq_restore(flags);
-       return ret;
-}
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int ret;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       ret = v->counter;
-       if (ret != u)
-               v->counter += a;
-       local_irq_restore(flags);
-       return ret;
-}
-
-static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-       __asm__ __volatile__("stc ccr,r1l\n\t"
-                            "orc #0x80,ccr\n\t"
-                            "mov.l %0,er0\n\t"
-                            "and.l %1,er0\n\t"
-                            "mov.l er0,%0\n\t"
-                            "ldc r1l,ccr" 
-                             : "=m" (*v) : "g" (~(mask)) :"er0","er1");
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-       __asm__ __volatile__("stc ccr,r1l\n\t"
-                            "orc #0x80,ccr\n\t"
-                            "mov.l %0,er0\n\t"
-                            "or.l %1,er0\n\t"
-                            "mov.l er0,%0\n\t"
-                            "ldc r1l,ccr" 
-                             : "=m" (*v) : "g" (mask) :"er0","er1");
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* __ARCH_H8300_ATOMIC__ */
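
On this uniprocessor port the atomic primitives above only have to be atomic with respect to interrupts, so each one is a plain C read-modify-write bracketed by local_irq_save()/local_irq_restore(). A runnable sketch of that pattern, with the interrupt save/restore reduced to no-ops so the sketch can run anywhere:

#include <stdio.h>

typedef struct { volatile int counter; } my_atomic_t;

/* On the real hardware these would save/disable and restore the CCR
 * interrupt mask; they are no-ops here so the sketch is portable. */
static unsigned long irq_save(void)          { return 0; }
static void irq_restore(unsigned long flags) { (void)flags; }

static int my_atomic_add_return(int i, my_atomic_t *v)
{
    unsigned long flags = irq_save();   /* nothing can preempt us from here on */
    int ret = (v->counter += i);
    irq_restore(flags);
    return ret;
}

int main(void)
{
    my_atomic_t v = { 0 };
    printf("%d\n", my_atomic_add_return(5, &v));  /* prints 5 */
    return 0;
}
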
diff --git a/arch/h8300/include/asm/barrier.h b/arch/h8300/include/asm/barrier.h
deleted file mode 100644 (file)
index 9e0aa9f..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _H8300_BARRIER_H
-#define _H8300_BARRIER_H
-
-#define nop()  asm volatile ("nop"::)
-
-/*
- * Force strict CPU ordering.
- * Not really required on H8...
- */
-#define mb()   asm volatile (""   : : :"memory")
-#define rmb()  asm volatile (""   : : :"memory")
-#define wmb()  asm volatile (""   : : :"memory")
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
-#endif
-
-#endif /* _H8300_BARRIER_H */
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
deleted file mode 100644 (file)
index eb34e0c..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#ifndef _H8300_BITOPS_H
-#define _H8300_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Copyright 2002, Yoshinori Sato
- */
-
-#include <linux/compiler.h>
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-/*
- * Function prototypes to keep gcc -Wall happy
- */
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
-       unsigned long result;
-
-       result = -1;
-       __asm__("1:\n\t"
-               "shlr.l %2\n\t"
-               "adds #1,%0\n\t"
-               "bcs 1b"
-               : "=r" (result)
-               : "0"  (result),"r" (word));
-       return result;
-}
-
-#define H8300_GEN_BITOP_CONST(OP,BIT)                      \
-       case BIT:                                           \
-       __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \
-       break;
-
-#define H8300_GEN_BITOP(FNAME,OP)                                    \
-static __inline__ void FNAME(int nr, volatile unsigned long* addr)    \
-{                                                                    \
-       volatile unsigned char *b_addr;                               \
-       b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);    \
-       if (__builtin_constant_p(nr)) {                               \
-               switch(nr & 7) {                                      \
-                       H8300_GEN_BITOP_CONST(OP,0)                   \
-                       H8300_GEN_BITOP_CONST(OP,1)                   \
-                       H8300_GEN_BITOP_CONST(OP,2)                   \
-                       H8300_GEN_BITOP_CONST(OP,3)                   \
-                       H8300_GEN_BITOP_CONST(OP,4)                   \
-                       H8300_GEN_BITOP_CONST(OP,5)                   \
-                       H8300_GEN_BITOP_CONST(OP,6)                   \
-                       H8300_GEN_BITOP_CONST(OP,7)                   \
-               }                                                     \
-       } else {                                                      \
-               __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \
-       }                                                             \
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()     barrier()
-#define smp_mb__after_clear_bit()      barrier()
-
-H8300_GEN_BITOP(set_bit          ,"bset")
-H8300_GEN_BITOP(clear_bit ,"bclr")
-H8300_GEN_BITOP(change_bit,"bnot")
-#define __set_bit(nr,addr)    set_bit((nr),(addr))
-#define __clear_bit(nr,addr)  clear_bit((nr),(addr))
-#define __change_bit(nr,addr) change_bit((nr),(addr))
-
-#undef H8300_GEN_BITOP
-#undef H8300_GEN_BITOP_CONST
-
-static __inline__ int test_bit(int nr, const unsigned long* addr)
-{
-       return (*((volatile unsigned char *)addr + 
-               ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
-}
-
-#define __test_bit(nr, addr) test_bit(nr, addr)
-
-#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT)                      \
-       case BIT:                                                    \
-       __asm__("stc ccr,%w1\n\t"                                    \
-               "orc #0x80,ccr\n\t"                                  \
-               "bld #" #BIT ",@%4\n\t"                              \
-               OP " #" #BIT ",@%4\n\t"                              \
-               "rotxl.l %0\n\t"                                     \
-               "ldc %w1,ccr"                                        \
-               : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr)          \
-               : "0" (retval),"r" (b_addr)                          \
-               : "memory");                                         \
-        break;
-
-#define H8300_GEN_TEST_BITOP_CONST(OP,BIT)                          \
-       case BIT:                                                    \
-       __asm__("bld #" #BIT ",@%3\n\t"                              \
-               OP " #" #BIT ",@%3\n\t"                              \
-               "rotxl.l %0\n\t"                                     \
-               : "=r"(retval),"=m"(*b_addr)                         \
-               : "0" (retval),"r" (b_addr)                          \
-               : "memory");                                         \
-        break;
-
-#define H8300_GEN_TEST_BITOP(FNNAME,OP)                                     \
-static __inline__ int FNNAME(int nr, volatile void * addr)          \
-{                                                                   \
-       int retval = 0;                                              \
-       char ccrsave;                                                \
-       volatile unsigned char *b_addr;                              \
-       b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);   \
-       if (__builtin_constant_p(nr)) {                              \
-               switch(nr & 7) {                                     \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,0)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,1)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,2)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,3)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,4)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,5)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,6)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,7)         \
-               }                                                    \
-       } else {                                                     \
-               __asm__("stc ccr,%w1\n\t"                            \
-                       "orc #0x80,ccr\n\t"                          \
-                       "btst %w5,@%4\n\t"                           \
-                       OP " %w5,@%4\n\t"                            \
-                       "beq 1f\n\t"                                 \
-                       "inc.l #1,%0\n"                              \
-                       "1:\n\t"                                     \
-                       "ldc %w1,ccr"                                \
-                       : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr)  \
-                       : "0" (retval),"r" (b_addr),"r"(nr)          \
-                       : "memory");                                 \
-       }                                                            \
-       return retval;                                               \
-}                                                                   \
-                                                                    \
-static __inline__ int __ ## FNNAME(int nr, volatile void * addr)     \
-{                                                                   \
-       int retval = 0;                                              \
-       volatile unsigned char *b_addr;                              \
-       b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);   \
-       if (__builtin_constant_p(nr)) {                              \
-               switch(nr & 7) {                                     \
-                       H8300_GEN_TEST_BITOP_CONST(OP,0)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,1)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,2)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,3)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,4)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,5)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,6)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,7)             \
-               }                                                    \
-       } else {                                                     \
-               __asm__("btst %w4,@%3\n\t"                           \
-                       OP " %w4,@%3\n\t"                            \
-                       "beq 1f\n\t"                                 \
-                       "inc.l #1,%0\n"                              \
-                       "1:"                                         \
-                       : "=r"(retval),"=m"(*b_addr)                 \
-                       : "0" (retval),"r" (b_addr),"r"(nr)          \
-                       : "memory");                                 \
-       }                                                            \
-       return retval;                                               \
-}
-
-H8300_GEN_TEST_BITOP(test_and_set_bit,  "bset")
-H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr")
-H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot")
-#undef H8300_GEN_TEST_BITOP_CONST
-#undef H8300_GEN_TEST_BITOP_CONST_INT
-#undef H8300_GEN_TEST_BITOP
-
-#include <asm-generic/bitops/ffs.h>
-
-static __inline__ unsigned long __ffs(unsigned long word)
-{
-       unsigned long result;
-
-       result = -1;
-       __asm__("1:\n\t"
-               "shlr.l %2\n\t"
-               "adds #1,%0\n\t"
-               "bcc 1b"
-               : "=r" (result)
-               : "0"(result),"r"(word));
-       return result;
-}
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/le.h>
-#include <asm-generic/bitops/ext2-atomic.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _H8300_BITOPS_H */
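
The ffz()/__ffs() helpers above shift the word right one bit at a time, counting until the bit shifted out is a zero (ffz) or a one (__ffs); as the comment notes, passing ~0UL (or 0UL for __ffs) is undefined. A portable C sketch of the same loops:

#include <stdio.h>

static unsigned long my_ffz(unsigned long word)
{
    unsigned long result = 0;

    while (word & 1) {          /* stop at the first zero bit */
        word >>= 1;
        result++;
    }
    return result;
}

static unsigned long my_ffs_builtin(unsigned long word)
{
    unsigned long result = 0;

    while (!(word & 1)) {       /* stop at the first set bit */
        word >>= 1;
        result++;
    }
    return result;
}

int main(void)
{
    printf("ffz(0b0111)   = %lu\n", my_ffz(0x7));          /* 3 */
    printf("__ffs(0b1000) = %lu\n", my_ffs_builtin(0x8));   /* 3 */
    return 0;
}
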
diff --git a/arch/h8300/include/asm/bootinfo.h b/arch/h8300/include/asm/bootinfo.h
deleted file mode 100644 (file)
index 5bed7e7..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-/* Nothing for h8300 */
diff --git a/arch/h8300/include/asm/bug.h b/arch/h8300/include/asm/bug.h
deleted file mode 100644 (file)
index 1e1be81..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _H8300_BUG_H
-#define _H8300_BUG_H
-
-/* always true */
-#define is_valid_bugaddr(addr) (1)
-
-#include <asm-generic/bug.h>
-
-struct pt_regs;
-extern void die(const char *str, struct pt_regs *fp, unsigned long err);
-
-#endif
diff --git a/arch/h8300/include/asm/bugs.h b/arch/h8300/include/asm/bugs.h
deleted file mode 100644 (file)
index 1cb4afb..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- *  include/asm-h8300/bugs.h
- *
- *  Copyright (C) 1994  Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- *     void check_bugs(void);
- */
-
-static void check_bugs(void)
-{
-}
diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
deleted file mode 100644 (file)
index 05887a1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_H8300_CACHE_H
-#define __ARCH_H8300_CACHE_H
-
-/* bytes per L1 cache line */
-#define        L1_CACHE_SHIFT  2
-#define        L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
-
-/* m68k-elf-gcc  2.95.2 doesn't like these */
-
-#define __cacheline_aligned
-#define ____cacheline_aligned
-
-#endif
diff --git a/arch/h8300/include/asm/cachectl.h b/arch/h8300/include/asm/cachectl.h
deleted file mode 100644 (file)
index c464022..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _H8300_CACHECTL_H
-#define _H8300_CACHECTL_H
-
-/* Definitions for the cacheflush system call.  */
-
-#define FLUSH_SCOPE_LINE    0  /* Flush a cache line */
-#define FLUSH_SCOPE_PAGE    0  /* Flush a page */
-#define FLUSH_SCOPE_ALL     0  /* Flush the whole cache -- superuser only */
-
-#define FLUSH_CACHE_DATA    0  /* Writeback and flush data cache */
-#define FLUSH_CACHE_INSN    0  /* Flush instruction cache */
-#define FLUSH_CACHE_BOTH    0  /* Flush both caches */
-
-#endif /* _H8300_CACHECTL_H */
diff --git a/arch/h8300/include/asm/cacheflush.h b/arch/h8300/include/asm/cacheflush.h
deleted file mode 100644 (file)
index 4cf2df2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * (C) Copyright 2002, Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#ifndef _ASM_H8300_CACHEFLUSH_H
-#define _ASM_H8300_CACHEFLUSH_H
-
-/*
- * Cache handling functions
- * No Cache memory all dummy functions
- */
-
-#define flush_cache_all()
-#define        flush_cache_mm(mm)
-#define        flush_cache_dup_mm(mm)          do { } while (0)
-#define        flush_cache_range(vma,a,b)
-#define        flush_cache_page(vma,p,pfn)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define        flush_dcache_page(page)
-#define        flush_dcache_mmap_lock(mapping)
-#define        flush_dcache_mmap_unlock(mapping)
-#define        flush_icache()
-#define        flush_icache_page(vma,page)
-#define        flush_icache_range(start,len)
-#define flush_cache_vmap(start, end)
-#define flush_cache_vunmap(start, end)
-#define        cache_push_v(vaddr,len)
-#define        cache_push(paddr,len)
-#define        cache_clear(paddr,len)
-
-#define        flush_dcache_range(a,b)
-
-#define        flush_icache_user_range(vma,page,addr,len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-
-#endif /* _ASM_H8300_CACHEFLUSH_H */
diff --git a/arch/h8300/include/asm/checksum.h b/arch/h8300/include/asm/checksum.h
deleted file mode 100644 (file)
index 98724e1..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef _H8300_CHECKSUM_H
-#define _H8300_CHECKSUM_H
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
-
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-                                               int len, __wsum sum, int *csum_err);
-
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-
-/*
- *     Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-       __asm__("mov.l %0,er0\n\t"
-               "add.w e0,r0\n\t"
-               "xor.w e0,e0\n\t"
-               "rotxl.w e0\n\t"
-               "add.w e0,r0\n\t"
-               "sub.w e0,e0\n\t"
-               "mov.l er0,%0"
-               : "=r"(sum)
-               : "0"(sum)
-               : "er0");
-       return (__force __sum16)~sum;
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
-                 unsigned short proto, __wsum sum)
-{
-       __asm__ ("sub.l er0,er0\n\t"
-                "add.l %2,%0\n\t"
-                "addx  #0,r0l\n\t"
-                "add.l %3,%0\n\t"
-                "addx  #0,r0l\n\t"
-                "add.l %4,%0\n\t"
-                "addx  #0,r0l\n\t"
-                "add.l er0,%0\n\t"
-                "bcc   1f\n\t"
-                "inc.l #1,%0\n"
-                "1:"
-                : "=&r" (sum)
-                : "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto)
-                :"er0");
-       return sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
-                 unsigned short proto, __wsum sum)
-{
-       return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-#endif /* _H8300_CHECKSUM_H */
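
csum_fold() above collapses the running 32-bit sum into a 16-bit one's-complement checksum: the two 16-bit halves are added (with any carry folded back in) and the result is inverted. A generic C sketch with a worked value (the input 0x0001f203 is just an example number):

#include <stdio.h>
#include <stdint.h>

static uint16_t my_csum_fold(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);   /* fold high half into low half */
    sum = (sum & 0xffff) + (sum >> 16);   /* add any carry produced above */
    return (uint16_t)~sum;
}

int main(void)
{
    /* 0x0001f203 folds to 0x0001 + 0xf203 = 0xf204; checksum is ~0xf204 */
    printf("0x%04x\n", my_csum_fold(0x0001f203));   /* prints 0x0dfb */
    return 0;
}
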
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644 (file)
index cdb203e..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-  unsigned long tmp, flags;
-
-  local_irq_save(flags);
-
-  switch (size) {
-  case 1:
-    __asm__ __volatile__
-    ("mov.b %2,%0\n\t"
-     "mov.b %1,%2"
-    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  case 2:
-    __asm__ __volatile__
-    ("mov.w %2,%0\n\t"
-     "mov.w %1,%2"
-    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  case 4:
-    __asm__ __volatile__
-    ("mov.l %2,%0\n\t"
-     "mov.l %1,%2"
-    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  default:
-    tmp = 0;     
-  }
-  local_irq_restore(flags);
-  return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)                                              \
-       ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-                       (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
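
__xchg() above swaps a new value into memory and returns the old one; on this port "atomic" again just means interrupts are disabled around the two moves. A minimal stand-alone sketch of the same semantics (the irq save/restore points are marked by comments only):

#include <stdio.h>

static unsigned long my_xchg(unsigned long x, volatile unsigned long *ptr)
{
    /* local_irq_save() would go here on the real hardware */
    unsigned long tmp = *ptr;   /* read the old value          */
    *ptr = x;                   /* store the new one            */
    /* local_irq_restore() would go here */
    return tmp;
}

int main(void)
{
    volatile unsigned long v = 42;
    unsigned long old = my_xchg(7, &v);
    printf("old=%lu new=%lu\n", old, (unsigned long)v);  /* old=42 new=7 */
    return 0;
}
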
diff --git a/arch/h8300/include/asm/cputime.h b/arch/h8300/include/asm/cputime.h
deleted file mode 100644 (file)
index 092e187..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_CPUTIME_H
-#define __H8300_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __H8300_CPUTIME_H */
diff --git a/arch/h8300/include/asm/current.h b/arch/h8300/include/asm/current.h
deleted file mode 100644 (file)
index 57d74ee..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _H8300_CURRENT_H
-#define _H8300_CURRENT_H
-/*
- *     current.h
- *     (C) Copyright 2000, Lineo, David McCullough <davidm@lineo.com>
- *     (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- *
 - *     Rather than dedicate a register (as the m68k source does), we
 - *     just keep a global; we should probably change it all to be
 - *     current and lose _current_task.
- */
-
-#include <linux/thread_info.h>
-#include <asm/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct *get_current(void)
-{
-       return(current_thread_info()->task);
-}
-
-#define        current get_current()
-
-#endif /* _H8300_CURRENT_H */
diff --git a/arch/h8300/include/asm/dbg.h b/arch/h8300/include/asm/dbg.h
deleted file mode 100644 (file)
index 2c6d1cb..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#define DEBUG 1
-#define        BREAK asm volatile ("trap #3")
diff --git a/arch/h8300/include/asm/delay.h b/arch/h8300/include/asm/delay.h
deleted file mode 100644 (file)
index 743beba..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _H8300_DELAY_H
-#define _H8300_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 2002 Yoshinori Sato <ysato@sourceforge.jp>
- *
- * Delay routines, using a pre-computed "loops_per_second" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
-       __asm__ __volatile__ ("1:\n\t"
-                             "dec.l #1,%0\n\t"
-                             "bne 1b"
-                             :"=r" (loops):"0"(loops));
-}
-
-/*
- * Use only for very small delays ( < 1 msec).  Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays.  This is a "reasonable" implementation, though (and the
- * first constant multiplications gets optimized away if the delay is
- * a constant)  
- */
-
-extern unsigned long loops_per_jiffy;
-
-static inline void udelay(unsigned long usecs)
-{
-       usecs *= 4295;          /* 2**32 / 1000000 */
-       usecs /= (loops_per_jiffy*HZ);
-       if (usecs)
-               __delay(usecs);
-}
-
-#endif /* _H8300_DELAY_H */
diff --git a/arch/h8300/include/asm/device.h b/arch/h8300/include/asm/device.h
deleted file mode 100644 (file)
index d8f9872..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/h8300/include/asm/div64.h b/arch/h8300/include/asm/div64.h
deleted file mode 100644 (file)
index 6cd978c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/h8300/include/asm/dma.h b/arch/h8300/include/asm/dma.h
deleted file mode 100644 (file)
index 3edbaaa..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _H8300_DMA_H
-#define _H8300_DMA_H 
-
-/*
- * Set the number of DMA channels; the H8/300 port does not implement any.
- */
-#define MAX_DMA_CHANNELS 0
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char *device_id);     /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);      /* release it again */
-#endif /* _H8300_DMA_H */
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
deleted file mode 100644 (file)
index 6db7124..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef __ASMH8300_ELF_H
-#define __ASMH8300_ELF_H
-
-/*
- * ELF register definitions..
- */
-
-#include <asm/ptrace.h>
-#include <asm/user.h>
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef unsigned long elf_fpregset_t;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_H8_300)
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS      ELFCLASS32
-#define ELF_DATA       ELFDATA2MSB
-#define ELF_ARCH       EM_H8_300
-#if defined(__H8300H__)
-#define ELF_CORE_EFLAGS 0x810000
-#endif
-#if defined(__H8300S__)
-#define ELF_CORE_EFLAGS 0x820000
-#endif
-
-#define ELF_PLAT_INIT(_r)      _r->er1 = 0
-
-#define ELF_EXEC_PAGESIZE      4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE         0xD0000000UL
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this cpu supports.  */
-
-#define ELF_HWCAP      (0)
-
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.  */
-
-#define ELF_PLATFORM  (NULL)
-
-#define R_H8_NONE       0
-#define R_H8_DIR32      1
-#define R_H8_DIR32_28   2
-#define R_H8_DIR32_24   3
-#define R_H8_DIR32_16   4
-#define R_H8_DIR32U     6
-#define R_H8_DIR32U_28  7
-#define R_H8_DIR32U_24  8
-#define R_H8_DIR32U_20  9
-#define R_H8_DIR32U_16 10
-#define R_H8_DIR24     11
-#define R_H8_DIR24_20  12
-#define R_H8_DIR24_16  13
-#define R_H8_DIR24U    14
-#define R_H8_DIR24U_20 15
-#define R_H8_DIR24U_16 16
-#define R_H8_DIR16     17
-#define R_H8_DIR16U    18
-#define R_H8_DIR16S_32 19
-#define R_H8_DIR16S_28 20
-#define R_H8_DIR16S_24 21
-#define R_H8_DIR16S_20 22
-#define R_H8_DIR16S    23
-#define R_H8_DIR8      24
-#define R_H8_DIR8U     25
-#define R_H8_DIR8Z_32  26
-#define R_H8_DIR8Z_28  27
-#define R_H8_DIR8Z_24  28
-#define R_H8_DIR8Z_20  29
-#define R_H8_DIR8Z_16  30
-#define R_H8_PCREL16   31
-#define R_H8_PCREL8    32
-#define R_H8_BPOS      33
-#define R_H8_PCREL32   34
-#define R_H8_GOT32O    35
-#define R_H8_GOT16O    36
-#define R_H8_DIR16A8   59
-#define R_H8_DIR16R8   60
-#define R_H8_DIR24A8   61
-#define R_H8_DIR24R8   62
-#define R_H8_DIR32A16  63
-#define R_H8_ABS32     65
-#define R_H8_ABS32A16 127
-
-#endif
diff --git a/arch/h8300/include/asm/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/h8300/include/asm/fb.h b/arch/h8300/include/asm/fb.h
deleted file mode 100644 (file)
index c7df380..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
-       return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
deleted file mode 100644 (file)
index bd12b31..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * include/asm-h8300/flat.h -- uClinux flat-format executables
- */
-
-#ifndef __H8300_FLAT_H__
-#define __H8300_FLAT_H__
-
-#define        flat_argvp_envp_on_stack()              1
-#define        flat_old_ram_flag(flags)                1
-#define        flat_reloc_valid(reloc, size)           ((reloc) <= (size))
-#define        flat_set_persistent(relval, p)          0
-
-/*
- * On the H8 a couple of the relocations carry an instruction in the
- * top byte.  As there can only be 24 bits of address space, we just
- * always preserve those top 8 bits; when they do not hold an instruction
- * they are 0. (davidm@snapgear.com)
- */
-
-#define        flat_get_relocate_addr(rel)             (rel)
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
-        (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff))
-#define flat_put_addr_at_rp(rp, addr, rel) \
-       put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp)
-
-#endif /* __H8300_FLAT_H__ */
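
The flat.h accessors above exploit the 24-bit address space: the top byte of a relocation slot may hold an instruction, so reads mask it away (unless the GOTPIC flag marks the slot as a full 32-bit address) and writes splice the new 24-bit address back under the preserved top byte. A self-contained sketch of that idea (MY_FLAT_FLAG_GOTPIC and the slot values are made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define MY_FLAT_FLAG_GOTPIC 0x2   /* made-up flag value for this sketch */

static uint32_t get_addr(uint32_t slot, unsigned flags)
{
    /* full 32 bits for GOT/PIC binaries, low 24 bits otherwise */
    return slot & ((flags & MY_FLAT_FLAG_GOTPIC) ? 0xffffffffu : 0x00ffffffu);
}

static uint32_t put_addr(uint32_t slot, uint32_t addr)
{
    /* keep the original top byte (possibly an opcode), replace the low 24 bits */
    return (slot & 0xff000000u) | (addr & 0x00ffffffu);
}

int main(void)
{
    uint32_t slot = 0x5a123456;   /* 0x5a plays the role of the instruction byte */

    printf("addr  = 0x%06x\n", (unsigned)get_addr(slot, 0));           /* 0x123456   */
    printf("slot' = 0x%08x\n", (unsigned)put_addr(slot, 0x00abcdef));  /* 0x5aabcdef */
    return 0;
}
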
diff --git a/arch/h8300/include/asm/fpu.h b/arch/h8300/include/asm/fpu.h
deleted file mode 100644 (file)
index 4fc416e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* Nothing do */
diff --git a/arch/h8300/include/asm/ftrace.h b/arch/h8300/include/asm/ftrace.h
deleted file mode 100644 (file)
index 40a8c17..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* empty */
diff --git a/arch/h8300/include/asm/futex.h b/arch/h8300/include/asm/futex.h
deleted file mode 100644 (file)
index 6a332a9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/h8300/include/asm/gpio-internal.h b/arch/h8300/include/asm/gpio-internal.h
deleted file mode 100644 (file)
index a714f0c..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _H8300_GPIO_H
-#define _H8300_GPIO_H
-
-#define H8300_GPIO_P1 0
-#define H8300_GPIO_P2 1
-#define H8300_GPIO_P3 2
-#define H8300_GPIO_P4 3
-#define H8300_GPIO_P5 4
-#define H8300_GPIO_P6 5
-#define H8300_GPIO_P7 6
-#define H8300_GPIO_P8 7
-#define H8300_GPIO_P9 8
-#define H8300_GPIO_PA 9
-#define H8300_GPIO_PB 10
-#define H8300_GPIO_PC 11
-#define H8300_GPIO_PD 12
-#define H8300_GPIO_PE 13
-#define H8300_GPIO_PF 14
-#define H8300_GPIO_PG 15
-#define H8300_GPIO_PH 16
-
-#define H8300_GPIO_B7 0x80
-#define H8300_GPIO_B6 0x40
-#define H8300_GPIO_B5 0x20
-#define H8300_GPIO_B4 0x10
-#define H8300_GPIO_B3 0x08
-#define H8300_GPIO_B2 0x04
-#define H8300_GPIO_B1 0x02
-#define H8300_GPIO_B0 0x01
-
-#define H8300_GPIO_INPUT 0
-#define H8300_GPIO_OUTPUT 1
-
-#define H8300_GPIO_RESERVE(port, bits) \
-        h8300_reserved_gpio(port, bits)
-
-#define H8300_GPIO_FREE(port, bits) \
-        h8300_free_gpio(port, bits)
-
-#define H8300_GPIO_DDR(port, bit, dir) \
-        h8300_set_gpio_dir(((port) << 8) | (bit), dir)
-
-#define H8300_GPIO_GETDIR(port, bit) \
-        h8300_get_gpio_dir(((port) << 8) | (bit))
-
-extern int h8300_reserved_gpio(int port, int bits);
-extern int h8300_free_gpio(int port, int bits);
-extern int h8300_set_gpio_dir(int port_bit, int dir);
-extern int h8300_get_gpio_dir(int port_bit);
-extern int h8300_init_gpio(void);
-
-#endif
diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h
deleted file mode 100644 (file)
index c2e1aa0..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __H8300_HARDIRQ_H
-#define __H8300_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS   8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif
diff --git a/arch/h8300/include/asm/hw_irq.h b/arch/h8300/include/asm/hw_irq.h
deleted file mode 100644 (file)
index d75a5a1..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* Do Nothing */
diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h
deleted file mode 100644 (file)
index c1a8df2..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-#ifndef _H8300_IO_H
-#define _H8300_IO_H
-
-#ifdef __KERNEL__
-
-#include <asm/virtconvert.h>
-
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-#elif defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-#else
-#error UNKNOWN CPU TYPE
-#endif
-
-
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory. They are meant to access the bus in
- * the bus byte order, which is little-endian.
- *
- * readX/writeX() are used to access memory-mapped devices. On some
- * architectures memory-mapped I/O needs to be accessed differently;
- * on this architecture we just read/write the memory location
- * directly.
- */
-/* ++roman: Assigning to temporary variables keeps gcc from sometimes generating
- * two accesses to memory, which may be undesirable for some devices.
- */
-
-/*
- * swap functions are sometimes needed to interface little-endian hardware
- */
-
-static inline unsigned short _swapw(volatile unsigned short v)
-{
-#ifndef H8300_IO_NOSWAP
-       unsigned short r;
-       __asm__("xor.b %w0,%x0\n\t"
-               "xor.b %x0,%w0\n\t"
-               "xor.b %w0,%x0"
-               :"=r"(r)
-               :"0"(v));
-       return r;
-#else
-       return v;
-#endif
-}
-
-static inline unsigned long _swapl(volatile unsigned long v)
-{
-#ifndef H8300_IO_NOSWAP
-       unsigned long r;
-       __asm__("xor.b %w0,%x0\n\t"
-               "xor.b %x0,%w0\n\t"
-               "xor.b %w0,%x0\n\t"
-               "xor.w %e0,%f0\n\t"
-               "xor.w %f0,%e0\n\t"
-               "xor.w %e0,%f0\n\t"
-               "xor.b %w0,%x0\n\t"
-               "xor.b %x0,%w0\n\t"
-               "xor.b %w0,%x0"
-               :"=r"(r)
-               :"0"(v));
-       return r;
-#else
-       return v;
-#endif
-}
-
-#define readb(addr) \
-    ({ unsigned char __v = \
-     *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \
-     __v; })
-#define readw(addr) \
-    ({ unsigned short __v = \
-     *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \
-     __v; })
-#define readl(addr) \
-    ({ unsigned long __v = \
-     *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \
-     __v; })
-
-#define writeb(b,addr) (void)((*(volatile unsigned char *) \
-                             ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writew(b,addr) (void)((*(volatile unsigned short *) \
-                             ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writel(b,addr) (void)((*(volatile unsigned long *) \
-                             ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-static inline int h8300_buswidth(unsigned int addr)
-{
-       return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0;
-}
-
-static inline void io_outsb(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned char  *ap_b = (volatile unsigned char *) addr;
-       volatile unsigned short *ap_w = (volatile unsigned short *) addr;
-       unsigned char *bp = (unsigned char *) buf;
-
-       if(h8300_buswidth(addr) && (addr & 1)) {
-               while (len--)
-                       *ap_w = *bp++;
-       } else {
-               while (len--)
-                       *ap_b = *bp++;
-       }
-}
-
-static inline void io_outsw(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *ap = _swapw(*bp++);
-}
-
-static inline void io_outsl(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *ap = _swapl(*bp++);
-}
-
-static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *ap = *bp++;
-}
-
-static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *ap = *bp++;
-}
-
-static inline void io_insb(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned char  *ap_b;
-       volatile unsigned short *ap_w;
-       unsigned char *bp = (unsigned char *) buf;
-
-       if(h8300_buswidth(addr)) {
-               ap_w = (volatile unsigned short *)(addr & ~1);
-               while (len--)
-                       *bp++ = *ap_w & 0xff;
-       } else {
-               ap_b = (volatile unsigned char *)addr;
-               while (len--)
-                       *bp++ = *ap_b;
-       }
-}
-
-static inline void io_insw(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *bp++ = _swapw(*ap);
-}
-
-static inline void io_insl(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *bp++ = _swapl(*ap);
-}
-
-static inline void io_insw_noswap(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *bp++ = *ap;
-}
-
-static inline void io_insl_noswap(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *bp++ = *ap;
-}
-
-/*
- *     make the short names macros so specific devices
- *     can override them as required
- */
-
-#define memset_io(a,b,c)       memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c)   memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c)     memcpy((void *)(a),(b),(c))
-
-#define mmiowb()
-
-#define inb(addr)    ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr))
-#define inw(addr)    _swapw(readw(addr))
-#define inl(addr)    _swapl(readl(addr))
-#define outb(x,addr) ((void)((h8300_buswidth(addr) && \
-                      ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr)))
-#define outw(x,addr) ((void) writew(_swapw(x),addr))
-#define outl(x,addr) ((void) writel(_swapl(x),addr))
-
-#define inb_p(addr)    inb(addr)
-#define inw_p(addr)    inw(addr)
-#define inl_p(addr)    inl(addr)
-#define outb_p(x,addr) outb(x,addr)
-#define outw_p(x,addr) outw(x,addr)
-#define outl_p(x,addr) outl(x,addr)
-
-#define outsb(a,b,l) io_outsb(a,b,l)
-#define outsw(a,b,l) io_outsw(a,b,l)
-#define outsl(a,b,l) io_outsl(a,b,l)
-
-#define insb(a,b,l) io_insb(a,b,l)
-#define insw(a,b,l) io_insw(a,b,l)
-#define insl(a,b,l) io_insl(a,b,l)
-
-#define IO_SPACE_LIMIT 0xffffff
-
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_FULL_CACHING             0
-#define IOMAP_NOCACHE_SER              1
-#define IOMAP_NOCACHE_NONSER           2
-#define IOMAP_WRITETHROUGH             3
-
-extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-extern void __iounmap(void *addr, unsigned long size);
-
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
-}
-static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
-}
-
-extern void iounmap(void *addr);
-
-/* H8/300 internal I/O functions */
-static __inline__ unsigned char ctrl_inb(unsigned long addr)
-{
-       return *(volatile unsigned char*)addr;
-}
-
-static __inline__ unsigned short ctrl_inw(unsigned long addr)
-{
-       return *(volatile unsigned short*)addr;
-}
-
-static __inline__ unsigned long ctrl_inl(unsigned long addr)
-{
-       return *(volatile unsigned long*)addr;
-}
-
-static __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
-{
-       *(volatile unsigned char*)addr = b;
-}
-
-static __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
-{
-       *(volatile unsigned short*)addr = b;
-}
-
-static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
-{
-        *(volatile unsigned long*)addr = b;
-}
-
-static __inline__ void ctrl_bclr(int b, unsigned long addr)
-{
-       if (__builtin_constant_p(b))
-               switch (b) {
-               case 0: __asm__("bclr #0,@%0"::"r"(addr)); break;
-               case 1: __asm__("bclr #1,@%0"::"r"(addr)); break;
-               case 2: __asm__("bclr #2,@%0"::"r"(addr)); break;
-               case 3: __asm__("bclr #3,@%0"::"r"(addr)); break;
-               case 4: __asm__("bclr #4,@%0"::"r"(addr)); break;
-               case 5: __asm__("bclr #5,@%0"::"r"(addr)); break;
-               case 6: __asm__("bclr #6,@%0"::"r"(addr)); break;
-               case 7: __asm__("bclr #7,@%0"::"r"(addr)); break;
-               }
-       else
-               __asm__("bclr %w0,@%1"::"r"(b), "r"(addr));
-}
-
-static __inline__ void ctrl_bset(int b, unsigned long addr)
-{
-       if (__builtin_constant_p(b))
-               switch (b) {
-               case 0: __asm__("bset #0,@%0"::"r"(addr)); break;
-               case 1: __asm__("bset #1,@%0"::"r"(addr)); break;
-               case 2: __asm__("bset #2,@%0"::"r"(addr)); break;
-               case 3: __asm__("bset #3,@%0"::"r"(addr)); break;
-               case 4: __asm__("bset #4,@%0"::"r"(addr)); break;
-               case 5: __asm__("bset #5,@%0"::"r"(addr)); break;
-               case 6: __asm__("bset #6,@%0"::"r"(addr)); break;
-               case 7: __asm__("bset #7,@%0"::"r"(addr)); break;
-               }
-       else
-               __asm__("bset %w0,@%1"::"r"(b), "r"(addr));
-}
-
-/* Pages to physical address... */
-#define page_to_phys(page)      ((page - mem_map) << PAGE_SHIFT)
-#define page_to_bus(page)       ((page - mem_map) << PAGE_SHIFT)
-
-/*
- * Macros used for converting between virtual and physical mappings.
- */
-#define phys_to_virt(vaddr)    ((void *) (vaddr))
-#define virt_to_phys(vaddr)    ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)  p
-
-#endif /* __KERNEL__ */
-
-#endif /* _H8300_IO_H */
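
The port accessors above bridge the big-endian H8/300 to little-endian ISA/PCI-style devices: inw/outw wrap the raw 16-bit memory access in a byte swap. A sketch of that wrapping, with an ordinary variable standing in for a device register:

#include <stdio.h>
#include <stdint.h>

static uint16_t swapw(uint16_t v)
{
    return (uint16_t)((v << 8) | (v >> 8));
}

/* stand-ins for the raw MMIO accessors readw()/writew() */
static uint16_t mmio_read16(const volatile uint16_t *addr)    { return *addr; }
static void mmio_write16(volatile uint16_t *addr, uint16_t v) { *addr = v; }

int main(void)
{
    volatile uint16_t fake_reg = 0x3412;   /* pretend the raw bus read is byte-swapped */

    uint16_t val = swapw(mmio_read16(&fake_reg));      /* the "inw" path  */
    printf("inw -> 0x%04x\n", (unsigned)val);          /* 0x1234          */

    mmio_write16(&fake_reg, swapw(0xbeef));            /* the "outw" path */
    printf("reg  = 0x%04x\n", (unsigned)fake_reg);     /* 0xefbe          */
    return 0;
}
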
diff --git a/arch/h8300/include/asm/irq.h b/arch/h8300/include/asm/irq.h
deleted file mode 100644 (file)
index 13d7c60..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _H8300_IRQ_H_
-#define _H8300_IRQ_H_
-
-#include <asm/ptrace.h>
-
-#if defined(CONFIG_CPU_H8300H)
-#define NR_IRQS 64
-#define EXT_IRQ0 12
-#define EXT_IRQ1 13
-#define EXT_IRQ2 14
-#define EXT_IRQ3 15
-#define EXT_IRQ4 16
-#define EXT_IRQ5 17
-#define EXT_IRQ6 18
-#define EXT_IRQ7 19
-#define EXT_IRQS 5
-#define IER_REGS *(volatile unsigned char *)IER
-#endif
-#if defined(CONFIG_CPU_H8S)
-#define NR_IRQS 128
-#define EXT_IRQ0 16
-#define EXT_IRQ1 17
-#define EXT_IRQ2 18
-#define EXT_IRQ3 19
-#define EXT_IRQ4 20
-#define EXT_IRQ5 21
-#define EXT_IRQ6 22
-#define EXT_IRQ7 23
-#define EXT_IRQ8 24
-#define EXT_IRQ9 25
-#define EXT_IRQ10 26
-#define EXT_IRQ11 27
-#define EXT_IRQ12 28
-#define EXT_IRQ13 29
-#define EXT_IRQ14 30
-#define EXT_IRQ15 31
-#define EXT_IRQS 15
-
-#define IER_REGS *(volatile unsigned short *)IER
-#endif
-
-static __inline__ int irq_canonicalize(int irq)
-{
-       return irq;
-}
-
-typedef void (*h8300_vector)(void);
-
-#endif /* _H8300_IRQ_H_ */
diff --git a/arch/h8300/include/asm/irq_regs.h b/arch/h8300/include/asm/irq_regs.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/h8300/include/asm/irqflags.h b/arch/h8300/include/asm/irqflags.h
deleted file mode 100644 (file)
index 9617cd5..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _H8300_IRQFLAGS_H
-#define _H8300_IRQFLAGS_H
-
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile ("stc ccr,%w0" : "=r" (flags));
-       return flags;
-}
-
-static inline void arch_local_irq_disable(void)
-{
-       asm volatile ("orc  #0x80,ccr" : : : "memory");
-}
-
-static inline void arch_local_irq_enable(void)
-{
-       asm volatile ("andc #0x7f,ccr" : : : "memory");
-}
-
-static inline unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags = arch_local_save_flags();
-       arch_local_irq_disable();
-       return flags;
-}
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-       asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory");
-}
-
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
-{
-       return (flags & 0x80) == 0x80;
-}
-
-static inline bool arch_irqs_disabled(void)
-{
-       return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-#endif /* _H8300_IRQFLAGS_H */
diff --git a/arch/h8300/include/asm/kdebug.h b/arch/h8300/include/asm/kdebug.h
deleted file mode 100644 (file)
index 6ece1b0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
deleted file mode 100644 (file)
index be12a71..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_H8300_KMAP_TYPES_H
-#define _ASM_H8300_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif
diff --git a/arch/h8300/include/asm/local.h b/arch/h8300/include/asm/local.h
deleted file mode 100644 (file)
index fdd4efe..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_LOCAL_H_
-#define _H8300_LOCAL_H_
-
-#include <asm-generic/local.h>
-
-#endif
diff --git a/arch/h8300/include/asm/local64.h b/arch/h8300/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/h8300/include/asm/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
deleted file mode 100644 (file)
index ab9d964..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _H8300_MC146818RTC_H
-#define _H8300_MC146818RTC_H
-
-/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */
-
-#endif /* _H8300_MC146818RTC_H */
diff --git a/arch/h8300/include/asm/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
deleted file mode 100644 (file)
index f44b730..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __H8300_MMU_CONTEXT_H
-#define __H8300_MMU_CONTEXT_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm-generic/mm_hooks.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-       // mm->context = virt_to_phys(mm->pgd);
-       return(0);
-}
-
-#define destroy_context(mm)            do { } while(0)
-#define deactivate_mm(tsk,mm)           do { } while(0)
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-}
-
-static inline void activate_mm(struct mm_struct *prev_mm,
-                              struct mm_struct *next_mm)
-{
-}
-
-#endif
diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h
deleted file mode 100644 (file)
index 458c1f7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h
deleted file mode 100644 (file)
index 837381a..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _H8300_PAGE_H
-#define _H8300_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-
-#define PAGE_SHIFT     (12)
-#define PAGE_SIZE      (1UL << PAGE_SHIFT)
-#define PAGE_MASK      (~(PAGE_SIZE-1))
-
-#include <asm/setup.h>
-
-#ifndef __ASSEMBLY__
-#define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr)     free_page(addr)
-
-#define clear_page(page)       memset((page), 0, PAGE_SIZE)
-#define copy_page(to,from)     memcpy((to), (from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg)       clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
-
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd[16]; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct page *pgtable_t;
-
-#define pte_val(x)     ((x).pte)
-#define pmd_val(x)     ((&x)->pmd[0])
-#define pgd_val(x)     ((x).pgd)
-#define pgprot_val(x)  ((x).pgprot)
-
-#define __pte(x)       ((pte_t) { (x) } )
-#define __pmd(x)       ((pmd_t) { (x) } )
-#define __pgd(x)       ((pgd_t) { (x) } )
-#define __pgprot(x)    ((pgprot_t) { (x) } )
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/page_offset.h>
-
-#define PAGE_OFFSET            (PAGE_OFFSET_RAW)
-
-#ifndef __ASSEMBLY__
-
-#define __pa(vaddr)            virt_to_phys(vaddr)
-#define __va(paddr)            phys_to_virt((unsigned long)paddr)
-
-#define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
-
-#define MAP_NR(addr)           (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
-#define virt_to_page(addr)     (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page)     ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
-#define pfn_valid(page)                (page < max_mapnr)
-
-#define ARCH_PFN_OFFSET                (PAGE_OFFSET >> PAGE_SHIFT)
-
-#define        virt_addr_valid(kaddr)  (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
-                               ((void *)(kaddr) < (void *)memory_end))
-
-#endif /* __ASSEMBLY__ */
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
-
-#endif /* _H8300_PAGE_H */
diff --git a/arch/h8300/include/asm/page_offset.h b/arch/h8300/include/asm/page_offset.h
deleted file mode 100644 (file)
index f870646..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-
-#define PAGE_OFFSET_RAW                0x00000000
-
diff --git a/arch/h8300/include/asm/param.h b/arch/h8300/include/asm/param.h
deleted file mode 100644 (file)
index c3909e7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _H8300_PARAM_H
-#define _H8300_PARAM_H
-
-#include <uapi/asm/param.h>
-
-#define HZ             CONFIG_HZ
-#define        USER_HZ         HZ
-#define        CLOCKS_PER_SEC  (USER_HZ)
-#endif /* _H8300_PARAM_H */
diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h
deleted file mode 100644 (file)
index 0b2acaa..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_H8300_PCI_H
-#define _ASM_H8300_PCI_H
-
-/*
- * asm-h8300/pci.h - H8/300 specific PCI declarations.
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#define pcibios_assign_all_busses()    0
-
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
-       /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define PCI_DMA_BUS_IS_PHYS    (1)
-
-#endif /* _ASM_H8300_PCI_H */
diff --git a/arch/h8300/include/asm/percpu.h b/arch/h8300/include/asm/percpu.h
deleted file mode 100644 (file)
index 72c03e3..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_H8300_PERCPU__
-#define __ARCH_H8300_PERCPU__
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_H8300_PERCPU__ */
diff --git a/arch/h8300/include/asm/pgalloc.h b/arch/h8300/include/asm/pgalloc.h
deleted file mode 100644 (file)
index c2e89a2..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _H8300_PGALLOC_H
-#define _H8300_PGALLOC_H
-
-#include <asm/setup.h>
-
-#define check_pgt_cache()      do { } while (0)
-
-#endif /* _H8300_PGALLOC_H */
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
deleted file mode 100644 (file)
index 7ca20f8..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _H8300_PGTABLE_H
-#define _H8300_PGTABLE_H
-
-#include <asm-generic/4level-fixup.h>
-
-#include <linux/slab.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/io.h>
-
-#define pgd_present(pgd)     (1)       /* pages are always present on NO_MM */
-#define pgd_none(pgd)          (0)
-#define pgd_bad(pgd)           (0)
-#define pgd_clear(pgdp)
-#define kern_addr_valid(addr)  (1)
-#define        pmd_offset(a, b)        ((void *)0)
-#define pmd_none(pmd)           (1)
-#define pgd_offset_k(address)   ((pgd_t *)0)
-#define pte_offset_kernel(dir, address) ((pte_t *)0)
-
-#define PAGE_NONE              __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_SHARED            __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_COPY              __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_READONLY  __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_KERNEL            __pgprot(0)    /* these mean nothing to NO_MM */
-
-extern void paging_init(void);
-#define swapper_pg_dir ((pgd_t *) 0)
-
-#define __swp_type(x)          (0)
-#define __swp_offset(x)                (0)
-#define __swp_entry(typ,off)   ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
-
-static inline int pte_file(pte_t pte) { return 0; }
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)       (virt_to_page(0))
-
-/*
- * These would be in other places but having them here reduces the diffs.
- */
-extern unsigned int kobjsize(const void *objp);
-extern int is_in_rom(unsigned long);
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define        VMALLOC_START   0
-#define        VMALLOC_END     0xffffffff
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define        VMALLOC_START   0
-#define        VMALLOC_END     0xffffffff
-
-#define arch_enter_lazy_cpu_mode()    do {} while (0)
-
-#include <asm-generic/pgtable.h>
-
-#endif /* _H8300_PGTABLE_H */
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
deleted file mode 100644 (file)
index 4b0ca49..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * include/asm-h8300/processor.h
- *
- * Copyright (C) 2002 Yoshinori Sato
- *
- * Based on: linux/asm-m68nommu/processor.h
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#ifndef __ASM_H8300_PROCESSOR_H
-#define __ASM_H8300_PROCESSOR_H
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-#include <linux/compiler.h>
-#include <asm/segment.h>
-#include <asm/fpu.h>
-#include <asm/ptrace.h>
-#include <asm/current.h>
-
-static inline unsigned long rdusp(void) {
-       extern unsigned int     sw_usp;
-       return(sw_usp);
-}
-
-static inline void wrusp(unsigned long usp) {
-       extern unsigned int     sw_usp;
-       sw_usp = usp;
-}
-
-/*
- * User space process size: 3.75GB. This is hardcoded into a few places,
- * so don't change it unless you know what you are doing.
- */
-#define TASK_SIZE      (0xFFFFFFFFUL)
-
-#ifdef __KERNEL__
-#define STACK_TOP      TASK_SIZE
-#define STACK_TOP_MAX  STACK_TOP
-#endif
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's. We won't be using it
- */
-#define TASK_UNMAPPED_BASE     0
-
-struct thread_struct {
-       unsigned long  ksp;             /* kernel stack pointer */
-       unsigned long  usp;             /* user stack pointer */
-       unsigned long  ccr;             /* saved status register */
-       unsigned long  esp0;            /* points to SR of stack frame */
-       struct {
-               unsigned short *addr;
-               unsigned short inst;
-       } breakinfo;
-};
-
-#define INIT_THREAD  {                                         \
-       .ksp  = sizeof(init_stack) + (unsigned long)init_stack, \
-       .usp  = 0,                                              \
-       .ccr  = PS_S,                                           \
-       .esp0 = 0,                                              \
-       .breakinfo = {                                          \
-               .addr = (unsigned short *)-1,                   \
-               .inst = 0                                       \
-       }                                                       \
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- *
- * pass the data segment into user programs if it exists,
- * it can't hurt anything as far as I can tell
- */
-#if defined(__H8300H__)
-#define start_thread(_regs, _pc, _usp)                         \
-do {                                                           \
-       (_regs)->pc = (_pc);                                    \
-       (_regs)->ccr = 0x00;       /* clear all flags */        \
-       (_regs)->er5 = current->mm->start_data; /* GOT base */  \
-       wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \
-} while(0)
-#endif
-#if defined(__H8300S__)
-#define start_thread(_regs, _pc, _usp)                         \
-do {                                                           \
-       (_regs)->pc = (_pc);                                    \
-       (_regs)->ccr = 0x00;       /* clear kernel flag */      \
-       (_regs)->exr = 0x78;       /* enable all interrupts */  \
-       (_regs)->er5 = current->mm->start_data; /* GOT base */  \
-       /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \
-       wrusp(((unsigned long)(_usp)) - 14);                    \
-} while(0)
-#endif
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-/*
- * Free current thread data structures etc..
- */
-static inline void exit_thread(void)
-{
-}
-
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
-unsigned long get_wchan(struct task_struct *p);
-
-#define        KSTK_EIP(tsk)   \
-    ({                 \
-       unsigned long eip = 0;   \
-       if ((tsk)->thread.esp0 > PAGE_SIZE && \
-           MAP_NR((tsk)->thread.esp0) < max_mapnr) \
-             eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
-       eip; })
-#define        KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-#define cpu_relax()    barrier()
-
-#define HARD_RESET_NOW() ({            \
-        local_irq_disable();           \
-        asm("jmp @@0");                        \
-})
-
-#endif
diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h
deleted file mode 100644 (file)
index c1826b9..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _H8300_PTRACE_H
-#define _H8300_PTRACE_H
-
-#include <uapi/asm/ptrace.h>
-
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_CPU_H8S)
-#endif
-#ifndef PS_S
-#define PS_S  (0x10)
-#endif
-
-#if defined(__H8300H__)
-#define H8300_REGS_NO 11
-#endif
-#if defined(__H8300S__)
-#define H8300_REGS_NO 12
-#endif
-
-/* Find the stack offset for a register, relative to thread.esp0. */
-#define PT_REG(reg)    ((long)&((struct pt_regs *)0)->reg)
-
-#define arch_has_single_step() (1)
-
-#define user_mode(regs) (!((regs)->ccr & PS_S))
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
-#define current_pt_regs() ((struct pt_regs *) \
-       (THREAD_SIZE + (unsigned long)current_thread_info()) - 1)
-#define signal_pt_regs() ((struct pt_regs *)current->thread.esp0)
-#define current_user_stack_pointer() rdusp()
-#endif /* __ASSEMBLY__ */
-#endif /* _H8300_PTRACE_H */
diff --git a/arch/h8300/include/asm/regs267x.h b/arch/h8300/include/asm/regs267x.h
deleted file mode 100644 (file)
index 1bff731..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-/* internal Peripherals Register address define */
-/* CPU: H8S/267x                                */
-
-#if !defined(__REGS_H8S267x__)
-#define __REGS_H8S267x__ 
-
-#if defined(__KERNEL__)
-
-#define DASTCR 0xFEE01A
-#define DADR0  0xFFFFA4
-#define DADR1  0xFFFFA5
-#define DACR01 0xFFFFA6
-#define DADR2  0xFFFFA8
-#define DADR3  0xFFFFA9
-#define DACR23 0xFFFFAA
-
-#define ADDRA  0xFFFF90
-#define ADDRAH 0xFFFF90
-#define ADDRAL 0xFFFF91
-#define ADDRB  0xFFFF92
-#define ADDRBH 0xFFFF92
-#define ADDRBL 0xFFFF93
-#define ADDRC  0xFFFF94
-#define ADDRCH 0xFFFF94
-#define ADDRCL 0xFFFF95
-#define ADDRD  0xFFFF96
-#define ADDRDH 0xFFFF96
-#define ADDRDL 0xFFFF97
-#define ADDRE  0xFFFF98
-#define ADDREH 0xFFFF98
-#define ADDREL 0xFFFF99
-#define ADDRF  0xFFFF9A
-#define ADDRFH 0xFFFF9A
-#define ADDRFL 0xFFFF9B
-#define ADDRG  0xFFFF9C
-#define ADDRGH 0xFFFF9C
-#define ADDRGL 0xFFFF9D
-#define ADDRH  0xFFFF9E
-#define ADDRHH 0xFFFF9E
-#define ADDRHL 0xFFFF9F
-
-#define ADCSR  0xFFFFA0
-#define ADCR   0xFFFFA1
-
-#define ABWCR  0xFFFEC0
-#define ASTCR  0xFFFEC1
-#define WTCRAH 0xFFFEC2
-#define WTCRAL 0xFFFEC3
-#define WTCRBH 0xFFFEC4
-#define WTCRBL 0xFFFEC5
-#define RDNCR  0xFFFEC6
-#define CSACRH 0xFFFEC8
-#define CSACRL 0xFFFEC9
-#define BROMCRH 0xFFFECA
-#define BROMCRL 0xFFFECB
-#define BCR    0xFFFECC
-#define DRAMCR 0xFFFED0
-#define DRACCR 0xFFFED2
-#define REFCR  0xFFFED4
-#define RTCNT  0xFFFED6
-#define RTCOR  0xFFFED7
-
-#define MAR0AH  0xFFFEE0
-#define MAR0AL  0xFFFEE2
-#define IOAR0A  0xFFFEE4
-#define ETCR0A  0xFFFEE6
-#define MAR0BH  0xFFFEE8
-#define MAR0BL  0xFFFEEA
-#define IOAR0B  0xFFFEEC
-#define ETCR0B  0xFFFEEE
-#define MAR1AH  0xFFFEF0
-#define MAR1AL  0xFFFEF2
-#define IOAR1A  0xFFFEF4
-#define ETCR1A  0xFFFEF6
-#define MAR1BH  0xFFFEF8
-#define MAR1BL  0xFFFEFA
-#define IOAR1B  0xFFFEFC
-#define ETCR1B  0xFFFEFE
-#define DMAWER  0xFFFF20
-#define DMATCR  0xFFFF21
-#define DMACR0A 0xFFFF22
-#define DMACR0B 0xFFFF23
-#define DMACR1A 0xFFFF24
-#define DMACR1B 0xFFFF25
-#define DMABCRH 0xFFFF26
-#define DMABCRL 0xFFFF27
-
-#define EDSAR0  0xFFFDC0
-#define EDDAR0  0xFFFDC4
-#define EDTCR0  0xFFFDC8
-#define EDMDR0  0xFFFDCC
-#define EDMDR0H 0xFFFDCC
-#define EDMDR0L 0xFFFDCD
-#define EDACR0  0xFFFDCE
-#define EDSAR1  0xFFFDD0
-#define EDDAR1  0xFFFDD4
-#define EDTCR1  0xFFFDD8
-#define EDMDR1  0xFFFDDC
-#define EDMDR1H 0xFFFDDC
-#define EDMDR1L 0xFFFDDD
-#define EDACR1  0xFFFDDE
-#define EDSAR2  0xFFFDE0
-#define EDDAR2  0xFFFDE4
-#define EDTCR2  0xFFFDE8
-#define EDMDR2  0xFFFDEC
-#define EDMDR2H 0xFFFDEC
-#define EDMDR2L 0xFFFDED
-#define EDACR2  0xFFFDEE
-#define EDSAR3  0xFFFDF0
-#define EDDAR3  0xFFFDF4
-#define EDTCR3  0xFFFDF8
-#define EDMDR3  0xFFFDFC
-#define EDMDR3H 0xFFFDFC
-#define EDMDR3L 0xFFFDFD
-#define EDACR3  0xFFFDFE
-
-#define IPRA  0xFFFE00
-#define IPRB  0xFFFE02
-#define IPRC  0xFFFE04
-#define IPRD  0xFFFE06
-#define IPRE  0xFFFE08
-#define IPRF  0xFFFE0A
-#define IPRG  0xFFFE0C
-#define IPRH  0xFFFE0E
-#define IPRI  0xFFFE10
-#define IPRJ  0xFFFE12
-#define IPRK  0xFFFE14
-#define ITSR  0xFFFE16
-#define SSIER 0xFFFE18
-#define ISCRH 0xFFFE1A
-#define ISCRL 0xFFFE1C
-
-#define INTCR 0xFFFF31
-#define IER   0xFFFF32
-#define IERH  0xFFFF32
-#define IERL  0xFFFF33
-#define ISR   0xFFFF34
-#define ISRH  0xFFFF34
-#define ISRL  0xFFFF35
-
-#define P1DDR 0xFFFE20
-#define P2DDR 0xFFFE21
-#define P3DDR 0xFFFE22
-#define P4DDR 0xFFFE23
-#define P5DDR 0xFFFE24
-#define P6DDR 0xFFFE25
-#define P7DDR 0xFFFE26
-#define P8DDR 0xFFFE27
-#define P9DDR 0xFFFE28
-#define PADDR 0xFFFE29
-#define PBDDR 0xFFFE2A
-#define PCDDR 0xFFFE2B
-#define PDDDR 0xFFFE2C
-#define PEDDR 0xFFFE2D
-#define PFDDR 0xFFFE2E
-#define PGDDR 0xFFFE2F
-#define PHDDR 0xFFFF74
-
-#define PFCR0 0xFFFE32
-#define PFCR1 0xFFFE33
-#define PFCR2 0xFFFE34
-
-#define PAPCR 0xFFFE36
-#define PBPCR 0xFFFE37
-#define PCPCR 0xFFFE38
-#define PDPCR 0xFFFE39
-#define PEPCR 0xFFFE3A
-
-#define P3ODR 0xFFFE3C
-#define PAODR 0xFFFE3D
-
-#define P1DR  0xFFFF60
-#define P2DR  0xFFFF61
-#define P3DR  0xFFFF62
-#define P4DR  0xFFFF63
-#define P5DR  0xFFFF64
-#define P6DR  0xFFFF65
-#define P7DR  0xFFFF66
-#define P8DR  0xFFFF67
-#define P9DR  0xFFFF68
-#define PADR  0xFFFF69
-#define PBDR  0xFFFF6A
-#define PCDR  0xFFFF6B
-#define PDDR  0xFFFF6C
-#define PEDR  0xFFFF6D
-#define PFDR  0xFFFF6E
-#define PGDR  0xFFFF6F
-#define PHDR  0xFFFF72
-
-#define PORT1 0xFFFF50
-#define PORT2 0xFFFF51
-#define PORT3 0xFFFF52
-#define PORT4 0xFFFF53
-#define PORT5 0xFFFF54
-#define PORT6 0xFFFF55
-#define PORT7 0xFFFF56
-#define PORT8 0xFFFF57
-#define PORT9 0xFFFF58
-#define PORTA 0xFFFF59
-#define PORTB 0xFFFF5A
-#define PORTC 0xFFFF5B
-#define PORTD 0xFFFF5C
-#define PORTE 0xFFFF5D
-#define PORTF 0xFFFF5E
-#define PORTG 0xFFFF5F
-#define PORTH 0xFFFF70
-
-#define PCR   0xFFFF46
-#define PMR   0xFFFF47
-#define NDERH 0xFFFF48
-#define NDERL 0xFFFF49
-#define PODRH 0xFFFF4A
-#define PODRL 0xFFFF4B
-#define NDRH1 0xFFFF4C
-#define NDRL1 0xFFFF4D
-#define NDRH2 0xFFFF4E
-#define NDRL2 0xFFFF4F
-
-#define SMR0  0xFFFF78
-#define BRR0  0xFFFF79
-#define SCR0  0xFFFF7A
-#define TDR0  0xFFFF7B
-#define SSR0  0xFFFF7C
-#define RDR0  0xFFFF7D
-#define SCMR0 0xFFFF7E
-#define SMR1  0xFFFF80
-#define BRR1  0xFFFF81
-#define SCR1  0xFFFF82
-#define TDR1  0xFFFF83
-#define SSR1  0xFFFF84
-#define RDR1  0xFFFF85
-#define SCMR1 0xFFFF86
-#define SMR2  0xFFFF88
-#define BRR2  0xFFFF89
-#define SCR2  0xFFFF8A
-#define TDR2  0xFFFF8B
-#define SSR2  0xFFFF8C
-#define RDR2  0xFFFF8D
-#define SCMR2 0xFFFF8E
-
-#define IRCR0 0xFFFE1E
-#define SEMR  0xFFFDA8
-
-#define MDCR    0xFFFF3E
-#define SYSCR   0xFFFF3D
-#define MSTPCRH 0xFFFF40
-#define MSTPCRL 0xFFFF41
-#define FLMCR1  0xFFFFC8
-#define FLMCR2  0xFFFFC9
-#define EBR1    0xFFFFCA
-#define EBR2    0xFFFFCB
-#define CTGARC_RAMCR   0xFFFECE
-#define SBYCR   0xFFFF3A
-#define SCKCR   0xFFFF3B
-#define PLLCR   0xFFFF45
-
-#define TSTR   0xFFFFC0
-#define TSNC   0XFFFFC1
-
-#define TCR0   0xFFFFD0
-#define TMDR0  0xFFFFD1
-#define TIORH0 0xFFFFD2
-#define TIORL0 0xFFFFD3
-#define TIER0  0xFFFFD4
-#define TSR0   0xFFFFD5
-#define TCNT0  0xFFFFD6
-#define GRA0   0xFFFFD8
-#define GRB0   0xFFFFDA
-#define GRC0   0xFFFFDC
-#define GRD0   0xFFFFDE
-#define TCR1   0xFFFFE0
-#define TMDR1  0xFFFFE1
-#define TIORH1 0xFFFFE2
-#define TIORL1 0xFFFFE3
-#define TIER1  0xFFFFE4
-#define TSR1   0xFFFFE5
-#define TCNT1  0xFFFFE6
-#define GRA1   0xFFFFE8
-#define GRB1   0xFFFFEA
-#define TCR2   0xFFFFF0
-#define TMDR2  0xFFFFF1
-#define TIORH2 0xFFFFF2
-#define TIORL2 0xFFFFF3
-#define TIER2  0xFFFFF4
-#define TSR2   0xFFFFF5
-#define TCNT2  0xFFFFF6
-#define GRA2   0xFFFFF8
-#define GRB2   0xFFFFFA
-#define TCR3   0xFFFE80
-#define TMDR3  0xFFFE81
-#define TIORH3 0xFFFE82
-#define TIORL3 0xFFFE83
-#define TIER3  0xFFFE84
-#define TSR3   0xFFFE85
-#define TCNT3  0xFFFE86
-#define GRA3   0xFFFE88
-#define GRB3   0xFFFE8A
-#define GRC3   0xFFFE8C
-#define GRD3   0xFFFE8E
-#define TCR4   0xFFFE90
-#define TMDR4  0xFFFE91
-#define TIORH4 0xFFFE92
-#define TIORL4 0xFFFE93
-#define TIER4  0xFFFE94
-#define TSR4   0xFFFE95
-#define TCNT4  0xFFFE96
-#define GRA4   0xFFFE98
-#define GRB4   0xFFFE9A
-#define TCR5   0xFFFEA0
-#define TMDR5  0xFFFEA1
-#define TIORH5 0xFFFEA2
-#define TIORL5 0xFFFEA3
-#define TIER5  0xFFFEA4
-#define TSR5   0xFFFEA5
-#define TCNT5  0xFFFEA6
-#define GRA5   0xFFFEA8
-#define GRB5   0xFFFEAA
-
-#define _8TCR0   0xFFFFB0
-#define _8TCR1   0xFFFFB1
-#define _8TCSR0  0xFFFFB2
-#define _8TCSR1  0xFFFFB3
-#define _8TCORA0 0xFFFFB4
-#define _8TCORA1 0xFFFFB5
-#define _8TCORB0 0xFFFFB6
-#define _8TCORB1 0xFFFFB7
-#define _8TCNT0  0xFFFFB8
-#define _8TCNT1  0xFFFFB9
-
-#define TCSR    0xFFFFBC
-#define TCNT    0xFFFFBD
-#define RSTCSRW 0xFFFFBE
-#define RSTCSRR 0xFFFFBF
-
-#endif /* __KERNEL__ */
-#endif /* __REGS_H8S267x__ */
diff --git a/arch/h8300/include/asm/regs306x.h b/arch/h8300/include/asm/regs306x.h
deleted file mode 100644 (file)
index 027dd63..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/* internal Peripherals Register address define */
-/* CPU: H8/306x                                 */
-
-#if !defined(__REGS_H8306x__)
-#define __REGS_H8306x__ 
-
-#if defined(__KERNEL__)
-
-#define DASTCR 0xFEE01A
-#define DADR0  0xFEE09C
-#define DADR1  0xFEE09D
-#define DACR   0xFEE09E
-
-#define ADDRAH 0xFFFFE0
-#define ADDRAL 0xFFFFE1
-#define ADDRBH 0xFFFFE2
-#define ADDRBL 0xFFFFE3
-#define ADDRCH 0xFFFFE4
-#define ADDRCL 0xFFFFE5
-#define ADDRDH 0xFFFFE6
-#define ADDRDL 0xFFFFE7
-#define ADCSR  0xFFFFE8
-#define ADCR   0xFFFFE9
-
-#define BRCR   0xFEE013
-#define ADRCR  0xFEE01E
-#define CSCR   0xFEE01F
-#define ABWCR  0xFEE020
-#define ASTCR  0xFEE021
-#define WCRH   0xFEE022
-#define WCRL   0xFEE023
-#define BCR    0xFEE024
-#define DRCRA  0xFEE026
-#define DRCRB  0xFEE027
-#define RTMCSR 0xFEE028
-#define RTCNT  0xFEE029
-#define RTCOR  0xFEE02A
-
-#define MAR0AR  0xFFFF20
-#define MAR0AE  0xFFFF21
-#define MAR0AH  0xFFFF22
-#define MAR0AL  0xFFFF23
-#define ETCR0AL 0xFFFF24
-#define ETCR0AH 0xFFFF25
-#define IOAR0A  0xFFFF26
-#define DTCR0A  0xFFFF27
-#define MAR0BR  0xFFFF28
-#define MAR0BE  0xFFFF29
-#define MAR0BH  0xFFFF2A
-#define MAR0BL  0xFFFF2B
-#define ETCR0BL 0xFFFF2C
-#define ETCR0BH 0xFFFF2D
-#define IOAR0B  0xFFFF2E
-#define DTCR0B  0xFFFF2F
-#define MAR1AR  0xFFFF30
-#define MAR1AE  0xFFFF31
-#define MAR1AH  0xFFFF32
-#define MAR1AL  0xFFFF33
-#define ETCR1AL 0xFFFF34
-#define ETCR1AH 0xFFFF35
-#define IOAR1A  0xFFFF36
-#define DTCR1A  0xFFFF37
-#define MAR1BR  0xFFFF38
-#define MAR1BE  0xFFFF39
-#define MAR1BH  0xFFFF3A
-#define MAR1BL  0xFFFF3B
-#define ETCR1BL 0xFFFF3C
-#define ETCR1BH 0xFFFF3D
-#define IOAR1B  0xFFFF3E
-#define DTCR1B  0xFFFF3F
-
-#define ISCR 0xFEE014
-#define IER  0xFEE015
-#define ISR  0xFEE016
-#define IPRA 0xFEE018
-#define IPRB 0xFEE019
-
-#define P1DDR 0xFEE000
-#define P2DDR 0xFEE001
-#define P3DDR 0xFEE002
-#define P4DDR 0xFEE003
-#define P5DDR 0xFEE004
-#define P6DDR 0xFEE005
-/*#define P7DDR 0xFEE006*/
-#define P8DDR 0xFEE007
-#define P9DDR 0xFEE008
-#define PADDR 0xFEE009
-#define PBDDR 0xFEE00A
-
-#define P1DR  0xFFFFD0
-#define P2DR  0xFFFFD1
-#define P3DR  0xFFFFD2
-#define P4DR  0xFFFFD3
-#define P5DR  0xFFFFD4
-#define P6DR  0xFFFFD5
-/*#define P7DR  0xFFFFD6*/
-#define P8DR  0xFFFFD7
-#define P9DR  0xFFFFD8
-#define PADR  0xFFFFD9
-#define PBDR  0xFFFFDA
-
-#define P2CR  0xFEE03C
-#define P4CR  0xFEE03E
-#define P5CR  0xFEE03F
-
-#define SMR0  0xFFFFB0
-#define BRR0  0xFFFFB1
-#define SCR0  0xFFFFB2
-#define TDR0  0xFFFFB3
-#define SSR0  0xFFFFB4
-#define RDR0  0xFFFFB5
-#define SCMR0 0xFFFFB6
-#define SMR1  0xFFFFB8
-#define BRR1  0xFFFFB9
-#define SCR1  0xFFFFBA
-#define TDR1  0xFFFFBB
-#define SSR1  0xFFFFBC
-#define RDR1  0xFFFFBD
-#define SCMR1 0xFFFFBE
-#define SMR2  0xFFFFC0
-#define BRR2  0xFFFFC1
-#define SCR2  0xFFFFC2
-#define TDR2  0xFFFFC3
-#define SSR2  0xFFFFC4
-#define RDR2  0xFFFFC5
-#define SCMR2 0xFFFFC6
-
-#define MDCR   0xFEE011
-#define SYSCR  0xFEE012
-#define DIVCR  0xFEE01B
-#define MSTCRH 0xFEE01C
-#define MSTCRL 0xFEE01D
-#define FLMCR1 0xFEE030
-#define FLMCR2 0xFEE031
-#define EBR1   0xFEE032
-#define EBR2   0xFEE033
-#define RAMCR  0xFEE077
-
-#define TSTR   0xFFFF60
-#define TSNC   0XFFFF61
-#define TMDR   0xFFFF62
-#define TOLR   0xFFFF63
-#define TISRA  0xFFFF64
-#define TISRB  0xFFFF65
-#define TISRC  0xFFFF66
-#define TCR0   0xFFFF68
-#define TIOR0  0xFFFF69
-#define TCNT0H 0xFFFF6A
-#define TCNT0L 0xFFFF6B
-#define GRA0H  0xFFFF6C
-#define GRA0L  0xFFFF6D
-#define GRB0H  0xFFFF6E
-#define GRB0L  0xFFFF6F
-#define TCR1   0xFFFF70
-#define TIOR1  0xFFFF71
-#define TCNT1H 0xFFFF72
-#define TCNT1L 0xFFFF73
-#define GRA1H  0xFFFF74
-#define GRA1L  0xFFFF75
-#define GRB1H  0xFFFF76
-#define GRB1L  0xFFFF77
-#define TCR3   0xFFFF78
-#define TIOR3  0xFFFF79
-#define TCNT3H 0xFFFF7A
-#define TCNT3L 0xFFFF7B
-#define GRA3H  0xFFFF7C
-#define GRA3L  0xFFFF7D
-#define GRB3H  0xFFFF7E
-#define GRB3L  0xFFFF7F
-
-#define _8TCR0  0xFFFF80
-#define _8TCR1  0xFFFF81
-#define _8TCSR0 0xFFFF82
-#define _8TCSR1 0xFFFF83
-#define TCORA0 0xFFFF84
-#define TCORA1 0xFFFF85
-#define TCORB0 0xFFFF86
-#define TCORB1 0xFFFF87
-#define _8TCNT0 0xFFFF88
-#define _8TCNT1 0xFFFF89
-
-#define _8TCR2  0xFFFF90
-#define _8TCR3  0xFFFF91
-#define _8TCSR2 0xFFFF92
-#define _8TCSR3 0xFFFF93
-#define TCORA2 0xFFFF94
-#define TCORA3 0xFFFF95
-#define TCORB2 0xFFFF96
-#define TCORB3 0xFFFF97
-#define _8TCNT2 0xFFFF98
-#define _8TCNT3 0xFFFF99
-
-#define TCSR   0xFFFF8C
-#define TCNT   0xFFFF8D
-#define RSTCSR 0xFFFF8F
-
-#define TPMR  0xFFFFA0
-#define TPCR  0xFFFFA1
-#define NDERB 0xFFFFA2
-#define NDERA 0xFFFFA3
-#define NDRB1 0xFFFFA4
-#define NDRA1 0xFFFFA5
-#define NDRB2 0xFFFFA6
-#define NDRA2 0xFFFFA7
-
-#define TCSR    0xFFFF8C
-#define TCNT    0xFFFF8D
-#define RSTCSRW 0xFFFF8E
-#define RSTCSRR 0xFFFF8F
-
-#endif /* __KERNEL__ */
-#endif /* __REGS_H8306x__ */
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
deleted file mode 100644 (file)
index 82130ed..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SCATTERLIST_H
-#define _H8300_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_H8300_SCATTERLIST_H) */
diff --git a/arch/h8300/include/asm/sections.h b/arch/h8300/include/asm/sections.h
deleted file mode 100644 (file)
index a81743e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SECTIONS_H_
-#define _H8300_SECTIONS_H_
-
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h
deleted file mode 100644 (file)
index b79a82d..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _H8300_SEGMENT_H
-#define _H8300_SEGMENT_H
-
-/* define constants */
-#define USER_DATA     (1)
-#ifndef __USER_DS
-#define __USER_DS     (USER_DATA)
-#endif
-#define USER_PROGRAM  (2)
-#define SUPER_DATA    (3)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS   (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (4)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define USER_DS                MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS      MAKE_MM_SEG(__KERNEL_DS)
-
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-
-static inline mm_segment_t get_fs(void)
-{
-    return USER_DS;
-}
-
-static inline mm_segment_t get_ds(void)
-{
-    /* return the supervisor data space code */
-    return KERNEL_DS;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
-}
-
-#define segment_eq(a,b)        ((a).seg == (b).seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _H8300_SEGMENT_H */
diff --git a/arch/h8300/include/asm/sh_bios.h b/arch/h8300/include/asm/sh_bios.h
deleted file mode 100644 (file)
index b6bb6e5..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* eCos HAL interface header */
-
-#ifndef SH_BIOS_H
-#define SH_BIOS_H
-
-#define HAL_IF_VECTOR_TABLE 0xfffe20
-#define CALL_IF_SET_CONSOLE_COMM  13
-#define QUERY_CURRENT -1
-#define MANGLER       -3
-
-/* Checking for GDB stub active */
-/* suggestion Jonathan Larmour */
-static int sh_bios_in_gdb_mode(void)
-{
-       static int gdb_active = -1;
-       if (gdb_active == -1) {
-               int (*set_console_comm)(int);
-               set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM];
-               gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER);
-       }
-       return gdb_active;
-}
-
-static void sh_bios_gdb_detach(void)
-{
-
-}
-
-#endif
diff --git a/arch/h8300/include/asm/shm.h b/arch/h8300/include/asm/shm.h
deleted file mode 100644 (file)
index ed6623c..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _H8300_SHM_H
-#define _H8300_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
-   currently out in swap space (see also mm/swap.c):
-   bits 0-1 (PAGE_PRESENT) is  = 0
-   bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
-   bits 31..9 are used like this:
-   bits 15..9 (SHM_ID) the id of the shared memory segment
-   bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
-                    (actually only bits 25..16 get used since SHMMAX is so low)
-   bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
-   others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT   9
-#else
-#define SHM_ID_SHIFT   7
-#endif
-#define _SHM_ID_BITS   7
-#define SHM_ID_MASK    ((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT  (SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS  15
-#define SHM_IDX_MASK   ((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _H8300_SHM_H */
diff --git a/arch/h8300/include/asm/shmparam.h b/arch/h8300/include/asm/shmparam.h
deleted file mode 100644 (file)
index d186395..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SHMPARAM_H
-#define _H8300_SHMPARAM_H
-
-#define        SHMLBA PAGE_SIZE                 /* attach addr a multiple of this */
-
-#endif /* _H8300_SHMPARAM_H */
diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
deleted file mode 100644 (file)
index 6341e36..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _H8300_SIGNAL_H
-#define _H8300_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-
-/* Most things should be clean enough to redefine this at will, if care
-   is taken to make libc match.  */
-
-#define _NSIG          64
-#define _NSIG_BPW      32
-#define _NSIG_WORDS    (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t;            /* at least 32 bits */
-
-typedef struct {
-       unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define __ARCH_HAS_SA_RESTORER
-
-#include <asm/sigcontext.h>
-#undef __HAVE_ARCH_SIG_BITOPS
-
-#endif /* _H8300_SIGNAL_H */
diff --git a/arch/h8300/include/asm/smp.h b/arch/h8300/include/asm/smp.h
deleted file mode 100644 (file)
index 9e9bd7e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* nothing required here yet */
diff --git a/arch/h8300/include/asm/spinlock.h b/arch/h8300/include/asm/spinlock.h
deleted file mode 100644 (file)
index d5407fa..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_SPINLOCK_H
-#define __H8300_SPINLOCK_H
-
-#error "H8/300 doesn't do SMP yet"
-
-#endif
diff --git a/arch/h8300/include/asm/string.h b/arch/h8300/include/asm/string.h
deleted file mode 100644 (file)
index ca50348..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _H8300_STRING_H_
-#define _H8300_STRING_H_
-
-#ifdef __KERNEL__ /* only set these up for kernel code */
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-#define __HAVE_ARCH_MEMSET
-extern void * memset(void * s, int c, size_t count);
-
-#define __HAVE_ARCH_MEMCPY
-extern void * memcpy(void *d, const void *s, size_t count);
-
-#else /* KERNEL */
-
-/*
- *     let user libraries deal with these,
- *     IMHO the kernel has no place defining these functions for user apps
- */
-
-#define __HAVE_ARCH_STRCPY 1
-#define __HAVE_ARCH_STRNCPY 1
-#define __HAVE_ARCH_STRCAT 1
-#define __HAVE_ARCH_STRNCAT 1
-#define __HAVE_ARCH_STRCMP 1
-#define __HAVE_ARCH_STRNCMP 1
-#define __HAVE_ARCH_STRNICMP 1
-#define __HAVE_ARCH_STRCHR 1
-#define __HAVE_ARCH_STRRCHR 1
-#define __HAVE_ARCH_STRSTR 1
-#define __HAVE_ARCH_STRLEN 1
-#define __HAVE_ARCH_STRNLEN 1
-#define __HAVE_ARCH_MEMSET 1
-#define __HAVE_ARCH_MEMCPY 1
-#define __HAVE_ARCH_MEMMOVE 1
-#define __HAVE_ARCH_MEMSCAN 1
-#define __HAVE_ARCH_MEMCMP 1
-#define __HAVE_ARCH_MEMCHR 1
-#define __HAVE_ARCH_STRTOK 1
-
-#endif /* KERNEL */
-
-#endif /* _H8300_STRING_H_ */
diff --git a/arch/h8300/include/asm/switch_to.h b/arch/h8300/include/asm/switch_to.h
deleted file mode 100644 (file)
index cdd8731..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _H8300_SWITCH_TO_H
-#define _H8300_SWITCH_TO_H
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing.  This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- *
- * H8/300 Porting 2002/09/04 Yoshinori Sato
- */
-
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) {                         \
-  void *_last;                                             \
-  __asm__ __volatile__(                                            \
-                       "mov.l  %1, er0\n\t"                \
-                       "mov.l  %2, er1\n\t"                \
-                        "mov.l  %3, er2\n\t"                \
-                       "jsr @_resume\n\t"                  \
-                        "mov.l  er2,%0\n\t"                 \
-                      : "=r" (_last)                       \
-                      : "r" (&(prev->thread)),             \
-                        "r" (&(next->thread)),             \
-                         "g" (prev)                         \
-                      : "cc", "er0", "er1", "er2", "er3"); \
-  (last) = _last;                                          \
-}
-
-#endif /* _H8300_SWITCH_TO_H */
diff --git a/arch/h8300/include/asm/target_time.h b/arch/h8300/include/asm/target_time.h
deleted file mode 100644 (file)
index 9f2a9aa..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-extern int platform_timer_setup(void (*timer_int)(int, void *, struct pt_regs *));
-extern void platform_timer_eoi(void);
-extern void platform_gettod(unsigned int *year, unsigned int *mon, unsigned int *day, 
-                            unsigned int *hour, unsigned int *min, unsigned int *sec);
diff --git a/arch/h8300/include/asm/termios.h b/arch/h8300/include/asm/termios.h
deleted file mode 100644 (file)
index 93a63df..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _H8300_TERMIOS_H
-#define _H8300_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/*     intr=^C         quit=^|         erase=del       kill=^U
-       eof=^D          vtime=\0        vmin=\1         sxtc=\0
-       start=^Q        stop=^S         susp=^Z         eol=\0
-       reprint=^R      discard=^U      werase=^W       lnext=^V
-       eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
-       unsigned short tmp; \
-       get_user(tmp, &(termio)->c_iflag); \
-       (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
-       get_user(tmp, &(termio)->c_oflag); \
-       (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
-       get_user(tmp, &(termio)->c_cflag); \
-       (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
-       get_user(tmp, &(termio)->c_lflag); \
-       (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
-       get_user((termios)->c_line, &(termio)->c_line); \
-       copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
-       put_user((termios)->c_iflag, &(termio)->c_iflag); \
-       put_user((termios)->c_oflag, &(termio)->c_oflag); \
-       put_user((termios)->c_cflag, &(termio)->c_cflag); \
-       put_user((termios)->c_lflag, &(termio)->c_lflag); \
-       put_user((termios)->c_line,  &(termio)->c_line); \
-       copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* _H8300_TERMIOS_H */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
deleted file mode 100644 (file)
index ec2f777..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/* thread_info.h: h8300 low-level thread information
- * adapted from the i386 and PPC versions by Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Copyright (C) 2002  David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#include <asm/page.h>
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/*
- * low level task data.
- * If you change this, change the TI_* offsets below to match.
- */
-struct thread_info {
-       struct task_struct *task;               /* main task structure */
-       struct exec_domain *exec_domain;        /* execution domain */
-       unsigned long      flags;               /* low level flags */
-       int                cpu;                 /* cpu we're on */
-       int                preempt_count;       /* 0 => preemptable, <0 => BUG */
-       struct restart_block restart_block;
-};
-
-/*
- * macros/functions for gaining access to the thread information structure
- */
-#define INIT_THREAD_INFO(tsk)                  \
-{                                              \
-       .task =         &tsk,                   \
-       .exec_domain =  &default_exec_domain,   \
-       .flags =        0,                      \
-       .cpu =          0,                      \
-       .preempt_count = INIT_PREEMPT_COUNT,    \
-       .restart_block  = {                     \
-               .fn = do_no_restart_syscall,    \
-       },                                      \
-}
-
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
-
-/*
- * Size of kernel stack for each process. This must be a power of 2...
- */
-#define THREAD_SIZE_ORDER      1
-#define THREAD_SIZE            8192    /* 2 pages */
-
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-       struct thread_info *ti;
-       __asm__(
-               "mov.l  sp, %0 \n\t"
-               "and.l  %1, %0"
-               : "=&r"(ti)
-               : "i" (~(THREAD_SIZE-1))
-               );
-       return ti;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Offsets in thread_info structure, used in assembly code
- */
-#define TI_TASK                0
-#define TI_EXECDOMAIN  4
-#define TI_FLAGS       8
-#define TI_CPU         12
-#define TI_PRE_COUNT   16
-
-#define        PREEMPT_ACTIVE  0x4000000
-
-/*
- * thread information flag bit numbers
- */
-#define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_SIGPENDING         1       /* signal pending */
-#define TIF_NEED_RESCHED       2       /* rescheduling necessary */
-#define TIF_MEMDIE             4       /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
-#define TIF_NOTIFY_RESUME      6       /* callback before returning to user */
-
-/* as above, but as bit values */
-#define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
-#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
-
-#define _TIF_WORK_MASK         (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
-                                _TIF_NOTIFY_RESUME)
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/h8300/include/asm/timer.h b/arch/h8300/include/asm/timer.h
deleted file mode 100644 (file)
index def8046..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __H8300_TIMER_H
-#define __H8300_TIMER_H
-
-void h8300_timer_tick(void);
-void h8300_timer_setup(void);
-void h8300_gettod(unsigned int *year, unsigned int *mon, unsigned int *day,
-                  unsigned int *hour, unsigned int *min, unsigned int *sec);
-
-#define TIMER_FREQ (CONFIG_CPU_CLOCK*10000) /* Timer input freq. */
-
-#define calc_param(cnt, div, rate, limit)                      \
-do {                                                           \
-       cnt = TIMER_FREQ / HZ;                                  \
-       for (div = 0; div < ARRAY_SIZE(divide_rate); div++) {   \
-               if (rate[div] == 0)                             \
-                       continue;                               \
-               if ((cnt / rate[div]) > limit)                  \
-                       break;                                  \
-       }                                                       \
-       if (div == ARRAY_SIZE(divide_rate))                     \
-               panic("Timer counter overflow");                \
-       cnt /= divide_rate[div];                                \
-} while(0)
-
-#endif
diff --git a/arch/h8300/include/asm/timex.h b/arch/h8300/include/asm/timex.h
deleted file mode 100644 (file)
index 23e6701..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * linux/include/asm-h8300/timex.h
- *
- * H8/300 architecture timex specifications
- */
-#ifndef _ASM_H8300_TIMEX_H
-#define _ASM_H8300_TIMEX_H
-
-#define CLOCK_TICK_RATE (CONFIG_CPU_CLOCK*1000/8192) /* Timer input freq. */
-
-typedef unsigned long cycles_t;
-extern short h8300_timer_count;
-
-static inline cycles_t get_cycles(void)
-{
-       return 0;
-}
-
-#endif
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
deleted file mode 100644 (file)
index 7f07430..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __H8300_TLB_H__
-#define __H8300_TLB_H__
-
-#define tlb_flush(tlb) do { } while(0)
-
-#include <asm-generic/tlb.h>
-
-#endif
diff --git a/arch/h8300/include/asm/tlbflush.h b/arch/h8300/include/asm/tlbflush.h
deleted file mode 100644 (file)
index 41c148a..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _H8300_TLBFLUSH_H
-#define _H8300_TLBFLUSH_H
-
-/*
- * Copyright (C) 2000 Lineo, David McCullough <davidm@uclinux.org>
- * Copyright (C) 2000-2002, Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/setup.h>
-
-/*
- * flush all user-space atc entries.
- */
-static inline void __flush_tlb(void)
-{
-       BUG();
-}
-
-static inline void __flush_tlb_one(unsigned long addr)
-{
-       BUG();
-}
-
-#define flush_tlb() __flush_tlb()
-
-/*
- * flush all atc entries (both kernel and user-space entries).
- */
-static inline void flush_tlb_all(void)
-{
-       BUG();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-       BUG();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       BUG();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
-                                  unsigned long start, unsigned long end)
-{
-       BUG();
-}
-
-static inline void flush_tlb_kernel_page(unsigned long addr)
-{
-       BUG();
-}
-
-#endif /* _H8300_TLBFLUSH_H */
diff --git a/arch/h8300/include/asm/topology.h b/arch/h8300/include/asm/topology.h
deleted file mode 100644 (file)
index fdc1219..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_H8300_TOPOLOGY_H
-#define _ASM_H8300_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_H8300_TOPOLOGY_H */
diff --git a/arch/h8300/include/asm/traps.h b/arch/h8300/include/asm/traps.h
deleted file mode 100644 (file)
index 41cf6be..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *  linux/include/asm-h8300/traps.h
- *
- *  Copyright (C) 2003 Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _H8300_TRAPS_H
-#define _H8300_TRAPS_H
-
-extern void system_call(void);
-extern void interrupt_entry(void);
-extern void trace_break(void);
-
-#define JMP_OP 0x5a000000
-#define JSR_OP 0x5e000000
-#define VECTOR(address) ((JMP_OP)|((unsigned long)address))
-#define REDIRECT(address) ((JSR_OP)|((unsigned long)address))
-
-#define TRACE_VEC 5
-
-#define TRAP0_VEC 8
-#define TRAP1_VEC 9
-#define TRAP2_VEC 10
-#define TRAP3_VEC 11
-
-#if defined(__H8300H__)
-#define NR_TRAPS 12
-#endif
-#if defined(__H8300S__)
-#define NR_TRAPS 16
-#endif
-
-#endif /* _H8300_TRAPS_H */
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
deleted file mode 100644 (file)
index c012707..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _H8300_TYPES_H
-#define _H8300_TYPES_H
-
-#include <uapi/asm/types.h>
-
-
-#define BITS_PER_LONG 32
-
-#endif /* _H8300_TYPES_H */
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
deleted file mode 100644 (file)
index 8725d1a..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-#ifndef __H8300_UACCESS_H
-#define __H8300_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-
-#include <asm/segment.h>
-
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
-/* We let the MMU do all checking */
-#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size)
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
-#define        RANGE_CHECK_OK(addr, size, lower, upper) \
-       (((addr) >= (lower)) && (((addr) + (size)) < (upper)))
-
-       extern unsigned long _ramend;
-       return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend));
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-       unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- */
-
-#define put_user(x, ptr)                               \
-({                                                     \
-    int __pu_err = 0;                                  \
-    typeof(*(ptr)) __pu_val = (x);                     \
-    switch (sizeof (*(ptr))) {                         \
-    case 1:                                            \
-    case 2:                                            \
-    case 4:                                            \
-       *(ptr) = (__pu_val);                            \
-       break;                                          \
-    case 8:                                            \
-       memcpy(ptr, &__pu_val, sizeof (*(ptr)));        \
-       break;                                          \
-    default:                                           \
-       __pu_err = __put_user_bad();                    \
-       break;                                          \
-    }                                                  \
-    __pu_err;                                          \
-})
-#define __put_user(x, ptr) put_user(x, ptr)
-
-extern int __put_user_bad(void);
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define __ptr(x) ((unsigned long *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define get_user(x, ptr)                                       \
-({                                                             \
-    int __gu_err = 0;                                          \
-    typeof(*(ptr)) __gu_val = *ptr;                            \
-    switch (sizeof(*(ptr))) {                                  \
-    case 1:                                                    \
-    case 2:                                                    \
-    case 4:                                                    \
-    case 8:                                                    \
-       break;                                                  \
-    default:                                                   \
-       __gu_err = __get_user_bad();                            \
-       break;                                                  \
-    }                                                          \
-    (x) = __gu_val;                                            \
-    __gu_err;                                                  \
-})
-#define __get_user(x, ptr) get_user(x, ptr)
-
-extern int __get_user_bad(void);
-
-#define copy_from_user(to, from, n)            (memcpy(to, from, n), 0)
-#define copy_to_user(to, from, n)              (memcpy(to, from, n), 0)
-
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
-
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
-
-/*
- * Copy a null terminated string from userspace.
- */
-
-static inline long
-strncpy_from_user(char *dst, const char *src, long count)
-{
-       char *tmp;
-       strncpy(dst, src, count);
-       for (tmp = dst; *tmp && count > 0; tmp++, count--)
-               ;
-       return(tmp - dst); /* DAVIDM should we count a NUL ?  check getname */
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-static inline long strnlen_user(const char *src, long n)
-{
-       return(strlen(src) + 1); /* DAVIDM make safer */
-}
-
-#define strlen_user(str) strnlen_user(str, 32767)
-
-/*
- * Zero Userspace
- */
-
-static inline unsigned long
-clear_user(void *to, unsigned long n)
-{
-       memset(to, 0, n);
-       return 0;
-}
-
-#define __clear_user   clear_user
-
-#endif /* _H8300_UACCESS_H */
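The uaccess stubs removed above are the no-MMU identity versions: copy_to_user()/copy_from_user() are plain memcpy() wrappers that always report success. For orientation only, here is a hypothetical driver-style helper (names invented, assuming the usual <linux/uaccess.h>/<linux/errno.h> environment) showing the calling convention those macros implement on MMU architectures, where a non-zero return means bytes were left uncopied:

  /* Illustration only: copy_to_user() returns the number of bytes that
   * could NOT be copied, so 0 means success. */
  struct demo_stats { unsigned long reads; unsigned long writes; };

  static int demo_read_stats(void __user *ubuf, const struct demo_stats *s)
  {
          if (copy_to_user(ubuf, s, sizeof(*s)))
                  return -EFAULT;         /* partial copy => fault */
          return 0;
  }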
diff --git a/arch/h8300/include/asm/ucontext.h b/arch/h8300/include/asm/ucontext.h
deleted file mode 100644 (file)
index 0bcf8f8..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _H8300_UCONTEXT_H
-#define _H8300_UCONTEXT_H
-
-struct ucontext {
-       unsigned long     uc_flags;
-       struct ucontext  *uc_link;
-       stack_t           uc_stack;
-       struct sigcontext uc_mcontext;
-       sigset_t          uc_sigmask;   /* mask last for extensibility */
-};
-
-#endif
diff --git a/arch/h8300/include/asm/unaligned.h b/arch/h8300/include/asm/unaligned.h
deleted file mode 100644 (file)
index b8d06c7..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_H8300_UNALIGNED_H
-#define _ASM_H8300_UNALIGNED_H
-
-#include <linux/unaligned/be_memmove.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned  __get_unaligned_be
-#define put_unaligned  __put_unaligned_be
-
-#endif /* _ASM_H8300_UNALIGNED_H */
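The header above maps get_unaligned()/put_unaligned() to the big-endian helpers, consistent with the H8/300 being big-endian (see the byteorder.h removal below). A minimal plain-C illustration of what a big-endian unaligned 32-bit load boils down to (function name made up for this sketch):

  #include <stdint.h>
  #include <string.h>

  static uint32_t load_be32_unaligned(const void *p)
  {
          uint8_t b[4];

          memcpy(b, p, sizeof(b));        /* safe on any alignment */
          return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
                 ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
  }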
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
deleted file mode 100644 (file)
index ab671ec..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _ASM_H8300_UNISTD_H_
-#define _ASM_H8300_UNISTD_H_
-
-#include <uapi/asm/unistd.h>
-
-
-#define NR_syscalls 321
-
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_IPC
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_MMAP
-#define __ARCH_WANT_SYS_OLD_SELECT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_CLONE
-
-#endif /* _ASM_H8300_UNISTD_H_ */
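NR_syscalls is 321 here, one past the highest entry in the uapi table removed further down (__NR_setns == 320); the entry.S system-call path later in this diff rejects anything at or above that bound. A hedged, self-contained C sketch of that bounds check (names invented):

  /* -ENOSYS (== -38) is what an out-of-range syscall number yields. */
  #define DEMO_ENOSYS 38

  static long demo_dispatch(unsigned long nr, long (*table[])(void),
                            unsigned long nr_syscalls)
  {
          if (nr >= nr_syscalls)
                  return -DEMO_ENOSYS;
          return table[nr]();
  }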
diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h
deleted file mode 100644 (file)
index 14a9e18..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _H8300_USER_H
-#define _H8300_USER_H
-
-#include <asm/page.h>
-
-/* Core file format: The core file is written in such a way that gdb
-   can understand it and provide useful information to the user (under
-   linux we use the 'trad-core' bfd).  There are quite a number of
-   obstacles to being able to view the contents of the floating point
-   registers, and until these are solved you will not be able to view the
-   contents of them.  Actually, you can read in the core file and look at
-   the contents of the user struct to find out what the floating point
-   registers contain.
-   The actual file contents are as follows:
-   UPAGE: 1 page consisting of a user struct that tells gdb what is present
-   in the file.  Directly after this is a copy of the task_struct, which
-   is currently not used by gdb, but it may come in useful at some point.
-   All of the registers are stored as part of the upage.  The upage should
-   always be only one page.
-   DATA: The data area is stored.  We use current->end_text to
-   current->brk to pick up all of the user variables, plus any memory
-   that may have been malloced.  No attempt is made to determine if a page
-   is demand-zero or if a page is totally unused, we just cover the entire
-   range.  All of the addresses are rounded in such a way that an integral
-   number of pages is written.
-   STACK: We need the stack information in order to get a meaningful
-   backtrace.  We need to write the data from (esp) to
-   current->start_stack, so we round each of these off in order to be able
-   to write an integer number of pages.
-   The minimum core file size is 3 pages, or 12288 bytes.
-*/
-
-/* This is the old layout of "struct pt_regs" as of Linux 1.x, and
-   is still the layout used by user (the new pt_regs doesn't have
-   all registers). */
-struct user_regs_struct {
-       long er1,er2,er3,er4,er5,er6;
-       long er0;
-       long usp;
-       long orig_er0;
-       short ccr;
-       long pc;
-};
-
-       
-/* When the kernel dumps core, it starts by dumping the user struct -
-   this will be used by gdb to figure out where the data and stack segments
-   are within the file, and what virtual addresses to use. */
-struct user{
-/* We start with the registers, to mimic the way that "memory" is returned
-   from the ptrace(3,...) function.  */
-  struct user_regs_struct regs;        /* Where the registers are actually stored */
-/* ptrace does not yet supply these.  Someday.... */
-/* The rest of this junk is to help gdb figure out what goes where */
-  unsigned long int u_tsize;   /* Text segment size (pages). */
-  unsigned long int u_dsize;   /* Data segment size (pages). */
-  unsigned long int u_ssize;   /* Stack segment size (pages). */
-  unsigned long start_code;     /* Starting virtual address of text. */
-  unsigned long start_stack;   /* Starting virtual address of stack area.
-                                  This is actually the bottom of the stack,
-                                  the top of the stack is always found in the
-                                  esp register.  */
-  long int signal;                     /* Signal that caused the core dump. */
-  int reserved;                        /* No longer used */
-  unsigned long u_ar0;         /* Used by gdb to help find the values for */
-                               /* the registers. */
-  unsigned long magic;         /* To uniquely identify a core file */
-  char u_comm[32];             /* User command that was responsible */
-};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
-#endif
diff --git a/arch/h8300/include/asm/virtconvert.h b/arch/h8300/include/asm/virtconvert.h
deleted file mode 100644 (file)
index 19cfd62..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __H8300_VIRT_CONVERT__
-#define __H8300_VIRT_CONVERT__
-
-/*
- * Macros used for converting between virtual and physical mappings.
- */
-
-#ifdef __KERNEL__
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-#define phys_to_virt(vaddr)    ((void *) (vaddr))
-#define virt_to_phys(vaddr)    ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-#endif
-#endif
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
deleted file mode 100644 (file)
index 040178c..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# UAPI Header export list
-include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/h8300/include/uapi/asm/auxvec.h b/arch/h8300/include/uapi/asm/auxvec.h
deleted file mode 100644 (file)
index 1d36fe3..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMH8300_AUXVEC_H
-#define __ASMH8300_AUXVEC_H
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h
deleted file mode 100644 (file)
index 6dc0bb0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/h8300/include/uapi/asm/byteorder.h b/arch/h8300/include/uapi/asm/byteorder.h
deleted file mode 100644 (file)
index 13539da..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_BYTEORDER_H
-#define _H8300_BYTEORDER_H
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _H8300_BYTEORDER_H */
diff --git a/arch/h8300/include/uapi/asm/errno.h b/arch/h8300/include/uapi/asm/errno.h
deleted file mode 100644 (file)
index 0c2f564..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_ERRNO_H
-#define _H8300_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _H8300_ERRNO_H */
diff --git a/arch/h8300/include/uapi/asm/fcntl.h b/arch/h8300/include/uapi/asm/fcntl.h
deleted file mode 100644 (file)
index 1952cb2..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _H8300_FCNTL_H
-#define _H8300_FCNTL_H
-
-#define O_DIRECTORY    040000  /* must be a directory */
-#define O_NOFOLLOW     0100000 /* don't follow links */
-#define O_DIRECT       0200000 /* direct disk access hint - currently ignored */
-#define O_LARGEFILE    0400000
-
-#include <asm-generic/fcntl.h>
-
-#endif /* _H8300_FCNTL_H */
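The four O_* values above are the only flags this architecture overrides; everything else comes from asm-generic/fcntl.h. A small userspace sketch (helper name invented) using one of them:

  #include <fcntl.h>

  /* Fails with ENOTDIR unless 'path' names a directory. */
  static int demo_open_directory(const char *path)
  {
          return open(path, O_RDONLY | O_DIRECTORY);
  }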
diff --git a/arch/h8300/include/uapi/asm/ioctl.h b/arch/h8300/include/uapi/asm/ioctl.h
deleted file mode 100644 (file)
index b279fe0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/h8300/include/uapi/asm/ioctls.h b/arch/h8300/include/uapi/asm/ioctls.h
deleted file mode 100644 (file)
index 30eaed2..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ARCH_H8300_IOCTLS_H__
-#define __ARCH_H8300_IOCTLS_H__
-
-#define FIOQSIZE       0x545E
-
-#include <asm-generic/ioctls.h>
-
-#endif /* __ARCH_H8300_IOCTLS_H__ */
diff --git a/arch/h8300/include/uapi/asm/ipcbuf.h b/arch/h8300/include/uapi/asm/ipcbuf.h
deleted file mode 100644 (file)
index 84c7e51..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/h8300/include/uapi/asm/kvm_para.h b/arch/h8300/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index 14fab8f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/h8300/include/uapi/asm/mman.h b/arch/h8300/include/uapi/asm/mman.h
deleted file mode 100644 (file)
index 8eebf89..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/h8300/include/uapi/asm/msgbuf.h b/arch/h8300/include/uapi/asm/msgbuf.h
deleted file mode 100644 (file)
index 6b148cd..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _H8300_MSGBUF_H
-#define _H8300_MSGBUF_H
-
-/* 
- * The msqid64_ds structure for H8/300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
-       struct ipc64_perm msg_perm;
-       __kernel_time_t msg_stime;      /* last msgsnd time */
-       unsigned long   __unused1;
-       __kernel_time_t msg_rtime;      /* last msgrcv time */
-       unsigned long   __unused2;
-       __kernel_time_t msg_ctime;      /* last change time */
-       unsigned long   __unused3;
-       unsigned long  msg_cbytes;      /* current number of bytes on queue */
-       unsigned long  msg_qnum;        /* number of messages in queue */
-       unsigned long  msg_qbytes;      /* max number of bytes on queue */
-       __kernel_pid_t msg_lspid;       /* pid of last msgsnd */
-       __kernel_pid_t msg_lrpid;       /* last receive pid */
-       unsigned long  __unused4;
-       unsigned long  __unused5;
-};
-
-#endif /* _H8300_MSGBUF_H */
diff --git a/arch/h8300/include/uapi/asm/param.h b/arch/h8300/include/uapi/asm/param.h
deleted file mode 100644 (file)
index 3dd18ae..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _UAPI_H8300_PARAM_H
-#define _UAPI_H8300_PARAM_H
-
-#ifndef __KERNEL__
-#define HZ             100
-#endif
-
-#define EXEC_PAGESIZE  4096
-
-#ifndef NOGROUP
-#define NOGROUP                (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64      /* max length of hostname */
-
-#endif /* _UAPI_H8300_PARAM_H */
diff --git a/arch/h8300/include/uapi/asm/poll.h b/arch/h8300/include/uapi/asm/poll.h
deleted file mode 100644 (file)
index f61540c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __H8300_POLL_H
-#define __H8300_POLL_H
-
-#define POLLWRNORM     POLLOUT
-#define POLLWRBAND     256
-
-#include <asm-generic/poll.h>
-
-#undef POLLREMOVE
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/posix_types.h b/arch/h8300/include/uapi/asm/posix_types.h
deleted file mode 100644 (file)
index 91e62ba..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ARCH_H8300_POSIX_TYPES_H
-#define __ARCH_H8300_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc.  Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned short __kernel_mode_t;
-#define __kernel_mode_t __kernel_mode_t
-
-typedef unsigned short __kernel_ipc_pid_t;
-#define __kernel_ipc_pid_t __kernel_ipc_pid_t
-
-typedef unsigned short __kernel_uid_t;
-typedef unsigned short __kernel_gid_t;
-#define __kernel_uid_t __kernel_uid_t
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-#define __kernel_old_uid_t __kernel_old_uid_t
-
-#include <asm-generic/posix_types.h>
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/ptrace.h b/arch/h8300/include/uapi/asm/ptrace.h
deleted file mode 100644 (file)
index ef39ec5..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _UAPI_H8300_PTRACE_H
-#define _UAPI_H8300_PTRACE_H
-
-#ifndef __ASSEMBLY__
-
-#define PT_ER1    0
-#define PT_ER2    1
-#define PT_ER3    2
-#define PT_ER4    3
-#define PT_ER5    4
-#define PT_ER6    5
-#define PT_ER0    6
-#define PT_ORIG_ER0       7
-#define PT_CCR    8
-#define PT_PC     9
-#define PT_USP    10
-#define PT_EXR     12
-
-/* this struct defines the way the registers are stored on the
-   stack during a system call. */
-
-struct pt_regs {
-       long     retpc;
-       long     er4;
-       long     er5;
-       long     er6;
-       long     er3;
-       long     er2;
-       long     er1;
-       long     orig_er0;
-       unsigned short ccr;
-       long     er0;
-       long     vector;
-#if defined(CONFIG_CPU_H8S)
-       unsigned short exr;
-#endif
-       unsigned long  pc;
-} __attribute__((aligned(2),packed));
-
-#define PTRACE_GETREGS            12
-#define PTRACE_SETREGS            13
-
-#endif /* __ASSEMBLY__ */
-#endif /* _UAPI_H8300_PTRACE_H */
diff --git a/arch/h8300/include/uapi/asm/resource.h b/arch/h8300/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 46c5f43..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_RESOURCE_H
-#define _H8300_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _H8300_RESOURCE_H */
diff --git a/arch/h8300/include/uapi/asm/sembuf.h b/arch/h8300/include/uapi/asm/sembuf.h
deleted file mode 100644 (file)
index e04a3ec..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _H8300_SEMBUF_H
-#define _H8300_SEMBUF_H
-
-/* 
- * The semid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
-       struct ipc64_perm sem_perm;             /* permissions .. see ipc.h */
-       __kernel_time_t sem_otime;              /* last semop time */
-       unsigned long   __unused1;
-       __kernel_time_t sem_ctime;              /* last change time */
-       unsigned long   __unused2;
-       unsigned long   sem_nsems;              /* no. of semaphores in array */
-       unsigned long   __unused3;
-       unsigned long   __unused4;
-};
-
-#endif /* _H8300_SEMBUF_H */
diff --git a/arch/h8300/include/uapi/asm/setup.h b/arch/h8300/include/uapi/asm/setup.h
deleted file mode 100644 (file)
index e2c600e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_SETUP_H
-#define __H8300_SETUP_H
-
-#define COMMAND_LINE_SIZE      512
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/shmbuf.h b/arch/h8300/include/uapi/asm/shmbuf.h
deleted file mode 100644 (file)
index 64e7799..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _H8300_SHMBUF_H
-#define _H8300_SHMBUF_H
-
-/* 
- * The shmid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
-       struct ipc64_perm       shm_perm;       /* operation perms */
-       size_t                  shm_segsz;      /* size of segment (bytes) */
-       __kernel_time_t         shm_atime;      /* last attach time */
-       unsigned long           __unused1;
-       __kernel_time_t         shm_dtime;      /* last detach time */
-       unsigned long           __unused2;
-       __kernel_time_t         shm_ctime;      /* last change time */
-       unsigned long           __unused3;
-       __kernel_pid_t          shm_cpid;       /* pid of creator */
-       __kernel_pid_t          shm_lpid;       /* pid of last operator */
-       unsigned long           shm_nattch;     /* no. of current attaches */
-       unsigned long           __unused4;
-       unsigned long           __unused5;
-};
-
-struct shminfo64 {
-       unsigned long   shmmax;
-       unsigned long   shmmin;
-       unsigned long   shmmni;
-       unsigned long   shmseg;
-       unsigned long   shmall;
-       unsigned long   __unused1;
-       unsigned long   __unused2;
-       unsigned long   __unused3;
-       unsigned long   __unused4;
-};
-
-#endif /* _H8300_SHMBUF_H */
diff --git a/arch/h8300/include/uapi/asm/sigcontext.h b/arch/h8300/include/uapi/asm/sigcontext.h
deleted file mode 100644 (file)
index e4b8150..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_H8300_SIGCONTEXT_H
-#define _ASM_H8300_SIGCONTEXT_H
-
-struct sigcontext {
-       unsigned long  sc_mask;         /* old sigmask */
-       unsigned long  sc_usp;          /* old user stack pointer */
-       unsigned long  sc_er0;
-       unsigned long  sc_er1;
-       unsigned long  sc_er2;
-       unsigned long  sc_er3;
-       unsigned long  sc_er4;
-       unsigned long  sc_er5;
-       unsigned long  sc_er6;
-       unsigned short sc_ccr;
-       unsigned long  sc_pc;
-};
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/siginfo.h b/arch/h8300/include/uapi/asm/siginfo.h
deleted file mode 100644 (file)
index bc8fbea..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SIGINFO_H
-#define _H8300_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h
deleted file mode 100644 (file)
index af3a6c3..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _UAPI_H8300_SIGNAL_H
-#define _UAPI_H8300_SIGNAL_H
-
-#include <linux/types.h>
-
-/* Avoid too many header ordering problems.  */
-struct siginfo;
-
-#ifndef __KERNEL__
-/* Here we must cater to libcs that poke about in kernel headers.  */
-
-#define NSIG           32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP          1
-#define SIGINT          2
-#define SIGQUIT                 3
-#define SIGILL          4
-#define SIGTRAP                 5
-#define SIGABRT                 6
-#define SIGIOT          6
-#define SIGBUS          7
-#define SIGFPE          8
-#define SIGKILL                 9
-#define SIGUSR1                10
-#define SIGSEGV                11
-#define SIGUSR2                12
-#define SIGPIPE                13
-#define SIGALRM                14
-#define SIGTERM                15
-#define SIGSTKFLT      16
-#define SIGCHLD                17
-#define SIGCONT                18
-#define SIGSTOP                19
-#define SIGTSTP                20
-#define SIGTTIN                21
-#define SIGTTOU                22
-#define SIGURG         23
-#define SIGXCPU                24
-#define SIGXFSZ                25
-#define SIGVTALRM      26
-#define SIGPROF                27
-#define SIGWINCH       28
-#define SIGIO          29
-#define SIGPOLL                SIGIO
-/*
-#define SIGLOST                29
-*/
-#define SIGPWR         30
-#define SIGSYS         31
-#define        SIGUNUSED       31
-
-/* These should not be considered constants from userland.  */
-#define SIGRTMIN       32
-#define SIGRTMAX       _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP   0x00000001
-#define SA_NOCLDWAIT   0x00000002 /* not supported yet */
-#define SA_SIGINFO     0x00000004
-#define SA_ONSTACK     0x08000000
-#define SA_RESTART     0x10000000
-#define SA_NODEFER     0x40000000
-#define SA_RESETHAND   0x80000000
-
-#define SA_NOMASK      SA_NODEFER
-#define SA_ONESHOT     SA_RESETHAND
-
-#define SA_RESTORER    0x04000000
-
-#define MINSIGSTKSZ    2048
-#define SIGSTKSZ       8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifndef __KERNEL__
-/* Here we must cater to libcs that poke about in kernel headers.  */
-
-struct sigaction {
-       union {
-         __sighandler_t _sa_handler;
-         void (*_sa_sigaction)(int, struct siginfo *, void *);
-       } _u;
-       sigset_t sa_mask;
-       unsigned long sa_flags;
-       void (*sa_restorer)(void);
-};
-
-#define sa_handler     _u._sa_handler
-#define sa_sigaction   _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
-       void *ss_sp;
-       int ss_flags;
-       size_t ss_size;
-} stack_t;
-
-
-#endif /* _UAPI_H8300_SIGNAL_H */
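The signal numbers and SA_* flag values above follow the classic Linux layout. A minimal userspace sketch (handler and helper names are illustrative) that installs a handler with two of those flags through the portable sigaction(2) interface:

  #include <signal.h>
  #include <string.h>

  static void demo_handler(int sig, siginfo_t *info, void *ctx)
  {
          (void)sig; (void)info; (void)ctx;  /* keep it async-signal-safe */
  }

  static int demo_install(void)
  {
          struct sigaction sa;

          memset(&sa, 0, sizeof(sa));
          sa.sa_sigaction = demo_handler;
          sa.sa_flags     = SA_SIGINFO | SA_RESTART;
          sigemptyset(&sa.sa_mask);
          return sigaction(SIGINT, &sa, NULL);
  }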
diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
deleted file mode 100644 (file)
index 9490758..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockoptions(2) */
-#define SOL_SOCKET     1
-
-#define SO_DEBUG       1
-#define SO_REUSEADDR   2
-#define SO_TYPE                3
-#define SO_ERROR       4
-#define SO_DONTROUTE   5
-#define SO_BROADCAST   6
-#define SO_SNDBUF      7
-#define SO_RCVBUF      8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE   9
-#define SO_OOBINLINE   10
-#define SO_NO_CHECK    11
-#define SO_PRIORITY    12
-#define SO_LINGER      13
-#define SO_BSDCOMPAT   14
-#define SO_REUSEPORT   15
-#define SO_PASSCRED    16
-#define SO_PEERCRED    17
-#define SO_RCVLOWAT    18
-#define SO_SNDLOWAT    19
-#define SO_RCVTIMEO    20
-#define SO_SNDTIMEO    21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION             22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT       23
-#define SO_SECURITY_ENCRYPTION_NETWORK         24
-
-#define SO_BINDTODEVICE        25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER        26
-#define SO_DETACH_FILTER        27
-#define SO_GET_FILTER          SO_ATTACH_FILTER
-
-#define SO_PEERNAME             28
-#define SO_TIMESTAMP           29
-#define SCM_TIMESTAMP          SO_TIMESTAMP
-
-#define SO_ACCEPTCONN          30
-
-#define SO_PEERSEC             31
-#define SO_PASSSEC             34
-#define SO_TIMESTAMPNS         35
-#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
-
-#define SO_MARK                        36
-
-#define SO_TIMESTAMPING                37
-#define SCM_TIMESTAMPING       SO_TIMESTAMPING
-
-#define SO_PROTOCOL            38
-#define SO_DOMAIN              39
-
-#define SO_RXQ_OVFL             40
-
-#define SO_WIFI_STATUS         41
-#define SCM_WIFI_STATUS                SO_WIFI_STATUS
-#define SO_PEEK_OFF            42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS               43
-
-#define SO_LOCK_FILTER         44
-
-#define SO_SELECT_ERR_QUEUE    45
-
-#define SO_BUSY_POLL           46
-
-#endif /* _ASM_SOCKET_H */
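This per-arch socket.h spells every SO_* value out by hand rather than pulling in asm-generic/socket.h. A short userspace sketch (helper name invented) exercising two of those options via setsockopt(2):

  #include <sys/socket.h>

  static int demo_tune_socket(int fd)
  {
          int one = 1;

          if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
                  return -1;
          return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
  }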
diff --git a/arch/h8300/include/uapi/asm/sockios.h b/arch/h8300/include/uapi/asm/sockios.h
deleted file mode 100644 (file)
index e9c7ec8..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_H8300_SOCKIOS__
-#define __ARCH_H8300_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN      0x8901
-#define SIOCSPGRP      0x8902
-#define FIOGETOWN      0x8903
-#define SIOCGPGRP      0x8904
-#define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
-#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
-
-#endif /* __ARCH_H8300_SOCKIOS__ */
diff --git a/arch/h8300/include/uapi/asm/stat.h b/arch/h8300/include/uapi/asm/stat.h
deleted file mode 100644 (file)
index 62c3cc2..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _H8300_STAT_H
-#define _H8300_STAT_H
-
-struct __old_kernel_stat {
-       unsigned short st_dev;
-       unsigned short st_ino;
-       unsigned short st_mode;
-       unsigned short st_nlink;
-       unsigned short st_uid;
-       unsigned short st_gid;
-       unsigned short st_rdev;
-       unsigned long  st_size;
-       unsigned long  st_atime;
-       unsigned long  st_mtime;
-       unsigned long  st_ctime;
-};
-
-struct stat {
-       unsigned short st_dev;
-       unsigned short __pad1;
-       unsigned long st_ino;
-       unsigned short st_mode;
-       unsigned short st_nlink;
-       unsigned short st_uid;
-       unsigned short st_gid;
-       unsigned short st_rdev;
-       unsigned short __pad2;
-       unsigned long  st_size;
-       unsigned long  st_blksize;
-       unsigned long  st_blocks;
-       unsigned long  st_atime;
-       unsigned long  __unused1;
-       unsigned long  st_mtime;
-       unsigned long  __unused2;
-       unsigned long  st_ctime;
-       unsigned long  __unused3;
-       unsigned long  __unused4;
-       unsigned long  __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
-       unsigned long long      st_dev;
-       unsigned char   __pad1[2];
-
-#define STAT64_HAS_BROKEN_ST_INO       1
-       unsigned long   __st_ino;
-
-       unsigned int    st_mode;
-       unsigned int    st_nlink;
-
-       unsigned long   st_uid;
-       unsigned long   st_gid;
-
-       unsigned long long      st_rdev;
-       unsigned char   __pad3[2];
-
-       long long       st_size;
-       unsigned long   st_blksize;
-
-       unsigned long   __pad4;         /* future possible st_blocks high bits */
-       unsigned long   st_blocks;      /* Number 512-byte blocks allocated. */
-
-       unsigned long   st_atime;
-       unsigned long   st_atime_nsec;
-
-       unsigned long   st_mtime;
-       unsigned long   st_mtime_nsec;
-
-       unsigned long   st_ctime;
-       unsigned long   st_ctime_nsec;
-
-       unsigned long long      st_ino;
-};
-
-#endif /* _H8300_STAT_H */
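struct stat64 above matches the glibc 2.1 layout, padding included, and is typically what a 32-bit userspace ends up using when built with _FILE_OFFSET_BITS=64. A tiny hedged sketch of the usual consumer side (helper name invented):

  #include <sys/stat.h>

  /* Illustrative only: returns the file size, or -1 on error. */
  static long long demo_file_size(const char *path)
  {
          struct stat st;

          if (stat(path, &st) < 0)
                  return -1;
          return (long long)st.st_size;
  }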
diff --git a/arch/h8300/include/uapi/asm/statfs.h b/arch/h8300/include/uapi/asm/statfs.h
deleted file mode 100644 (file)
index b96efa7..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_STATFS_H
-#define _H8300_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _H8300_STATFS_H */
diff --git a/arch/h8300/include/uapi/asm/swab.h b/arch/h8300/include/uapi/asm/swab.h
deleted file mode 100644 (file)
index 39abbf5..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _H8300_SWAB_H
-#define _H8300_SWAB_H
-
-#include <linux/types.h>
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __SWAB_64_THRU_32__
-#endif
-
-#endif /* _H8300_SWAB_H */
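__SWAB_64_THRU_32__ tells the generic byteswap code to synthesize 64-bit swaps from 32-bit ones, which suits a CPU without a native 64-bit swap instruction. A plain-C illustration of that strategy (function names made up):

  #include <stdint.h>

  static uint32_t demo_swab32(uint32_t x)
  {
          return (x << 24) | ((x & 0x0000ff00u) << 8) |
                 ((x & 0x00ff0000u) >> 8) | (x >> 24);
  }

  /* Swap each 32-bit half, then exchange the halves. */
  static uint64_t demo_swab64_thru_32(uint64_t x)
  {
          uint32_t hi = (uint32_t)(x >> 32);
          uint32_t lo = (uint32_t)x;

          return ((uint64_t)demo_swab32(lo) << 32) | demo_swab32(hi);
  }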
diff --git a/arch/h8300/include/uapi/asm/termbits.h b/arch/h8300/include/uapi/asm/termbits.h
deleted file mode 100644 (file)
index 3287a62..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef __ARCH_H8300_TERMBITS_H__
-#define __ARCH_H8300_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char  cc_t;
-typedef unsigned int   speed_t;
-typedef unsigned int   tcflag_t;
-
-#define NCCS 19
-struct termios {
-       tcflag_t c_iflag;               /* input mode flags */
-       tcflag_t c_oflag;               /* output mode flags */
-       tcflag_t c_cflag;               /* control mode flags */
-       tcflag_t c_lflag;               /* local mode flags */
-       cc_t c_line;                    /* line discipline */
-       cc_t c_cc[NCCS];                /* control characters */
-};
-
-struct termios2 {
-       tcflag_t c_iflag;               /* input mode flags */
-       tcflag_t c_oflag;               /* output mode flags */
-       tcflag_t c_cflag;               /* control mode flags */
-       tcflag_t c_lflag;               /* local mode flags */
-       cc_t c_line;                    /* line discipline */
-       cc_t c_cc[NCCS];                /* control characters */
-       speed_t c_ispeed;               /* input speed */
-       speed_t c_ospeed;               /* output speed */
-};
-
-struct ktermios {
-       tcflag_t c_iflag;               /* input mode flags */
-       tcflag_t c_oflag;               /* output mode flags */
-       tcflag_t c_cflag;               /* control mode flags */
-       tcflag_t c_lflag;               /* local mode flags */
-       cc_t c_line;                    /* line discipline */
-       cc_t c_cc[NCCS];                /* control characters */
-       speed_t c_ispeed;               /* input speed */
-       speed_t c_ospeed;               /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK  0000020
-#define ISTRIP 0000040
-#define INLCR  0000100
-#define IGNCR  0000200
-#define ICRNL  0000400
-#define IUCLC  0001000
-#define IXON   0002000
-#define IXANY  0004000
-#define IXOFF  0010000
-#define IMAXBEL        0020000
-#define IUTF8  0040000
-
-/* c_oflag bits */
-#define OPOST  0000001
-#define OLCUC  0000002
-#define ONLCR  0000004
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-#define OFILL  0000100
-#define OFDEL  0000200
-#define NLDLY  0000400
-#define   NL0  0000000
-#define   NL1  0000400
-#define CRDLY  0003000
-#define   CR0  0000000
-#define   CR1  0001000
-#define   CR2  0002000
-#define   CR3  0003000
-#define TABDLY 0014000
-#define   TAB0 0000000
-#define   TAB1 0004000
-#define   TAB2 0010000
-#define   TAB3 0014000
-#define   XTABS        0014000
-#define BSDLY  0020000
-#define   BS0  0000000
-#define   BS1  0020000
-#define VTDLY  0040000
-#define   VT0  0000000
-#define   VT1  0040000
-#define FFDLY  0100000
-#define   FF0  0000000
-#define   FF1  0100000
-
-/* c_cflag bit meaning */
-#define CBAUD  0010017
-#define  B0    0000000         /* hang up */
-#define  B50   0000001
-#define  B75   0000002
-#define  B110  0000003
-#define  B134  0000004
-#define  B150  0000005
-#define  B200  0000006
-#define  B300  0000007
-#define  B600  0000010
-#define  B1200 0000011
-#define  B1800 0000012
-#define  B2400 0000013
-#define  B4800 0000014
-#define  B9600 0000015
-#define  B19200        0000016
-#define  B38400        0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE  0000060
-#define   CS5  0000000
-#define   CS6  0000020
-#define   CS7  0000040
-#define   CS8  0000060
-#define CSTOPB 0000100
-#define CREAD  0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL  0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define    BOTHER 0010000
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD   002003600000          /* input baud rate */
-#define CMSPAR   010000000000          /* mark or space (stick) parity */
-#define CRTSCTS          020000000000          /* flow control */
-
-#define IBSHIFT          16                    /* shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG   0000001
-#define ICANON 0000002
-#define XCASE  0000004
-#define ECHO   0000010
-#define ECHOE  0000020
-#define ECHOK  0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL        0001000
-#define ECHOPRT        0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC        0200000
-
-
-/* tcflow() and TCXONC use these */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* tcflush() and TCFLSH use these */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
-
-/* tcsetattr uses these */
-#define        TCSANOW         0
-#define        TCSADRAIN       1
-#define        TCSAFLUSH       2
-
-#endif /* __ARCH_H8300_TERMBITS_H__ */
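The termbits above are the standard Linux set (19 control characters, octal flag values). A small hedged userspace sketch (helper name invented) that uses a few of them to drop a tty into a simple raw-ish mode:

  #include <termios.h>

  static int demo_make_raw(int fd)
  {
          struct termios t;

          if (tcgetattr(fd, &t) < 0)
                  return -1;
          t.c_lflag &= ~(ICANON | ECHO | ISIG);  /* no line editing, echo, signals */
          t.c_iflag &= ~(IXON | ICRNL);          /* no flow control, CR mapping */
          t.c_cc[VMIN]  = 1;                     /* read() waits for one byte */
          t.c_cc[VTIME] = 0;                     /* ...with no inter-byte timeout */
          return tcsetattr(fd, TCSANOW, &t);
  }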
diff --git a/arch/h8300/include/uapi/asm/termios.h b/arch/h8300/include/uapi/asm/termios.h
deleted file mode 100644 (file)
index 5a67d7e..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _UAPI_H8300_TERMIOS_H
-#define _UAPI_H8300_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-struct winsize {
-       unsigned short ws_row;
-       unsigned short ws_col;
-       unsigned short ws_xpixel;
-       unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
-       unsigned short c_iflag;         /* input mode flags */
-       unsigned short c_oflag;         /* output mode flags */
-       unsigned short c_cflag;         /* control mode flags */
-       unsigned short c_lflag;         /* local mode flags */
-       unsigned char c_line;           /* line discipline */
-       unsigned char c_cc[NCC];        /* control characters */
-};
-
-
-/* modem lines */
-#define TIOCM_LE       0x001
-#define TIOCM_DTR      0x002
-#define TIOCM_RTS      0x004
-#define TIOCM_ST       0x008
-#define TIOCM_SR       0x010
-#define TIOCM_CTS      0x020
-#define TIOCM_CAR      0x040
-#define TIOCM_RNG      0x080
-#define TIOCM_DSR      0x100
-#define TIOCM_CD       TIOCM_CAR
-#define TIOCM_RI       TIOCM_RNG
-#define TIOCM_OUT1     0x2000
-#define TIOCM_OUT2     0x4000
-#define TIOCM_LOOP     0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-
-#endif /* _UAPI_H8300_TERMIOS_H */
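The TIOCM_* bits above are read and written with the TIOCMGET/TIOCMSET ioctls. A one-function userspace sketch (name invented) that checks carrier detect:

  #include <sys/ioctl.h>

  static int demo_carrier_present(int fd)
  {
          int bits;

          if (ioctl(fd, TIOCMGET, &bits) < 0)
                  return -1;
          return (bits & TIOCM_CAR) ? 1 : 0;
  }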
diff --git a/arch/h8300/include/uapi/asm/types.h b/arch/h8300/include/uapi/asm/types.h
deleted file mode 100644 (file)
index 9ec9d4c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/int-ll64.h>
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
deleted file mode 100644 (file)
index 8cb5d42..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-#ifndef _UAPI_ASM_H8300_UNISTD_H_
-#define _UAPI_ASM_H8300_UNISTD_H_
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall      0
-#define __NR_exit                1
-#define __NR_fork                2
-#define __NR_read                3
-#define __NR_write               4
-#define __NR_open                5
-#define __NR_close               6
-#define __NR_waitpid             7
-#define __NR_creat               8
-#define __NR_link                9
-#define __NR_unlink             10
-#define __NR_execve             11
-#define __NR_chdir              12
-#define __NR_time               13
-#define __NR_mknod              14
-#define __NR_chmod              15
-#define __NR_lchown             16
-#define __NR_break              17
-#define __NR_oldstat            18
-#define __NR_lseek              19
-#define __NR_getpid             20
-#define __NR_mount              21
-#define __NR_umount             22
-#define __NR_setuid             23
-#define __NR_getuid             24
-#define __NR_stime              25
-#define __NR_ptrace             26
-#define __NR_alarm              27
-#define __NR_oldfstat           28
-#define __NR_pause              29
-#define __NR_utime              30
-#define __NR_stty               31
-#define __NR_gtty               32
-#define __NR_access             33
-#define __NR_nice               34
-#define __NR_ftime              35
-#define __NR_sync               36
-#define __NR_kill               37
-#define __NR_rename             38
-#define __NR_mkdir              39
-#define __NR_rmdir              40
-#define __NR_dup                41
-#define __NR_pipe               42
-#define __NR_times              43
-#define __NR_prof               44
-#define __NR_brk                45
-#define __NR_setgid             46
-#define __NR_getgid             47
-#define __NR_signal             48
-#define __NR_geteuid            49
-#define __NR_getegid            50
-#define __NR_acct               51
-#define __NR_umount2            52
-#define __NR_lock               53
-#define __NR_ioctl              54
-#define __NR_fcntl              55
-#define __NR_mpx                56
-#define __NR_setpgid            57
-#define __NR_ulimit             58
-#define __NR_oldolduname        59
-#define __NR_umask              60
-#define __NR_chroot             61
-#define __NR_ustat              62
-#define __NR_dup2               63
-#define __NR_getppid            64
-#define __NR_getpgrp            65
-#define __NR_setsid             66
-#define __NR_sigaction          67
-#define __NR_sgetmask           68
-#define __NR_ssetmask           69
-#define __NR_setreuid           70
-#define __NR_setregid           71
-#define __NR_sigsuspend                 72
-#define __NR_sigpending                 73
-#define __NR_sethostname        74
-#define __NR_setrlimit          75
-#define __NR_getrlimit          76
-#define __NR_getrusage          77
-#define __NR_gettimeofday       78
-#define __NR_settimeofday       79
-#define __NR_getgroups          80
-#define __NR_setgroups          81
-#define __NR_select             82
-#define __NR_symlink            83
-#define __NR_oldlstat           84
-#define __NR_readlink           85
-#define __NR_uselib             86
-#define __NR_swapon             87
-#define __NR_reboot             88
-#define __NR_readdir            89
-#define __NR_mmap               90
-#define __NR_munmap             91
-#define __NR_truncate           92
-#define __NR_ftruncate          93
-#define __NR_fchmod             94
-#define __NR_fchown             95
-#define __NR_getpriority        96
-#define __NR_setpriority        97
-#define __NR_profil             98
-#define __NR_statfs             99
-#define __NR_fstatfs           100
-#define __NR_ioperm            101
-#define __NR_socketcall                102
-#define __NR_syslog            103
-#define __NR_setitimer         104
-#define __NR_getitimer         105
-#define __NR_stat              106
-#define __NR_lstat             107
-#define __NR_fstat             108
-#define __NR_olduname          109
-#define __NR_iopl              110
-#define __NR_vhangup           111
-#define __NR_idle              112
-#define __NR_vm86old           113
-#define __NR_wait4             114
-#define __NR_swapoff           115
-#define __NR_sysinfo           116
-#define __NR_ipc               117
-#define __NR_fsync             118
-#define __NR_sigreturn         119
-#define __NR_clone             120
-#define __NR_setdomainname     121
-#define __NR_uname             122
-#define __NR_modify_ldt                123
-#define __NR_adjtimex          124
-#define __NR_mprotect          125
-#define __NR_sigprocmask       126
-#define __NR_create_module     127
-#define __NR_init_module       128
-#define __NR_delete_module     129
-#define __NR_get_kernel_syms   130
-#define __NR_quotactl          131
-#define __NR_getpgid           132
-#define __NR_fchdir            133
-#define __NR_bdflush           134
-#define __NR_sysfs             135
-#define __NR_personality       136
-#define __NR_afs_syscall       137 /* Syscall for Andrew File System */
-#define __NR_setfsuid          138
-#define __NR_setfsgid          139
-#define __NR__llseek           140
-#define __NR_getdents          141
-#define __NR__newselect                142
-#define __NR_flock             143
-#define __NR_msync             144
-#define __NR_readv             145
-#define __NR_writev            146
-#define __NR_getsid            147
-#define __NR_fdatasync         148
-#define __NR__sysctl           149
-#define __NR_mlock             150
-#define __NR_munlock           151
-#define __NR_mlockall          152
-#define __NR_munlockall                153
-#define __NR_sched_setparam            154
-#define __NR_sched_getparam            155
-#define __NR_sched_setscheduler                156
-#define __NR_sched_getscheduler                157
-#define __NR_sched_yield               158
-#define __NR_sched_get_priority_max    159
-#define __NR_sched_get_priority_min    160
-#define __NR_sched_rr_get_interval     161
-#define __NR_nanosleep         162
-#define __NR_mremap            163
-#define __NR_setresuid         164
-#define __NR_getresuid         165
-#define __NR_vm86              166
-#define __NR_query_module      167
-#define __NR_poll              168
-#define __NR_nfsservctl                169
-#define __NR_setresgid         170
-#define __NR_getresgid         171
-#define __NR_prctl             172
-#define __NR_rt_sigreturn      173
-#define __NR_rt_sigaction      174
-#define __NR_rt_sigprocmask    175
-#define __NR_rt_sigpending     176
-#define __NR_rt_sigtimedwait   177
-#define __NR_rt_sigqueueinfo   178
-#define __NR_rt_sigsuspend     179
-#define __NR_pread64           180
-#define __NR_pwrite64          181
-#define __NR_chown             182
-#define __NR_getcwd            183
-#define __NR_capget            184
-#define __NR_capset            185
-#define __NR_sigaltstack       186
-#define __NR_sendfile          187
-#define __NR_getpmsg           188     /* some people actually want streams */
-#define __NR_putpmsg           189     /* some people actually want streams */
-#define __NR_vfork             190
-#define __NR_ugetrlimit                191
-#define __NR_mmap2             192
-#define __NR_truncate64                193
-#define __NR_ftruncate64       194
-#define __NR_stat64            195
-#define __NR_lstat64           196
-#define __NR_fstat64           197
-#define __NR_lchown32          198
-#define __NR_getuid32          199
-#define __NR_getgid32          200
-#define __NR_geteuid32         201
-#define __NR_getegid32         202
-#define __NR_setreuid32                203
-#define __NR_setregid32                204
-#define __NR_getgroups32       205
-#define __NR_setgroups32       206
-#define __NR_fchown32          207
-#define __NR_setresuid32       208
-#define __NR_getresuid32       209
-#define __NR_setresgid32       210
-#define __NR_getresgid32       211
-#define __NR_chown32           212
-#define __NR_setuid32          213
-#define __NR_setgid32          214
-#define __NR_setfsuid32                215
-#define __NR_setfsgid32                216
-#define __NR_pivot_root                217
-#define __NR_mincore           218
-#define __NR_madvise           219
-#define __NR_madvise1          219
-#define __NR_getdents64                220
-#define __NR_fcntl64           221
-/* 223 is unused */
-#define __NR_gettid            224
-#define __NR_readahead         225
-#define __NR_setxattr          226
-#define __NR_lsetxattr         227
-#define __NR_fsetxattr         228
-#define __NR_getxattr          229
-#define __NR_lgetxattr         230
-#define __NR_fgetxattr         231
-#define __NR_listxattr         232
-#define __NR_llistxattr                233
-#define __NR_flistxattr                234
-#define __NR_removexattr       235
-#define __NR_lremovexattr      236
-#define __NR_fremovexattr      237
-#define __NR_tkill             238
-#define __NR_sendfile64                239
-#define __NR_futex             240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area   243
-#define __NR_get_thread_area   244
-#define __NR_io_setup          245
-#define __NR_io_destroy                246
-#define __NR_io_getevents      247
-#define __NR_io_submit         248
-#define __NR_io_cancel         249
-#define __NR_fadvise64         250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group                252
-#define __NR_lookup_dcookie    253
-#define __NR_epoll_create      254
-#define __NR_epoll_ctl         255
-#define __NR_epoll_wait                256
-#define __NR_remap_file_pages  257
-#define __NR_set_tid_address   258
-#define __NR_timer_create      259
-#define __NR_timer_settime     (__NR_timer_create+1)
-#define __NR_timer_gettime     (__NR_timer_create+2)
-#define __NR_timer_getoverrun  (__NR_timer_create+3)
-#define __NR_timer_delete      (__NR_timer_create+4)
-#define __NR_clock_settime     (__NR_timer_create+5)
-#define __NR_clock_gettime     (__NR_timer_create+6)
-#define __NR_clock_getres      (__NR_timer_create+7)
-#define __NR_clock_nanosleep   (__NR_timer_create+8)
-#define __NR_statfs64          268
-#define __NR_fstatfs64         269
-#define __NR_tgkill            270
-#define __NR_utimes            271
-#define __NR_fadvise64_64      272
-#define __NR_vserver           273
-#define __NR_mbind             274
-#define __NR_get_mempolicy     275
-#define __NR_set_mempolicy     276
-#define __NR_mq_open           277
-#define __NR_mq_unlink         (__NR_mq_open+1)
-#define __NR_mq_timedsend      (__NR_mq_open+2)
-#define __NR_mq_timedreceive   (__NR_mq_open+3)
-#define __NR_mq_notify         (__NR_mq_open+4)
-#define __NR_mq_getsetattr     (__NR_mq_open+5)
-#define __NR_kexec_load                283
-#define __NR_waitid            284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key           286
-#define __NR_request_key       287
-#define __NR_keyctl            288
-#define __NR_ioprio_set                289
-#define __NR_ioprio_get                290
-#define __NR_inotify_init      291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch  293
-#define __NR_migrate_pages     294
-#define __NR_openat            295
-#define __NR_mkdirat           296
-#define __NR_mknodat           297
-#define __NR_fchownat          298
-#define __NR_futimesat         299
-#define __NR_fstatat64         300
-#define __NR_unlinkat          301
-#define __NR_renameat          302
-#define __NR_linkat            303
-#define __NR_symlinkat         304
-#define __NR_readlinkat                305
-#define __NR_fchmodat          306
-#define __NR_faccessat         307
-#define __NR_pselect6          308
-#define __NR_ppoll             309
-#define __NR_unshare           310
-#define __NR_set_robust_list   311
-#define __NR_get_robust_list   312
-#define __NR_splice            313
-#define __NR_sync_file_range   314
-#define __NR_tee               315
-#define __NR_vmsplice          316
-#define __NR_move_pages                317
-#define __NR_getcpu            318
-#define __NR_epoll_pwait       319
-#define __NR_setns             320
-
-#endif /* _UAPI_ASM_H8300_UNISTD_H_ */
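The numbers above are what land in the syscall-number register; libc's generic syscall(2) wrapper lets userspace invoke one directly. A hedged sketch (the commented value is this table's; another architecture's headers define it differently):

  #define _GNU_SOURCE
  #include <unistd.h>
  #include <sys/syscall.h>

  static long demo_gettid(void)
  {
          return syscall(__NR_gettid);    /* 224 in the table above */
  }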
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
deleted file mode 100644 (file)
index 1cc57f8..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := vmlinux.lds
-
-obj-y := process.o traps.o ptrace.o irq.o \
-        sys_h8300.o time.o signal.o \
-         setup.o gpio.o syscalls.o \
-        entry.o timer/
-
-obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o 
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
deleted file mode 100644 (file)
index fd961e0..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- */
-
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/ptrace.h>
-#include <linux/hardirq.h>
-#include <linux/kbuild.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/ptrace.h>
-
-int main(void)
-{
-       /* offsets into the task struct */
-       DEFINE(TASK_STATE, offsetof(struct task_struct, state));
-       DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
-       DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
-       DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
-       DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
-       DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-       DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-
-       /* offsets into the irq_cpustat_t struct */
-       DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
-
-       /* offsets into the thread struct */
-       DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
-       DEFINE(THREAD_CCR, offsetof(struct thread_struct, ccr));
-
-       /* offsets into the pt_regs struct */
-       DEFINE(LER0,  offsetof(struct pt_regs, er0)      - sizeof(long));
-       DEFINE(LER1,  offsetof(struct pt_regs, er1)      - sizeof(long));
-       DEFINE(LER2,  offsetof(struct pt_regs, er2)      - sizeof(long));
-       DEFINE(LER3,  offsetof(struct pt_regs, er3)      - sizeof(long));
-       DEFINE(LER4,  offsetof(struct pt_regs, er4)      - sizeof(long));
-       DEFINE(LER5,  offsetof(struct pt_regs, er5)      - sizeof(long));
-       DEFINE(LER6,  offsetof(struct pt_regs, er6)      - sizeof(long));
-       DEFINE(LORIG, offsetof(struct pt_regs, orig_er0) - sizeof(long));
-       DEFINE(LCCR,  offsetof(struct pt_regs, ccr)      - sizeof(long));
-       DEFINE(LVEC,  offsetof(struct pt_regs, vector)   - sizeof(long));
-#if defined(__H8300S__)
-       DEFINE(LEXR,  offsetof(struct pt_regs, exr)      - sizeof(long));
-#endif
-       DEFINE(LRET,  offsetof(struct pt_regs, pc)       - sizeof(long));
-
-       DEFINE(PT_PTRACED, PT_PTRACED);
-
-       return 0;
-}
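asm-offsets.c is never linked into the kernel: it is compiled with -S and a script harvests the "->" marker lines from the assembly output to generate the constants (LER0, LRET, ...) that entry.S uses. A standalone, hedged imitation of the trick (struct and symbol names invented; the DEFINE macro mirrors include/linux/kbuild.h in spirit):

  #include <stddef.h>

  struct demo_regs { long er0; long pc; };

  /* Emits a "->SYM value" marker into the assembly output; it never has
   * to execute, it only has to survive compilation with -S. */
  #define DEFINE(sym, val) \
          asm volatile("\n->" #sym " %0 " #val : : "i" (val))

  int main(void)
  {
          DEFINE(DEMO_ER0, offsetof(struct demo_regs, er0));
          DEFINE(DEMO_PC,  offsetof(struct demo_regs, pc));
          return 0;
  }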
diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S
deleted file mode 100644 (file)
index 94bd30f..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-/* -*- mode: asm -*-
- *
- *  linux/arch/h8300/platform/h8300h/entry.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *  David McCullough <davidm@snapgear.com>
- *
- */
-
-/*
- *  entry.S
- *  include exception/interrupt gateway
- *          system call entry
- */
-
-#include <linux/sys.h>
-#include <asm/unistd.h>
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/errno.h>
-
-#if defined(CONFIG_CPU_H8300H)
-#define USERRET 8
-INTERRUPTS = 64
-       .h8300h
-       .macro  SHLL2 reg
-       shll.l  \reg
-       shll.l  \reg
-       .endm
-       .macro  SHLR2 reg
-       shlr.l  \reg
-       shlr.l  \reg
-       .endm
-       .macro  SAVEREGS
-       mov.l   er0,@-sp
-       mov.l   er1,@-sp
-       mov.l   er2,@-sp
-       mov.l   er3,@-sp
-       .endm
-       .macro  RESTOREREGS
-       mov.l   @sp+,er3
-       mov.l   @sp+,er2
-       .endm
-       .macro  SAVEEXR
-       .endm
-       .macro  RESTOREEXR
-       .endm
-#endif
-#if defined(CONFIG_CPU_H8S)
-#define USERRET 10
-#define USEREXR 8
-INTERRUPTS = 128
-       .h8300s
-       .macro  SHLL2 reg
-       shll.l  #2,\reg
-       .endm
-       .macro  SHLR2 reg
-       shlr.l  #2,\reg
-       .endm
-       .macro  SAVEREGS
-       stm.l   er0-er3,@-sp
-       .endm
-       .macro  RESTOREREGS
-       ldm.l   @sp+,er2-er3
-       .endm
-       .macro  SAVEEXR
-       mov.w   @(USEREXR:16,er0),r1
-       mov.w   r1,@(LEXR-LER3:16,sp)           /* copy EXR */
-       .endm
-       .macro  RESTOREEXR
-       mov.w   @(LEXR-LER1:16,sp),r1           /* restore EXR */
-       mov.b   r1l,r1h
-       mov.w   r1,@(USEREXR:16,er0)
-       .endm
-#endif
-
-
-/* CPU context save/restore macros. */
-
-       .macro  SAVE_ALL
-       mov.l   er0,@-sp
-       stc     ccr,r0l                         /* check kernel mode */
-       btst    #4,r0l
-       bne     5f
-
-       /* user mode */
-       mov.l   sp,@_sw_usp
-       mov.l   @sp,er0                         /* restore saved er0 */
-       orc     #0x10,ccr                       /* switch kernel stack */
-       mov.l   @_sw_ksp,sp
-       sub.l   #(LRET-LORIG),sp                /* allocate LORIG - LRET */
-       SAVEREGS
-       mov.l   @_sw_usp,er0
-       mov.l   @(USERRET:16,er0),er1           /* copy the RET addr */
-       mov.l   er1,@(LRET-LER3:16,sp)
-       SAVEEXR
-
-       mov.l   @(LORIG-LER3:16,sp),er0
-       mov.l   er0,@(LER0-LER3:16,sp)          /* copy ER0 */
-       mov.w   e1,r1                           /* e1 highbyte = ccr */
-       and     #0xef,r1h                       /* mask mode? flag */
-       bra     6f
-5:
-       /* kernel mode */
-       mov.l   @sp,er0                         /* restore saved er0 */
-       subs    #2,sp                           /* set dummy ccr */
-       SAVEREGS
-       mov.w   @(LRET-LER3:16,sp),r1           /* copy old ccr */
-6:
-       mov.b   r1h,r1l
-       mov.b   #0,r1h
-       mov.w   r1,@(LCCR-LER3:16,sp)           /* set ccr */
-       mov.l   er6,@-sp                        /* syscall arg #6 */
-       mov.l   er5,@-sp                        /* syscall arg #5 */
-       mov.l   er4,@-sp                        /* syscall arg #4 */
-       .endm                                   /* r1 = ccr */
-
-       .macro  RESTORE_ALL
-       mov.l   @sp+,er4
-       mov.l   @sp+,er5
-       mov.l   @sp+,er6
-       RESTOREREGS
-       mov.w   @(LCCR-LER1:16,sp),r0           /* check kernel mode */
-       btst    #4,r0l
-       bne     7f
-
-       orc     #0x80,ccr
-       mov.l   @_sw_usp,er0
-       mov.l   @(LER0-LER1:16,sp),er1          /* restore ER0 */
-       mov.l   er1,@er0
-       RESTOREEXR
-       mov.w   @(LCCR-LER1:16,sp),r1           /* restore the RET addr */
-       mov.b   r1l,r1h
-       mov.b   @(LRET+1-LER1:16,sp),r1l
-       mov.w   r1,e1
-       mov.w   @(LRET+2-LER1:16,sp),r1
-       mov.l   er1,@(USERRET:16,er0)
-
-       mov.l   @sp+,er1
-       add.l   #(LRET-LER1),sp                 /* remove LORIG - LRET */
-       mov.l   sp,@_sw_ksp
-       andc    #0xef,ccr                       /* switch to user mode */
-       mov.l   er0,sp
-       bra     8f
-7:
-       mov.l   @sp+,er1
-       adds    #4,sp
-       adds    #2,sp
-8:
-       mov.l   @sp+,er0
-       adds    #4,sp                           /* remove the sw created LVEC */
-       rte
-       .endm
-
-.globl _system_call
-.globl _ret_from_exception
-.globl _ret_from_fork
-.globl _ret_from_kernel_thread
-.globl _ret_from_interrupt
-.globl _interrupt_redirect_table
-.globl _sw_ksp,_sw_usp
-.globl _resume
-.globl _interrupt_entry
-.globl _trace_break
-
-#if defined(CONFIG_ROMKERNEL)
-       .section .int_redirect,"ax"
-_interrupt_redirect_table:
-#if defined(CONFIG_CPU_H8300H)
-       .rept   7
-       .long   0
-       .endr
-#endif
-#if defined(CONFIG_CPU_H8S)
-       .rept   5
-       .long   0
-       .endr
-       jmp     @_trace_break
-       .long   0
-#endif
-
-       jsr     @_interrupt_entry               /* NMI */
-       jmp     @_system_call                   /* TRAPA #0 (System call) */
-       .long   0
-       .long   0
-       jmp     @_trace_break                   /* TRAPA #3 (breakpoint) */
-       .rept   INTERRUPTS-12
-       jsr     @_interrupt_entry
-       .endr
-#endif
-#if defined(CONFIG_RAMKERNEL)
-.globl _interrupt_redirect_table
-       .section .bss
-_interrupt_redirect_table:
-       .space  4
-#endif
-
-       .section .text
-       .align  2
-_interrupt_entry:
-       SAVE_ALL
-       mov.l   sp,er0
-       add.l   #LVEC,er0
-       btst    #4,r1l
-       bne     1f
-       /* user LVEC */
-       mov.l   @_sw_usp,er0
-       adds    #4,er0
-1:
-       mov.l   @er0,er0                        /* LVEC address */
-#if defined(CONFIG_ROMKERNEL)
-       sub.l   #_interrupt_redirect_table,er0
-#endif
-#if defined(CONFIG_RAMKERNEL)
-       mov.l   @_interrupt_redirect_table,er1
-       sub.l   er1,er0
-#endif
-       SHLR2   er0
-       dec.l   #1,er0
-       mov.l   sp,er1
-       subs    #4,er1                          /* adjust ret_pc */
-       jsr     @_do_IRQ
-       jmp     @_ret_from_interrupt
-
-_system_call:
-       subs    #4,sp                           /* dummy LVEC */
-       SAVE_ALL
-       andc    #0x7f,ccr
-       mov.l   er0,er4
-
-       /* save top of frame */
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       mov.l   sp,er2
-       and.w   #0xe000,r2
-       mov.b   @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
-       btst    #(TIF_SYSCALL_TRACE & 7),r2l
-       beq     1f
-       jsr     @_do_syscall_trace
-1:
-       cmp.l   #NR_syscalls,er4
-       bcc     badsys
-       SHLL2   er4
-       mov.l   #_sys_call_table,er0
-       add.l   er4,er0
-       mov.l   @er0,er4
-       beq     _ret_from_exception:16
-       mov.l   @(LER1:16,sp),er0
-       mov.l   @(LER2:16,sp),er1
-       mov.l   @(LER3:16,sp),er2
-       jsr     @er4
-       mov.l   er0,@(LER0:16,sp)               /* save the return value */
-       mov.l   sp,er2
-       and.w   #0xe000,r2
-       mov.b   @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
-       btst    #(TIF_SYSCALL_TRACE & 7),r2l
-       beq     2f
-       jsr     @_do_syscall_trace
-2:
-#if defined(CONFIG_SYSCALL_PRINT)
-       jsr     @_syscall_print
-#endif
-       orc     #0x80,ccr
-       bra     resume_userspace
-
-badsys:
-       mov.l   #-ENOSYS,er0
-       mov.l   er0,@(LER0:16,sp)
-       bra     resume_userspace
-
-#if !defined(CONFIG_PREEMPT)
-#define resume_kernel restore_all
-#endif
-
-_ret_from_exception:
-#if defined(CONFIG_PREEMPT)
-       orc     #0x80,ccr
-#endif
-_ret_from_interrupt:
-       mov.b   @(LCCR+1:16,sp),r0l
-       btst    #4,r0l
-       bne     resume_kernel:8         /* return from kernel */
-resume_userspace:
-       andc    #0x7f,ccr
-       mov.l   sp,er4
-       and.w   #0xe000,r4              /* er4 <- current thread info */
-       mov.l   @(TI_FLAGS:16,er4),er1
-       and.l   #_TIF_WORK_MASK,er1
-       beq     restore_all:8
-work_pending:
-       btst    #TIF_NEED_RESCHED,r1l
-       bne     work_resched:8
-       /* work notifysig */
-       mov.l   sp,er0
-       subs    #4,er0                  /* er0: pt_regs */
-       jsr     @_do_notify_resume
-       bra     restore_all:8
-work_resched:
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       jsr     @_schedule
-       bra     resume_userspace:8
-restore_all:
-       RESTORE_ALL                     /* Does RTE */
-
-#if defined(CONFIG_PREEMPT)
-resume_kernel:
-       mov.l   @(TI_PRE_COUNT:16,er4),er0
-       bne     restore_all:8
-need_resched:
-       mov.l   @(TI_FLAGS:16,er4),er0
-       btst    #TIF_NEED_RESCHED,r0l
-       beq     restore_all:8
-       mov.b   @(LCCR+1:16,sp),r0l     /* Interrupt Enabled? */
-       bmi     restore_all:8
-       mov.l   #PREEMPT_ACTIVE,er0
-       mov.l   er0,@(TI_PRE_COUNT:16,er4)
-       andc    #0x7f,ccr
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       jsr     @_schedule
-       orc     #0x80,ccr
-       bra     need_resched:8
-#endif
-
-_ret_from_fork:
-       mov.l   er2,er0
-       jsr     @_schedule_tail
-       jmp     @_ret_from_exception
-
-_ret_from_kernel_thread:
-       mov.l   er2,er0
-       jsr     @_schedule_tail
-       mov.l   @(LER4:16,sp),er0
-       mov.l   @(LER5:16,sp),er1
-       jsr     @er1
-       jmp     @_ret_from_exception
-
-_resume:
-       /*
-        * Beware - when entering resume, offset of tss is in d1,
-        * prev (the current task) is in a0, next (the new task)
-        * is in a1 and d2.b is non-zero if the mm structure is
-        * shared between the tasks, so don't change these
-        * registers until their contents are no longer needed.
-        */
-
-       /* save sr */
-       sub.w   r3,r3
-       stc     ccr,r3l
-       mov.w   r3,@(THREAD_CCR+2:16,er0)
-
-       /* disable interrupts */
-       orc     #0x80,ccr
-       mov.l   @_sw_usp,er3
-       mov.l   er3,@(THREAD_USP:16,er0)
-       mov.l   sp,@(THREAD_KSP:16,er0)
-
-       /* Skip address space switching if they are the same. */
-       /* FIXME: what did we hack out of here, this does nothing! */
-
-       mov.l   @(THREAD_USP:16,er1),er0
-       mov.l   er0,@_sw_usp
-       mov.l   @(THREAD_KSP:16,er1),sp
-
-       /* restore status register */
-       mov.w   @(THREAD_CCR+2:16,er1),r3
-
-       ldc     r3l,ccr
-       rts
-
-_trace_break:
-       subs    #4,sp
-       SAVE_ALL
-       sub.l   er1,er1
-       dec.l   #1,er1
-       mov.l   er1,@(LORIG,sp)
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       mov.l   @_sw_usp,er0
-       mov.l   @er0,er1
-       mov.w   @(-2:16,er1),r2
-       cmp.w   #0x5730,r2
-       beq     1f
-       subs    #2,er1
-       mov.l   er1,@er0
-1:
-       and.w   #0xff,e1
-       mov.l   er1,er0
-       jsr     @_trace_trap
-       jmp     @_ret_from_exception
-
-       .section        .bss
-_sw_ksp:
-       .space  4
-_sw_usp:
-       .space  4
-
-       .end
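
The entry code above recovers the current thread_info by masking the kernel stack pointer (the "and.w #0xe000" in _system_call and resume_userspace). A minimal C sketch of the same arithmetic, assuming 8 KiB kernel stacks aligned on an 8 KiB boundary, which is what the 0xe000 mask and the 8184-byte bound in get_wchan() further down imply:

    /* illustration only, not part of the removed file */
    #define KSTACK_SIZE 8192UL      /* assumed THREAD_SIZE for this port */

    static inline void *thread_info_from_sp(unsigned long sp)
    {
            /* same effect as "mov.l sp,er4; and.w #0xe000,r4":
             * round the stack pointer down to the stack base,
             * where thread_info is kept. */
            return (void *)(sp & ~(KSTACK_SIZE - 1));
    }
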
diff --git a/arch/h8300/kernel/gpio.c b/arch/h8300/kernel/gpio.c
deleted file mode 100644 (file)
index 084bfd0..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/gpio.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-/*
- * Internal I/O Port Management
- */
-
-#include <linux/stddef.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-
-#define _(addr) (volatile unsigned char *)(addr)
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-static volatile unsigned char *ddrs[] = {
-       _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
-       NULL,    _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
-};
-#define MAX_PORT 11
-#endif
-
- #if defined(CONFIG_H83002) || defined(CONFIG_H8048)
-/* Fix me!! */
-#include <asm/regs306x.h>
-static volatile unsigned char *ddrs[] = {
-       _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
-       NULL,    _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
-};
-#define MAX_PORT 11
-#endif
-
-#if defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-static volatile unsigned char *ddrs[] = {
-       _(P1DDR),_(P2DDR),_(P3DDR),NULL    ,_(P5DDR),_(P6DDR),
-       _(P7DDR),_(P8DDR),NULL,    _(PADDR),_(PBDDR),_(PCDDR),
-       _(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR),
-       _(PADDR),_(PBDDR),_(PCDDR),_(PDDDR),_(PEDDR),_(PFDDR),
-       _(PGDDR),_(PHDDR)
-};
-#define MAX_PORT 17
-#endif
-#undef _
-#if !defined(P1DDR)
-#error Unsupported CPU Selection
-#endif
-
-static struct {
-       unsigned char used;
-       unsigned char ddr;
-} gpio_regs[MAX_PORT];
-
-extern char *_platform_gpio_table(int length);
-
-int h8300_reserved_gpio(int port, unsigned int bits)
-{
-       unsigned char *used;
-
-       if (port < 0 || port >= MAX_PORT)
-               return -1;
-       used = &(gpio_regs[port].used);
-       if ((*used & bits) != 0)
-               return 0;
-       *used |= bits;
-       return 1;
-}
-
-int h8300_free_gpio(int port, unsigned int bits)
-{
-       unsigned char *used;
-
-       if (port < 0 || port >= MAX_PORT)
-               return -1;
-       used = &(gpio_regs[port].used);
-       if ((*used & bits) != bits)
-               return 0;
-       *used &= (~bits);
-       return 1;
-}
-
-int h8300_set_gpio_dir(int port_bit,int dir)
-{
-       int port = (port_bit >> 8) & 0xff;
-       int bit  = port_bit & 0xff;
-
-       if (ddrs[port] == NULL)
-               return 0;
-       if (gpio_regs[port].used & bit) {
-               if (dir)
-                       gpio_regs[port].ddr |= bit;
-               else
-                       gpio_regs[port].ddr &= ~bit;
-               *ddrs[port] = gpio_regs[port].ddr;
-               return 1;
-       } else
-               return 0;
-}
-
-int h8300_get_gpio_dir(int port_bit)
-{
-       int port = (port_bit >> 8) & 0xff;
-       int bit  = port_bit & 0xff;
-
-       if (ddrs[port] == NULL)
-               return 0;
-       if (gpio_regs[port].used & bit) {
-               return (gpio_regs[port].ddr & bit) != 0;
-       } else
-               return -1;
-}
-
-#if defined(CONFIG_PROC_FS)
-static char *port_status(int portno)
-{
-       static char result[10];
-       static const char io[2]={'I','O'};
-       char *rp;
-       int c;
-       unsigned char used,ddr;
-       
-       used = gpio_regs[portno].used;
-       ddr  = gpio_regs[portno].ddr;
-       result[8]='\0';
-       rp = result + 7;
-       for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1)
-               if (used & 0x01)
-                       *rp = io[ ddr & 0x01];
-               else    
-                       *rp = '-';
-       return result;
-}
-
-static int gpio_proc_show(struct seq_file *m, void *v)
-{
-       static const char port_name[]="123456789ABCDEFGH";
-       int c;
-
-       for (c = 0; c < MAX_PORT; c++) {
-               if (ddrs[c] == NULL)
-                       continue;
-               seq_printf(m, "P%c: %s\n", port_name[c], port_status(c));
-       }
-       return 0;
-}
-
-static int gpio_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, gpio_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations gpio_proc_fops = {
-       .open           = gpio_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static __init int register_proc(void)
-{
-       return proc_create("gpio", S_IRUGO, NULL, &gpio_proc_fops) != NULL;
-}
-
-__initcall(register_proc);
-#endif
-
-void __init h8300_gpio_init(void)
-{
-       memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs));
-}
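
The helpers above use a packed port/pin encoding: the port index sits in the high byte of port_bit and the pin mask in the low byte, and a non-zero dir marks the pin as an output in the shadowed DDR value. A hypothetical caller (not from this file) might have used them like this:

    #define H8300_GPIO(port, mask)  (((port) << 8) | (mask))   /* assumed helper, not in the file */

    static int claim_output_pin(int port, unsigned char mask)
    {
            /* reserve the pin first; 1 means the bits were free and are now ours */
            if (h8300_reserved_gpio(port, mask) != 1)
                    return -1;

            /* switch the reserved pin to output (sets the DDR bit) */
            if (!h8300_set_gpio_dir(H8300_GPIO(port, mask), 1))
                    return -1;

            return 0;
    }
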
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
deleted file mode 100644 (file)
index 53d7c0e..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-#include <linux/module.h>
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-
-#include <asm/setup.h>
-#include <asm/pgalloc.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/checksum.h>
-#include <asm/current.h>
-#include <asm/gpio.h>
-
-//asmlinkage long long __ashrdi3 (long long, int);
-//asmlinkage long long __lshrdi3 (long long, int);
-extern char h8300_debug_device[];
-
-/* platform dependent support */
-
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-
-EXPORT_SYMBOL(ip_fast_csum);
-
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/* The following are special because they're not called
-   explicitly (the C compiler generates them).  Fortunately,
-   their interface isn't gonna change any time soon now, so
-   it's OK to leave it out of version control.  */
-//EXPORT_SYMBOL(__ashrdi3);
-//EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memmove);
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler...  (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __gcc_bcmp(void);
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __cmpdi2(void);
-extern void __divdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __moddi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __mulsi3(void);
-extern void __negdi2(void);
-extern void __ucmpdi2(void);
-extern void __udivdi3(void);
-extern void __udivmoddi4(void);
-extern void __udivsi3(void);
-extern void __umoddi3(void);
-extern void __umodsi3(void);
-
-        /* gcc lib functions */
-EXPORT_SYMBOL(__gcc_bcmp);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__cmpdi2);
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__moddi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__mulsi3);
-EXPORT_SYMBOL(__negdi2);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__udivmoddi4);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__umodsi3);
-
-EXPORT_SYMBOL(h8300_reserved_gpio);
-EXPORT_SYMBOL(h8300_free_gpio);
-EXPORT_SYMBOL(h8300_set_gpio_dir);
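
The libgcc exports exist because gcc open-codes calls to these helpers for operations the H8/300 cannot do in hardware; without them, any module that performs, say, a 64-bit division would fail to load with an unresolved symbol. A hypothetical module-side example:

    #include <linux/types.h>

    /* gcc turns the 64-bit division below into a call to __udivdi3,
     * which is why the symbol is exported above. */
    static u64 bytes_to_blocks(u64 bytes, u32 block_size)
    {
            return bytes / block_size;
    }
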
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
deleted file mode 100644 (file)
index 2fa8ac7..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * linux/arch/h8300/kernel/irq.c
- *
- * Copyright 2007 Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/random.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-
-#include <asm/traps.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-#include <asm/errno.h>
-
-/*#define DEBUG*/
-
-extern unsigned long *interrupt_redirect_table;
-extern const int h8300_saved_vectors[];
-extern const h8300_vector h8300_trap_table[];
-int h8300_enable_irq_pin(unsigned int irq);
-void h8300_disable_irq_pin(unsigned int irq);
-
-#define CPU_VECTOR ((unsigned long *)0x000000)
-#define ADDR_MASK (0xffffff)
-
-static inline int is_ext_irq(unsigned int irq)
-{
-       return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
-}
-
-static void h8300_enable_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               IER_REGS |= 1 << (data->irq - EXT_IRQ0);
-}
-
-static void h8300_disable_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
-}
-
-static unsigned int h8300_startup_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               return h8300_enable_irq_pin(data->irq);
-       else
-               return 0;
-}
-
-static void h8300_shutdown_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               h8300_disable_irq_pin(data->irq);
-}
-
-/*
- * h8300 interrupt controller implementation
- */
-struct irq_chip h8300irq_chip = {
-       .name           = "H8300-INTC",
-       .irq_startup    = h8300_startup_irq,
-       .irq_shutdown   = h8300_shutdown_irq,
-       .irq_enable     = h8300_enable_irq,
-       .irq_disable    = h8300_disable_irq,
-};
-
-#if defined(CONFIG_RAMKERNEL)
-static unsigned long __init *get_vector_address(void)
-{
-       unsigned long *rom_vector = CPU_VECTOR;
-       unsigned long base,tmp;
-       int vec_no;
-
-       base = rom_vector[EXT_IRQ0] & ADDR_MASK;
-
-       /* check romvector format */
-       for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
-               if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
-                       return NULL;
-       }
-
-       /* ramvector base address */
-       base -= EXT_IRQ0*4;
-
-       /* writable check */
-       tmp = ~(*(volatile unsigned long *)base);
-       (*(volatile unsigned long *)base) = tmp;
-       if ((*(volatile unsigned long *)base) != tmp)
-               return NULL;
-       return (unsigned long *)base;
-}
-
-static void __init setup_vector(void)
-{
-       int i;
-       unsigned long *ramvec,*ramvec_p;
-       const h8300_vector *trap_entry;
-       const int *saved_vector;
-
-       ramvec = get_vector_address();
-       if (ramvec == NULL)
-               panic("interrupt vector setup failed.");
-       else
-               printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec);
-
-       /* create redirect table */
-       ramvec_p = ramvec;
-       trap_entry = h8300_trap_table;
-       saved_vector = h8300_saved_vectors;
-       for ( i = 0; i < NR_IRQS; i++) {
-               if (i == *saved_vector) {
-                       ramvec_p++;
-                       saved_vector++;
-               } else {
-                       if ( i < NR_TRAPS ) {
-                               if (*trap_entry)
-                                       *ramvec_p = VECTOR(*trap_entry);
-                               ramvec_p++;
-                               trap_entry++;
-                       } else
-                               *ramvec_p++ = REDIRECT(interrupt_entry);
-               }
-       }
-       interrupt_redirect_table = ramvec;
-#ifdef DEBUG
-       ramvec_p = ramvec;
-       for (i = 0; i < NR_IRQS; i++) {
-               if ((i % 8) == 0)
-                       printk(KERN_DEBUG "\n%p: ",ramvec_p);
-               printk(KERN_DEBUG "%p ",*ramvec_p);
-               ramvec_p++;
-       }
-       printk(KERN_DEBUG "\n");
-#endif
-}
-#else
-#define setup_vector() do { } while(0)
-#endif
-
-void __init init_IRQ(void)
-{
-       int c;
-
-       setup_vector();
-
-       for (c = 0; c < NR_IRQS; c++)
-               irq_set_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
-}
-
-asmlinkage void do_IRQ(int irq)
-{
-       irq_enter();
-       generic_handle_irq(irq);
-       irq_exit();
-}
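
Since init_IRQ() installs handle_simple_irq with h8300irq_chip for every line, drivers claim the external interrupts through the ordinary request_irq() interface; only the startup/shutdown hooks touch the pin configuration. A sketch of a hypothetical board driver (the line number is made up):

    #include <linux/interrupt.h>

    static irqreturn_t my_board_isr(int irq, void *dev_id)
    {
            /* acknowledge and handle the device here */
            return IRQ_HANDLED;
    }

    static int my_board_claim_irq(void *dev)
    {
            /* EXT_IRQ0 is the base of the external IRQ range used above;
             * line 3 is only an example. */
            return request_irq(EXT_IRQ0 + 3, my_board_isr, 0, "my-board", dev);
    }
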
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
deleted file mode 100644 (file)
index 1d526e0..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
-                      const char *strtab,
-                      unsigned int symindex,
-                      unsigned int relsec,
-                      struct module *me)
-{
-       unsigned int i;
-       Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
-
-       DEBUGP("Applying relocate section %u to %u\n", relsec,
-              sechdrs[relsec].sh_info);
-       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
-               /* This is where to make the change */
-               uint32_t *loc = (uint32_t *)(sechdrs[sechdrs[relsec].sh_info].sh_addr
-                                            + rela[i].r_offset);
-               /* This is the symbol it is referring to.  Note that all
-                  undefined symbols have been resolved.  */
-               Elf32_Sym *sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
-                       + ELF32_R_SYM(rela[i].r_info);
-               uint32_t v = sym->st_value + rela[i].r_addend;
-
-               switch (ELF32_R_TYPE(rela[i].r_info)) {
-               case R_H8_DIR24R8:
-                       loc = (uint32_t *)((uint32_t)loc - 1);
-                       *loc = (*loc & 0xff000000) | ((*loc & 0xffffff) + v);
-                       break;
-               case R_H8_DIR24A8:
-                       if (ELF32_R_SYM(rela[i].r_info))
-                               *loc += v;
-                       break;
-               case R_H8_DIR32:
-               case R_H8_DIR32A16:
-                       *loc += v;
-                       break;
-               case R_H8_PCREL16:
-                       v -= (unsigned long)loc + 2;
-                       if ((Elf32_Sword)v > 0x7fff || 
-                           (Elf32_Sword)v < -(Elf32_Sword)0x8000)
-                               goto overflow;
-                       else 
-                               *(unsigned short *)loc = v;
-                       break;
-               case R_H8_PCREL8:
-                       v -= (unsigned long)loc + 1;
-                       if ((Elf32_Sword)v > 0x7f || 
-                           (Elf32_Sword)v < -(Elf32_Sword)0x80)
-                               goto overflow;
-                       else 
-                               *(unsigned char *)loc = v;
-                       break;
-               default:
-                       printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-                              me->name, ELF32_R_TYPE(rela[i].r_info));
-                       return -ENOEXEC;
-               }
-       }
-       return 0;
- overflow:
-       printk(KERN_ERR "module %s: relocation offset overflow: %08x\n",
-              me->name, rela[i].r_offset);
-       return -ENOEXEC;
-}
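
The two PC-relative cases store the signed distance from the end of the branch instruction to the target and reject anything that does not fit the field. The same arithmetic as the R_H8_PCREL8 case, written out as a standalone sketch:

    #include <stdint.h>

    /* place_addr is the address of the displacement byte being patched,
     * target is sym->st_value + r_addend, as in apply_relocate_add(). */
    static int encode_pcrel8(uint8_t *place, uint32_t place_addr, uint32_t target)
    {
            int32_t disp = (int32_t)(target - (place_addr + 1));

            if (disp > 0x7f || disp < -0x80)
                    return -1;              /* overflow, like the 'goto overflow' above */
            *place = (uint8_t)disp;
            return 0;
    }
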
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
deleted file mode 100644 (file)
index 1a744ab..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/process.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/kernel/process.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
- *                      Kenneth Albanowski <kjahds@kjahds.com>,
- *                      The Silver Hammer Group, Ltd.
- *
- *  linux/arch/m68k/kernel/process.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- *
- *  68060 fixes by Jesper Skov
- */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/rcupdate.h>
-
-#include <asm/uaccess.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-#include <asm/pgtable.h>
-
-void (*pm_power_off)(void) = NULL;
-EXPORT_SYMBOL(pm_power_off);
-
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
-
-/*
- * The idle loop on an H8/300..
- */
-#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
-void arch_cpu_idle(void)
-{
-       local_irq_enable();
-       /* XXX: race here! What if need_resched() gets set now? */
-       __asm__("sleep");
-}
-#endif
-
-void machine_restart(char * __unused)
-{
-       local_irq_disable();
-       __asm__("jmp @@0"); 
-}
-
-void machine_halt(void)
-{
-       local_irq_disable();
-       __asm__("sleep");
-       for (;;);
-}
-
-void machine_power_off(void)
-{
-       local_irq_disable();
-       __asm__("sleep");
-       for (;;);
-}
-
-void show_regs(struct pt_regs * regs)
-{
-       show_regs_print_info(KERN_DEFAULT);
-
-       printk("\nPC: %08lx  Status: %02x",
-              regs->pc, regs->ccr);
-       printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
-              regs->orig_er0, regs->er0, regs->er1);
-       printk("\nER2: %08lx ER3: %08lx ER4: %08lx ER5: %08lx",
-              regs->er2, regs->er3, regs->er4, regs->er5);
-       printk("\nER6: %08lx ",regs->er6);
-       if (user_mode(regs))
-               printk("USP: %08lx\n", rdusp());
-       else
-               printk("\n");
-}
-
-void flush_thread(void)
-{
-}
-
-int copy_thread(unsigned long clone_flags,
-                unsigned long usp, unsigned long topstk,
-                struct task_struct * p)
-{
-       struct pt_regs * childregs;
-
-       childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
-
-       if (unlikely(p->flags & PF_KTHREAD)) {
-               memset(childregs, 0, sizeof(struct pt_regs));
-               childregs->retpc = (unsigned long) ret_from_kernel_thread;
-               childregs->er4 = topstk; /* arg */
-               childregs->er5 = usp; /* fn */
-               p->thread.ksp = (unsigned long)childregs;
-       }
-       *childregs = *current_pt_regs();
-       childregs->retpc = (unsigned long) ret_from_fork;
-       childregs->er0 = 0;
-       p->thread.usp = usp ?: rdusp();
-       p->thread.ksp = (unsigned long)childregs;
-
-       return 0;
-}
-
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long fp, pc;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-
-       stack_page = (unsigned long)p;
-       fp = ((struct pt_regs *)p->thread.ksp)->er6;
-       do {
-               if (fp < stack_page+sizeof(struct thread_info) ||
-                   fp >= 8184+stack_page)
-                       return 0;
-               pc = ((unsigned long *)fp)[1];
-               if (!in_sched_functions(pc))
-                       return pc;
-               fp = *(unsigned long *) fp;
-       } while (count++ < 16);
-       return 0;
-}
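
For kernel threads, copy_thread() parks the thread function in er5 and its argument in er4; ret_from_kernel_thread in the entry.S hunk above reloads them and calls the function with the argument in er0. A hypothetical kthread user that ends up on this path:

    #include <linux/kthread.h>
    #include <linux/delay.h>

    static int my_worker(void *data)
    {
            /* 'data' is the value copy_thread() stashed in childregs->er4 */
            while (!kthread_should_stop())
                    msleep(100);            /* placeholder for real work */
            return 0;
    }

    static struct task_struct *start_worker(void *data)
    {
            return kthread_run(my_worker, data, "my_worker");   /* may return ERR_PTR */
    }
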
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
deleted file mode 100644 (file)
index 748cf65..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/ptrace.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Based on:
- *  linux/arch/m68k/kernel/ptrace.c
- *
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/signal.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/signal.h>
-
-/* cpu depend functions */
-extern long h8300_get_reg(struct task_struct *task, int regno);
-extern int  h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
-
-
-void user_disable_single_step(struct task_struct *child)
-{
-}
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-void ptrace_disable(struct task_struct *child)
-{
-       user_disable_single_step(child);
-}
-
-long arch_ptrace(struct task_struct *child, long request,
-                unsigned long addr, unsigned long data)
-{
-       int ret;
-       int regno = addr >> 2;
-       unsigned long __user *datap = (unsigned long __user *) data;
-
-       switch (request) {
-       /* read the word at location addr in the USER area. */
-               case PTRACE_PEEKUSR: {
-                       unsigned long tmp = 0;
-                       
-                       if ((addr & 3) || addr >= sizeof(struct user)) {
-                               ret = -EIO;
-                               break ;
-                       }
-                       
-                       ret = 0;  /* Default return condition */
-
-                       if (regno < H8300_REGS_NO)
-                               tmp = h8300_get_reg(child, regno);
-                       else {
-                               switch (regno) {
-                               case 49:
-                                       tmp = child->mm->start_code;
-                                       break ;
-                               case 50:
-                                       tmp = child->mm->start_data;
-                                       break ;
-                               case 51:
-                                       tmp = child->mm->end_code;
-                                       break ;
-                               case 52:
-                                       tmp = child->mm->end_data;
-                                       break ;
-                               default:
-                                       ret = -EIO;
-                               }
-                       }
-                       if (!ret)
-                               ret = put_user(tmp, datap);
-                       break ;
-               }
-
-      /* when I and D space are separate, this will have to be fixed. */
-               case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-                       if ((addr & 3) || addr >= sizeof(struct user)) {
-                               ret = -EIO;
-                               break ;
-                       }
-                           
-                       if (regno == PT_ORIG_ER0) {
-                               ret = -EIO;
-                               break ;
-                       }
-                       if (regno < H8300_REGS_NO) {
-                               ret = h8300_put_reg(child, regno, data);
-                               break ;
-                       }
-                       ret = -EIO;
-                       break ;
-
-               case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-                       int i;
-                       unsigned long tmp;
-                       for (i = 0; i < H8300_REGS_NO; i++) {
-                           tmp = h8300_get_reg(child, i);
-                           if (put_user(tmp, datap)) {
-                               ret = -EFAULT;
-                               break;
-                           }
-                           datap++;
-                       }
-                       ret = 0;
-                       break;
-               }
-
-               case PTRACE_SETREGS: { /* Set all gp regs in the child. */
-                       int i;
-                       unsigned long tmp;
-                       for (i = 0; i < H8300_REGS_NO; i++) {
-                           if (get_user(tmp, datap)) {
-                               ret = -EFAULT;
-                               break;
-                           }
-                           h8300_put_reg(child, i, tmp);
-                           datap++;
-                       }
-                       ret = 0;
-                       break;
-               }
-
-               default:
-                       ret = ptrace_request(child, request, addr, data);
-                       break;
-       }
-       return ret;
-}
-
-asmlinkage void do_syscall_trace(void)
-{
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return;
-       if (!(current->ptrace & PT_PTRACED))
-               return;
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-                                ? 0x80 : 0));
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
-}
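
arch_ptrace() treats the PTRACE_PEEKUSR address as a word offset (regno = addr >> 2), with pseudo-registers 49-52 returning the child's start_code, start_data, end_code and end_data. A tracer-side sketch using the userspace ptrace(2) wrapper (glibc spells the request PTRACE_PEEKUSER; the register numbering itself comes from the port's asm/ptrace.h and is not shown here):

    #include <sys/ptrace.h>
    #include <sys/types.h>

    static long peek_user_word(pid_t child, int regno)
    {
            /* errors come back as -1 with errno set; ignored in this sketch */
            return ptrace(PTRACE_PEEKUSER, child, (void *)(long)(regno * 4), 0);
    }

    static long child_start_code(pid_t child)
    {
            return peek_user_word(child, 49);       /* pseudo-register 49: start_code */
    }
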
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
deleted file mode 100644 (file)
index d0b1607..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/setup.c
- *
- *  Copyleft  ()) 2000       James D. Schettine {james@telos-systems.com}
- *  Copyright (C) 1999,2000  Greg Ungerer (gerg@snapgear.com)
- *  Copyright (C) 1998,1999  D. Jeff Dionne <jeff@lineo.ca>
- *  Copyright (C) 1998       Kenneth Albanowski <kjahds@kjahds.com>
- *  Copyright (C) 1995       Hamish Macdonald
- *  Copyright (C) 2000       Lineo Inc. (www.lineo.com) 
- *  Copyright (C) 2001              Lineo, Inc. <www.lineo.com>
- *
- *  H8/300 porting Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-/*
- * This file handles the architecture-dependent parts of system setup
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/fb.h>
-#include <linux/console.h>
-#include <linux/genhd.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/major.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/irq.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-#if defined(__H8300H__)
-#define CPU "H8/300H"
-#include <asm/regs306x.h>
-#endif
-
-#if defined(__H8300S__)
-#define CPU "H8S"
-#include <asm/regs267x.h>
-#endif
-
-#define STUBSIZE 0xc000
-
-unsigned long rom_length;
-unsigned long memory_start;
-unsigned long memory_end;
-
-char __initdata command_line[COMMAND_LINE_SIZE];
-
-extern int _ramstart, _ramend;
-extern char _target_name[];
-extern void h8300_gpio_init(void);
-
-#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) \
-    && defined(CONFIG_GDB_MAGICPRINT)
-/* printk with gdb service */
-static void gdb_console_output(struct console *c, const char *msg, unsigned len)
-{
-       for (; len > 0; len--) {
-               asm("mov.w %0,r2\n\t"
-                    "jsr @0xc4"::"r"(*msg++):"er2");
-       }
-}
-
-/*
- *     Setup initial baud/bits/parity. We do two things here:
- *     - construct a cflag setting for the first rs_open()
- *     - initialize the serial port
- *     Return non-zero if we didn't find a serial port.
- */
-static int __init gdb_console_setup(struct console *co, char *options)
-{
-       return 0;
-}
-
-static const struct console gdb_console = {
-       .name           = "gdb_con",
-       .write          = gdb_console_output,
-       .device         = NULL,
-       .setup          = gdb_console_setup,
-       .flags          = CON_PRINTBUFFER,
-       .index          = -1,
-};
-#endif
-
-void __init setup_arch(char **cmdline_p)
-{
-       int bootmap_size;
-
-       memory_start = (unsigned long) &_ramstart;
-
-       /* allow for ROMFS on the end of the kernel */
-       if (memcmp((void *)memory_start, "-rom1fs-", 8) == 0) {
-#if defined(CONFIG_BLK_DEV_INITRD)
-               initrd_start = memory_start;
-               initrd_end = memory_start += be32_to_cpu(((unsigned long *) (memory_start))[2]);
-#else
-               memory_start += be32_to_cpu(((unsigned long *) memory_start)[2]);
-#endif
-       }
-       memory_start = PAGE_ALIGN(memory_start);
-#if !defined(CONFIG_BLKDEV_RESERVE)
-       memory_end = (unsigned long) &_ramend; /* by now the stack is part of the init task */
-#if defined(CONFIG_GDB_DEBUG)
-       memory_end -= STUBSIZE;
-#endif
-#else
-       if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && 
-           (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
-           /* overlap userarea */
-           memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
-#endif
-
-       init_mm.start_code = (unsigned long) _stext;
-       init_mm.end_code = (unsigned long) _etext;
-       init_mm.end_data = (unsigned long) _edata;
-       init_mm.brk = (unsigned long) 0; 
-
-#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
-       register_console((struct console *)&gdb_console);
-#endif
-
-       printk(KERN_INFO "\r\n\nuClinux " CPU "\n");
-       printk(KERN_INFO "Target Hardware: %s\n",_target_name);
-       printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
-       printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
-
-#ifdef DEBUG
-       printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
-               "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
-               __bss_stop);
-       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
-               "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
-               memory_end, memory_end, &_ramend);
-#endif
-
-#ifdef CONFIG_DEFAULT_CMDLINE
-       /* set from default command line */
-       if (*command_line == '\0')
-               strcpy(command_line,CONFIG_KERNEL_COMMAND);
-#endif
-       /* Keep a copy of command line */
-       *cmdline_p = &command_line[0];
-       memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
-       boot_command_line[COMMAND_LINE_SIZE-1] = 0;
-
-#ifdef DEBUG
-       if (strlen(*cmdline_p)) 
-               printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
-#endif
-
-       /*
-        * give all the memory to the bootmap allocator,  tell it to put the
-        * boot mem_map at the start of memory
-        */
-       bootmap_size = init_bootmem_node(
-                       NODE_DATA(0),
-                       memory_start >> PAGE_SHIFT, /* map goes here */
-                       PAGE_OFFSET >> PAGE_SHIFT,      /* 0 on coldfire */
-                       memory_end >> PAGE_SHIFT);
-       /*
-        * free the usable memory,  we have to make sure we do not free
-        * the bootmem bitmap so we then reserve it after freeing it :-)
-        */
-       free_bootmem(memory_start, memory_end - memory_start);
-       reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-       /*
-        * get kmalloc into gear
-        */
-       paging_init();
-       h8300_gpio_init();
-#if defined(CONFIG_H8300_AKI3068NET) && defined(CONFIG_IDE)
-       {
-#define AREABIT(addr) (1 << (((addr) >> 21) & 7))
-               /* setup BSC */
-               volatile unsigned char *abwcr = (volatile unsigned char *)ABWCR;
-               volatile unsigned char *cscr = (volatile unsigned char *)CSCR;
-               *abwcr &= ~(AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT));
-               *cscr  |= (AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT)) | 0x0f;
-       }
-#endif
-#ifdef DEBUG
-       printk(KERN_DEBUG "Done setup_arch\n");
-#endif
-}
-
-/*
- *     Get CPU information for use by the procfs.
- */
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
-    char *cpu;
-    int mode;
-    u_long clockfreq;
-
-    cpu = CPU;
-    mode = *(volatile unsigned char *)MDCR & 0x07;
-
-    clockfreq = CONFIG_CPU_CLOCK;
-
-    seq_printf(m,  "CPU:\t\t%s (mode:%d)\n"
-                  "Clock:\t\t%lu.%1luMHz\n"
-                  "BogoMips:\t%lu.%02lu\n"
-                  "Calibration:\t%lu loops\n",
-                  cpu,mode,
-                  clockfreq/1000,clockfreq%1000,
-                  (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100,
-                  (loops_per_jiffy*HZ));
-
-    return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-       return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-       .start  = c_start,
-       .next   = c_next,
-       .stop   = c_stop,
-       .show   = show_cpuinfo,
-};
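
AREABIT() in the AKI-3068NET IDE block maps an address to the chip-select area it falls in; the >>21 and &7 imply eight 2 MiB areas. A worked example of the macro by itself:

    #define AREABIT(addr)   (1 << (((addr) >> 21) & 7))

    /* worked values:
     *   AREABIT(0x200000) == 1 << 1 == 0x02   (area 1)
     *   AREABIT(0x600000) == 1 << 3 == 0x08   (area 3)
     */
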
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
deleted file mode 100644 (file)
index a65ff3b..0000000
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/signal.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp>
- *                and David McCullough <davidm@snapgear.com>
- *
- * Based on
- * Linux/m68k by Hamish Macdonald
- */
-
-/*
- * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
- * Atari :-) Current limitation: Only one sigstack can be active at one time.
- * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
- * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
- * signal handlers!
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/highuid.h>
-#include <linux/personality.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/tracehook.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/ucontext.h>
-
-/*
- * Do a signal return; undo the signal stack.
- *
- * Keep the return code on the stack quadword aligned!
- * That makes the cache flush below easier.
- */
-
-struct sigframe
-{
-       long dummy_er0;
-       long dummy_vector;
-#if defined(CONFIG_CPU_H8S)
-       short dummy_exr;
-#endif
-       long dummy_pc;
-       char *pretcode;
-       unsigned char retcode[8];
-       unsigned long extramask[_NSIG_WORDS-1];
-       struct sigcontext sc;
-       int sig;
-} __attribute__((aligned(2),packed));
-
-struct rt_sigframe
-{
-       long dummy_er0;
-       long dummy_vector;
-#if defined(CONFIG_CPU_H8S)
-       short dummy_exr;
-#endif
-       long dummy_pc;
-       char *pretcode;
-       struct siginfo *pinfo;
-       void *puc;
-       unsigned char retcode[8];
-       struct siginfo info;
-       struct ucontext uc;
-       int sig;
-} __attribute__((aligned(2),packed));
-
-static inline int
-restore_sigcontext(struct sigcontext *usc, int *pd0)
-{
-       struct pt_regs *regs = current_pt_regs();
-       int err = 0;
-       unsigned int ccr;
-       unsigned int usp;
-       unsigned int er0;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r)    /* restore passed registers */
-       COPY(er1);
-       COPY(er2);
-       COPY(er3);
-       COPY(er5);
-       COPY(pc);
-       ccr = regs->ccr & 0x10;
-       COPY(ccr);
-#undef COPY
-       regs->ccr &= 0xef;
-       regs->ccr |= ccr;
-       regs->orig_er0 = -1;            /* disable syscall checks */
-       err |= __get_user(usp, &usc->sc_usp);
-       wrusp(usp);
-
-       err |= __get_user(er0, &usc->sc_er0);
-       *pd0 = er0;
-       return err;
-}
-
-asmlinkage int sys_sigreturn(void)
-{
-       unsigned long usp = rdusp();
-       struct sigframe *frame = (struct sigframe *)(usp - 4);
-       sigset_t set;
-       int er0;
-
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
-           (_NSIG_WORDS > 1 &&
-            __copy_from_user(&set.sig[1], &frame->extramask,
-                             sizeof(frame->extramask))))
-               goto badframe;
-
-       set_current_blocked(&set);
-       
-       if (restore_sigcontext(&frame->sc, &er0))
-               goto badframe;
-       return er0;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(void)
-{
-       unsigned long usp = rdusp();
-       struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
-       sigset_t set;
-       int er0;
-
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-               goto badframe;
-
-       set_current_blocked(&set);
-       
-       if (restore_sigcontext(&frame->uc.uc_mcontext, &er0))
-               goto badframe;
-
-       if (restore_altstack(&frame->uc.uc_stack))
-               goto badframe;
-
-       return er0;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-                            unsigned long mask)
-{
-       int err = 0;
-
-       err |= __put_user(regs->er0, &sc->sc_er0);
-       err |= __put_user(regs->er1, &sc->sc_er1);
-       err |= __put_user(regs->er2, &sc->sc_er2);
-       err |= __put_user(regs->er3, &sc->sc_er3);
-       err |= __put_user(regs->er4, &sc->sc_er4);
-       err |= __put_user(regs->er5, &sc->sc_er5);
-       err |= __put_user(regs->er6, &sc->sc_er6);
-       err |= __put_user(rdusp(),   &sc->sc_usp);
-       err |= __put_user(regs->pc,  &sc->sc_pc);
-       err |= __put_user(regs->ccr, &sc->sc_ccr);
-       err |= __put_user(mask,      &sc->sc_mask);
-
-       return err;
-}
-
-static inline void *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
-{
-       unsigned long usp;
-
-       /* Default to using normal stack.  */
-       usp = rdusp();
-
-       /* This is the X/Open sanctioned signal stack switching.  */
-       if (ka->sa.sa_flags & SA_ONSTACK) {
-               if (!sas_ss_flags(usp))
-                       usp = current->sas_ss_sp + current->sas_ss_size;
-       }
-       return (void *)((usp - frame_size) & -8UL);
-}
-
-static int setup_frame (int sig, struct k_sigaction *ka,
-                        sigset_t *set, struct pt_regs *regs)
-{
-       struct sigframe *frame;
-       int err = 0;
-       int usig;
-       unsigned char *ret;
-
-       frame = get_sigframe(ka, regs, sizeof(*frame));
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto give_sigsegv;
-
-       usig = current_thread_info()->exec_domain
-               && current_thread_info()->exec_domain->signal_invmap
-               && sig < 32
-               ? current_thread_info()->exec_domain->signal_invmap[sig]
-               : sig;
-
-       err |= __put_user(usig, &frame->sig);
-       if (err)
-               goto give_sigsegv;
-
-       err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
-       if (err)
-               goto give_sigsegv;
-
-       if (_NSIG_WORDS > 1) {
-               err |= copy_to_user(frame->extramask, &set->sig[1],
-                                   sizeof(frame->extramask));
-               if (err)
-                       goto give_sigsegv;
-       }
-
-       ret = frame->retcode;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               ret = (unsigned char *)(ka->sa.sa_restorer);
-       else {
-               /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
-               err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
-                                 (unsigned long *)(frame->retcode + 0));
-               err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
-       }
-
-       /* Set up to return from userspace.  */
-       err |= __put_user(ret, &frame->pretcode);
-
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up registers for signal handler */
-       wrusp ((unsigned long) frame);
-       regs->pc = (unsigned long) ka->sa.sa_handler;
-       regs->er0 = (current_thread_info()->exec_domain
-                          && current_thread_info()->exec_domain->signal_invmap
-                          && sig < 32
-                          ? current_thread_info()->exec_domain->signal_invmap[sig]
-                         : sig);
-       regs->er1 = (unsigned long)&(frame->sc);
-       regs->er5 = current->mm->start_data;    /* GOT base */
-
-       return 0;
-
-give_sigsegv:
-       force_sigsegv(sig, current);
-       return -EFAULT;
-}
-
-static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
-                           sigset_t *set, struct pt_regs *regs)
-{
-       struct rt_sigframe *frame;
-       int err = 0;
-       int usig;
-       unsigned char *ret;
-
-       frame = get_sigframe(ka, regs, sizeof(*frame));
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto give_sigsegv;
-
-       usig = current_thread_info()->exec_domain
-               && current_thread_info()->exec_domain->signal_invmap
-               && sig < 32
-               ? current_thread_info()->exec_domain->signal_invmap[sig]
-               : sig;
-
-       err |= __put_user(usig, &frame->sig);
-       if (err)
-               goto give_sigsegv;
-
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               goto give_sigsegv;
-
-       /* Create the ucontext.  */
-       err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __save_altstack(&frame->uc.uc_stack, rdusp());
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
-       err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up to return from userspace.  */
-       ret = frame->retcode;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               ret = (unsigned char *)(ka->sa.sa_restorer);
-       else {
-               /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
-               err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
-                                 (unsigned long *)(frame->retcode + 0));
-               err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
-       }
-       err |= __put_user(ret, &frame->pretcode);
-
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up registers for signal handler */
-       wrusp ((unsigned long) frame);
-       regs->pc  = (unsigned long) ka->sa.sa_handler;
-       regs->er0 = (current_thread_info()->exec_domain
-                    && current_thread_info()->exec_domain->signal_invmap
-                    && sig < 32
-                    ? current_thread_info()->exec_domain->signal_invmap[sig]
-                    : sig);
-       regs->er1 = (unsigned long)&(frame->info);
-       regs->er2 = (unsigned long)&frame->uc;
-       regs->er5 = current->mm->start_data;    /* GOT base */
-
-       return 0;
-
-give_sigsegv:
-       force_sigsegv(sig, current);
-       return -EFAULT;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-             struct pt_regs * regs)
-{
-       sigset_t *oldset = sigmask_to_save();
-       int ret;
-       /* are we from a system call? */
-       if (regs->orig_er0 >= 0) {
-               switch (regs->er0) {
-                       case -ERESTART_RESTARTBLOCK:
-                       case -ERESTARTNOHAND:
-                               regs->er0 = -EINTR;
-                               break;
-
-                       case -ERESTARTSYS:
-                               if (!(ka->sa.sa_flags & SA_RESTART)) {
-                                       regs->er0 = -EINTR;
-                                       break;
-                               }
-                       /* fallthrough */
-                       case -ERESTARTNOINTR:
-                               regs->er0 = regs->orig_er0;
-                               regs->pc -= 2;
-               }
-       }
-
-       /* set up the stack frame */
-       if (ka->sa.sa_flags & SA_SIGINFO)
-               ret = setup_rt_frame(sig, ka, info, oldset, regs);
-       else
-               ret = setup_frame(sig, ka, oldset, regs);
-
-       if (!ret)
-               signal_delivered(sig, info, ka, regs, 0);
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-static void do_signal(struct pt_regs *regs)
-{
-       siginfo_t info;
-       int signr;
-       struct k_sigaction ka;
-
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if ((regs->ccr & 0x10))
-               return;
-
-       current->thread.esp0 = (unsigned long) regs;
-
-       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-       if (signr > 0) {
-               /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &info, &ka, regs);
-               return;
-       }
-       /* Did we come from a system call? */
-       if (regs->orig_er0 >= 0) {
-               /* Restart the system call - no handlers present */
-               if (regs->er0 == -ERESTARTNOHAND ||
-                   regs->er0 == -ERESTARTSYS ||
-                   regs->er0 == -ERESTARTNOINTR) {
-                       regs->er0 = regs->orig_er0;
-                       regs->pc -= 2;
-               }
-               if (regs->er0 == -ERESTART_RESTARTBLOCK){
-                       regs->er0 = __NR_restart_syscall;
-                       regs->pc -= 2;
-               }
-       }
-
-       /* If there's no signal to deliver, we just restore the saved mask.  */
-       restore_saved_sigmask();
-}
-
-asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
-{
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs);
-
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-               clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(regs);
-       }
-}
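
When the handler supplies no SA_RESTORER, setup_frame() and setup_rt_frame() drop a six-byte trampoline into frame->retcode; the mnemonics in the comment there (sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0) correspond to the 0x1a80f8NN word plus the 0x5700 halfword that the __put_user() calls store. A sketch that builds the same bytes (H8/300 is big-endian, so the stores land in this order):

    #include <stdint.h>

    static void build_sigreturn_trampoline(uint8_t code[6], uint8_t nr_sigreturn)
    {
            uint32_t word = 0x1a80f800u + nr_sigreturn;     /* sub.l er0,er0; mov.b #nr,r0l */

            code[0] = (word >> 24) & 0xff;
            code[1] = (word >> 16) & 0xff;
            code[2] = (word >> 8) & 0xff;
            code[3] = word & 0xff;
            code[4] = 0x57;                                 /* trapa #0 */
            code[5] = 0x00;
    }
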
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
deleted file mode 100644 (file)
index bf350cb..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/arch/h8300/kernel/sys_h8300.c
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the H8/300
- * platform.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/cachectl.h>
-#include <asm/traps.h>
-#include <asm/unistd.h>
-
-/* sys_cacheflush -- no support.  */
-asmlinkage int
-sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
-{
-       return -EINVAL;
-}
-
-asmlinkage int sys_getpagesize(void)
-{
-       return PAGE_SIZE;
-}
-
-#if defined(CONFIG_SYSCALL_PRINT)
-asmlinkage void syscall_print(void *dummy,...)
-{
-       struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
-       printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n",
-               ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0);
-}
-#endif
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
deleted file mode 100644 (file)
index c55e0ed..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-/* System call entry table */
-#include <linux/sys.h>
-#include <asm/linkage.h>
-#include <asm/unistd.h>
-
-#define CALL(x)        .long _ ## x
-
-.globl _sys_call_table
-
-#if defined(CONFIG_CPU_H8300H)
-       .h8300h
-#endif
-#if defined(CONFIG_CPU_H8S)
-       .h8300s
-#endif
-       .section .text
-       .align  2
-_sys_call_table:
-       CALL(sys_ni_syscall)            /* 0  -  old "setup()" system call*/
-       CALL(sys_exit)
-       CALL(sys_fork)
-       CALL(sys_read)
-       CALL(sys_write)
-       CALL(sys_open)                  /* 5 */
-       CALL(sys_close)
-       CALL(sys_waitpid)
-       CALL(sys_creat)
-       CALL(sys_link)
-       CALL(sys_unlink)                /* 10 */
-       CALL(sys_execve)
-       CALL(sys_chdir)
-       CALL(sys_time)
-       CALL(sys_mknod)
-       CALL(sys_chmod)                 /* 15 */
-       CALL(sys_chown16)
-       CALL(sys_ni_syscall)            /* old break syscall holder */
-       CALL(sys_stat)
-       CALL(sys_lseek)
-       CALL(sys_getpid)                /* 20 */
-       CALL(sys_mount)
-       CALL(sys_oldumount)
-       CALL(sys_setuid16)
-       CALL(sys_getuid16)
-       CALL(sys_stime)                 /* 25 */
-       CALL(sys_ptrace)
-       CALL(sys_alarm)
-       CALL(sys_fstat)
-       CALL(sys_pause)
-       CALL(sys_utime)                 /* 30 */
-       CALL(sys_ni_syscall)            /* old stty syscall holder */
-       CALL(sys_ni_syscall)            /* old gtty syscall holder */
-       CALL(sys_access)
-       CALL(sys_nice)
-       CALL(sys_ni_syscall)            /* 35 old ftime syscall holder */
-       CALL(sys_sync)
-       CALL(sys_kill)
-       CALL(sys_rename)
-       CALL(sys_mkdir)
-       CALL(sys_rmdir)                 /* 40 */
-       CALL(sys_dup)
-       CALL(sys_pipe)
-       CALL(sys_times)
-       CALL(sys_ni_syscall)            /* old prof syscall holder */
-       CALL(sys_brk)                   /* 45 */
-       CALL(sys_setgid16)
-       CALL(sys_getgid16)
-       CALL(sys_signal)
-       CALL(sys_geteuid16)
-       CALL(sys_getegid16)             /* 50 */
-       CALL(sys_acct)
-       CALL(sys_umount)                /* recycled never used phys() */
-       CALL(sys_ni_syscall)            /* old lock syscall holder */
-       CALL(sys_ioctl)
-       CALL(sys_fcntl)                 /* 55 */
-       CALL(sys_ni_syscall)            /* old mpx syscall holder */
-       CALL(sys_setpgid)
-       CALL(sys_ni_syscall)            /* old ulimit syscall holder */
-       CALL(sys_ni_syscall)
-       CALL(sys_umask)                 /* 60 */
-       CALL(sys_chroot)
-       CALL(sys_ustat)
-       CALL(sys_dup2)
-       CALL(sys_getppid)
-       CALL(sys_getpgrp)               /* 65 */
-       CALL(sys_setsid)
-       CALL(sys_sigaction)
-       CALL(sys_sgetmask)
-       CALL(sys_ssetmask)
-       CALL(sys_setreuid16)            /* 70 */
-       CALL(sys_setregid16)
-       CALL(sys_sigsuspend)
-       CALL(sys_sigpending)
-       CALL(sys_sethostname)
-       CALL(sys_setrlimit)             /* 75 */
-       CALL(sys_old_getrlimit)
-       CALL(sys_getrusage)
-       CALL(sys_gettimeofday)
-       CALL(sys_settimeofday)
-       CALL(sys_getgroups16)           /* 80 */
-       CALL(sys_setgroups16)
-       CALL(sys_old_select)
-       CALL(sys_symlink)
-       CALL(sys_lstat)
-       CALL(sys_readlink)              /* 85 */
-       CALL(sys_uselib)
-       CALL(sys_swapon)
-       CALL(sys_reboot)
-       CALL(sys_old_readdir)
-       CALL(sys_old_mmap)              /* 90 */
-       CALL(sys_munmap)
-       CALL(sys_truncate)
-       CALL(sys_ftruncate)
-       CALL(sys_fchmod)
-       CALL(sys_fchown16)              /* 95 */
-       CALL(sys_getpriority)
-       CALL(sys_setpriority)
-       CALL(sys_ni_syscall)            /* old profil syscall holder */
-       CALL(sys_statfs)
-       CALL(sys_fstatfs)               /* 100 */
-       CALL(sys_ni_syscall)            /* ioperm for i386 */
-       CALL(sys_socketcall)
-       CALL(sys_syslog)
-       CALL(sys_setitimer)
-       CALL(sys_getitimer)             /* 105 */
-       CALL(sys_newstat)
-       CALL(sys_newlstat)
-       CALL(sys_newfstat)
-       CALL(sys_ni_syscall)
-       CALL(sys_ni_syscall)            /* iopl for i386 */ /* 110 */
-       CALL(sys_vhangup)
-       CALL(sys_ni_syscall)            /* obsolete idle() syscall */
-       CALL(sys_ni_syscall)            /* vm86old for i386 */
-       CALL(sys_wait4)
-       CALL(sys_swapoff)               /* 115 */
-       CALL(sys_sysinfo)
-       CALL(sys_ipc)
-       CALL(sys_fsync)
-       CALL(sys_sigreturn)
-       CALL(sys_clone)                 /* 120 */
-       CALL(sys_setdomainname)
-       CALL(sys_newuname)
-       CALL(sys_cacheflush)            /* modify_ldt for i386 */
-       CALL(sys_adjtimex)
-       CALL(sys_ni_syscall)            /* 125 sys_mprotect */
-       CALL(sys_sigprocmask)
-       CALL(sys_ni_syscall)            /* sys_create_module */
-       CALL(sys_init_module)
-       CALL(sys_delete_module)
-       CALL(sys_ni_syscall)            /* 130 sys_get_kernel_syms */
-       CALL(sys_quotactl)
-       CALL(sys_getpgid)
-       CALL(sys_fchdir)
-       CALL(sys_bdflush)
-       CALL(sys_sysfs)                 /* 135 */
-       CALL(sys_personality)
-       CALL(sys_ni_syscall)            /* for afs_syscall */
-       CALL(sys_setfsuid16)
-       CALL(sys_setfsgid16)
-       CALL(sys_llseek)                /* 140 */
-       CALL(sys_getdents)
-       CALL(sys_select)
-       CALL(sys_flock)
-       CALL(sys_ni_syscall)            /* sys_msync */
-       CALL(sys_readv)                 /* 145 */
-       CALL(sys_writev)
-       CALL(sys_getsid)
-       CALL(sys_fdatasync)
-       CALL(sys_sysctl)
-       CALL(sys_ni_syscall)            /* 150 sys_mlock */
-       CALL(sys_ni_syscall)            /* sys_munlock */
-       CALL(sys_ni_syscall)            /* sys_mlockall */
-       CALL(sys_ni_syscall)            /* sys_munlockall */
-       CALL(sys_sched_setparam)
-       CALL(sys_sched_getparam)        /* 155 */
-       CALL(sys_sched_setscheduler)
-       CALL(sys_sched_getscheduler)
-       CALL(sys_sched_yield)
-       CALL(sys_sched_get_priority_max)
-       CALL(sys_sched_get_priority_min)  /* 160 */
-       CALL(sys_sched_rr_get_interval)
-       CALL(sys_nanosleep)
-       CALL(sys_ni_syscall)            /* sys_mremap */
-       CALL(sys_setresuid16)
-       CALL(sys_getresuid16)           /* 165 */
-       CALL(sys_ni_syscall)            /* for vm86 */
-       CALL(sys_ni_syscall)            /* sys_query_module */
-       CALL(sys_poll)
-       CALL(sys_ni_syscall)            /* old nfsservctl */
-       CALL(sys_setresgid16)           /* 170 */
-       CALL(sys_getresgid16)
-       CALL(sys_prctl)
-       CALL(sys_rt_sigreturn)
-       CALL(sys_rt_sigaction)
-       CALL(sys_rt_sigprocmask)        /* 175 */
-       CALL(sys_rt_sigpending)
-       CALL(sys_rt_sigtimedwait)
-       CALL(sys_rt_sigqueueinfo)
-       CALL(sys_rt_sigsuspend)
-       CALL(sys_pread64)               /* 180 */
-       CALL(sys_pwrite64)
-       CALL(sys_lchown16);
-       CALL(sys_getcwd)
-       CALL(sys_capget)
-       CALL(sys_capset)                /* 185 */
-       CALL(sys_sigaltstack)
-       CALL(sys_sendfile)
-       CALL(sys_ni_syscall)            /* streams1 */
-       CALL(sys_ni_syscall)            /* streams2 */
-       CALL(sys_vfork)                 /* 190 */
-       CALL(sys_getrlimit)
-       CALL(sys_mmap_pgoff)
-       CALL(sys_truncate64)
-       CALL(sys_ftruncate64)
-       CALL(sys_stat64)                /* 195 */
-       CALL(sys_lstat64)
-       CALL(sys_fstat64)
-       CALL(sys_chown)
-       CALL(sys_getuid)
-       CALL(sys_getgid)                /* 200 */
-       CALL(sys_geteuid)
-       CALL(sys_getegid)
-       CALL(sys_setreuid)
-       CALL(sys_setregid)
-       CALL(sys_getgroups)             /* 205 */
-       CALL(sys_setgroups)
-       CALL(sys_fchown)
-       CALL(sys_setresuid)
-       CALL(sys_getresuid)
-       CALL(sys_setresgid)             /* 210 */
-       CALL(sys_getresgid)
-       CALL(sys_lchown)
-       CALL(sys_setuid)
-       CALL(sys_setgid)
-       CALL(sys_setfsuid)              /* 215 */
-       CALL(sys_setfsgid)
-       CALL(sys_pivot_root)
-       CALL(sys_ni_syscall)
-       CALL(sys_ni_syscall)
-       CALL(sys_getdents64)            /* 220 */
-       CALL(sys_fcntl64)
-       CALL(sys_ni_syscall)            /* reserved TUX */
-       CALL(sys_ni_syscall)            /* reserved Security */
-       CALL(sys_gettid)
-       CALL(sys_readahead)             /* 225 */
-       CALL(sys_setxattr)
-       CALL(sys_lsetxattr)
-       CALL(sys_fsetxattr)
-       CALL(sys_getxattr)
-       CALL(sys_lgetxattr)             /* 230 */
-       CALL(sys_fgetxattr)
-       CALL(sys_listxattr)
-       CALL(sys_llistxattr)
-       CALL(sys_flistxattr)
-       CALL(sys_removexattr)           /* 235 */
-       CALL(sys_lremovexattr)
-       CALL(sys_fremovexattr)
-       CALL(sys_tkill)
-       CALL(sys_sendfile64)
-       CALL(sys_futex)                 /* 240 */
-       CALL(sys_sched_setaffinity)
-       CALL(sys_sched_getaffinity)
-       CALL(sys_ni_syscall)
-       CALL(sys_ni_syscall)
-       CALL(sys_io_setup)              /* 245 */
-       CALL(sys_io_destroy)
-       CALL(sys_io_getevents)
-       CALL(sys_io_submit)
-       CALL(sys_io_cancel)
-       CALL(sys_fadvise64)             /* 250 */
-       CALL(sys_ni_syscall)
-       CALL(sys_exit_group)
-       CALL(sys_lookup_dcookie)
-       CALL(sys_epoll_create)
-       CALL(sys_epoll_ctl)             /* 255 */
-       CALL(sys_epoll_wait)
-       CALL(sys_ni_syscall)            /* sys_remap_file_pages */
-       CALL(sys_set_tid_address)
-       CALL(sys_timer_create)
-       CALL(sys_timer_settime)         /* 260 */
-       CALL(sys_timer_gettime)
-       CALL(sys_timer_getoverrun)
-       CALL(sys_timer_delete)
-       CALL(sys_clock_settime)
-       CALL(sys_clock_gettime)         /* 265 */
-       CALL(sys_clock_getres)
-       CALL(sys_clock_nanosleep)
-       CALL(sys_statfs64)
-       CALL(sys_fstatfs64)
-       CALL(sys_tgkill)                /* 270 */
-       CALL(sys_utimes)
-       CALL(sys_fadvise64_64)
-       CALL(sys_ni_syscall)            /* sys_vserver */
-       CALL(sys_ni_syscall)
-       CALL(sys_get_mempolicy)         /* 275 */
-       CALL(sys_set_mempolicy)
-       CALL(sys_mq_open)
-       CALL(sys_mq_unlink)
-       CALL(sys_mq_timedsend)
-       CALL(sys_mq_timedreceive)       /* 280 */
-       CALL(sys_mq_notify)
-       CALL(sys_mq_getsetattr)
-       CALL(sys_waitid)
-       CALL(sys_ni_syscall)            /* sys_kexec_load */
-       CALL(sys_add_key)               /* 285 */
-       CALL(sys_request_key)
-       CALL(sys_keyctl)
-       CALL(sys_ioprio_set)
-       CALL(sys_ioprio_get)            /* 290 */
-       CALL(sys_inotify_init)
-       CALL(sys_inotify_add_watch)
-       CALL(sys_inotify_rm_watch)
-       CALL(sys_migrate_pages)
-       CALL(sys_openat)                /* 295 */
-       CALL(sys_mkdirat)
-       CALL(sys_mknodat)
-       CALL(sys_fchownat)
-       CALL(sys_futimesat)
-       CALL(sys_fstatat64)             /* 300 */
-       CALL(sys_unlinkat)
-       CALL(sys_renameat)
-       CALL(sys_linkat)
-       CALL(sys_symlinkat)
-       CALL(sys_readlinkat)            /* 305 */
-       CALL(sys_fchmodat)
-       CALL(sys_faccessat)
-       CALL(sys_ni_syscall)            /* sys_pselect6 */
-       CALL(sys_ni_syscall)            /* sys_ppoll */
-       CALL(sys_unshare)               /* 310 */
-       CALL(sys_set_robust_list)
-       CALL(sys_get_robust_list)
-       CALL(sys_splice)
-       CALL(sys_sync_file_range)
-       CALL(sys_tee)                   /* 315 */
-       CALL(sys_vmsplice)
-       CALL(sys_ni_syscall)            /* sys_move_pages */
-       CALL(sys_getcpu)
-       CALL(sys_ni_syscall)            /* sys_epoll_pwait */
-       CALL(sys_setns)                 /* 320 */
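
The table above is the whole of the H8/300 system-call dispatch surface: the CALL() macro expands to one ".long" entry per syscall number, and sys_ni_syscall fills every slot the port never implemented (returning -ENOSYS). As a rough, self-contained C illustration of that layout and of how such a table is indexed (every name below is a local stand-in, not a kernel symbol):

#include <stdio.h>
#include <errno.h>

typedef long (*syscall_fn)(long, long, long);

/* Stand-ins for real handlers; slot 0 mirrors the "old setup()" hole above. */
static long sys_ni_syscall(long a, long b, long c) { return -ENOSYS; }
static long sys_getpid_stub(long a, long b, long c) { return 1234; }

static const syscall_fn sys_call_table[] = {
        sys_ni_syscall,         /* 0 - never implemented, like the real table */
        sys_getpid_stub,        /* 1 - stand-in for an implemented call */
};

/* Hypothetical dispatcher: the entry code looks the handler up by number. */
static long dispatch(unsigned long nr, long a, long b, long c)
{
        const unsigned long nr_syscalls =
                sizeof(sys_call_table) / sizeof(sys_call_table[0]);

        if (nr >= nr_syscalls)
                return -ENOSYS;         /* out-of-range behaves like a hole */
        return sys_call_table[nr](a, b, c);
}

int main(void)
{
        printf("nr 1 -> %ld\n", dispatch(1, 0, 0, 0));  /* 1234 */
        printf("nr 9 -> %ld\n", dispatch(9, 0, 0, 0));  /* -ENOSYS (-38 on Linux) */
        return 0;
}
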
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
deleted file mode 100644 (file)
index e0f7419..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/time.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Copied/hacked from:
- *
- *  linux/arch/m68k/kernel/time.c
- *
- *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
- *
- * This file contains the m68k-specific time handling details.
- * Most of the stuff is located in the machine specific files.
- *
- * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
- *             "A Kernel Model for Precision Timekeeping" by Dave Mills
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/timex.h>
-#include <linux/profile.h>
-
-#include <asm/io.h>
-#include <asm/irq_regs.h>
-#include <asm/timer.h>
-
-#define        TICK_SIZE (tick_nsec / 1000)
-
-void h8300_timer_tick(void)
-{
-       if (current->pid)
-               profile_tick(CPU_PROFILING);
-       xtime_update(1);
-       update_process_times(user_mode(get_irq_regs()));
-}
-
-void read_persistent_clock(struct timespec *ts)
-{
-       unsigned int year, mon, day, hour, min, sec;
-
-       /* FIX by dqg : Set to zero for platforms that don't have tod */
-       /* without this, time is undefined and can overflow time_t, causing */
-       /* very strange errors */
-       year = 1980;
-       mon = day = 1;
-       hour = min = sec = 0;
-#ifdef CONFIG_H8300_GETTOD
-       h8300_gettod (&year, &mon, &day, &hour, &min, &sec);
-#endif
-       if ((year += 1900) < 1970)
-               year += 100;
-       ts->tv_sec = mktime(year, mon, day, hour, min, sec);
-       ts->tv_nsec = 0;
-}
-
-void __init time_init(void)
-{
-
-       h8300_timer_setup();
-}
diff --git a/arch/h8300/kernel/timer/Makefile b/arch/h8300/kernel/timer/Makefile
deleted file mode 100644 (file)
index bef0510..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-# h8300 internal timer handler
-
-obj-$(CONFIG_H8300_TIMER8)  := timer8.o
-obj-$(CONFIG_H8300_TIMER16) := timer16.o
-obj-$(CONFIG_H8300_ITU)     := itu.o
-obj-$(CONFIG_H8300_TPU)     := tpu.o
diff --git a/arch/h8300/kernel/timer/itu.c b/arch/h8300/kernel/timer/itu.c
deleted file mode 100644 (file)
index 0a8b5cd..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/timer/itu.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  ITU Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs306x.h>
-
-#if CONFIG_H8300_ITU_CH == 0
-#define ITUBASE        0xffff64
-#define ITUIRQ 24
-#elif CONFIG_H8300_ITU_CH == 1
-#define ITUBASE        0xffff6e
-#define ITUIRQ 28
-#elif CONFIG_H8300_ITU_CH == 2
-#define ITUBASE        0xffff78
-#define ITUIRQ 32
-#elif CONFIG_H8300_ITU_CH == 3
-#define ITUBASE        0xffff82
-#define ITUIRQ 36
-#elif CONFIG_H8300_ITU_CH == 4
-#define ITUBASE        0xffff92
-#define ITUIRQ 40
-#else
-#error Unknown timer channel.
-#endif
-
-#define TCR    0
-#define TIOR   1
-#define TIER   2
-#define TSR    3
-#define TCNT   4
-#define GRA    6
-#define GRB    8
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(IMFA, ITUBASE + TSR);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction itu_irq = {
-       .name           = "itu",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {1, 2, 4, 8};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int div;
-       unsigned int cnt;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-
-       setup_irq(ITUIRQ, &itu_irq);
-
-       /* initialize timer */
-       ctrl_outb(0, TSTR);
-       ctrl_outb(CCLR0 | div, ITUBASE + TCR);
-       ctrl_outb(0x01, ITUBASE + TIER);
-       ctrl_outw(cnt, ITUBASE + GRA);
-       ctrl_bset(CONFIG_H8300_ITU_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/timer/timer16.c b/arch/h8300/kernel/timer/timer16.c
deleted file mode 100644 (file)
index 462d9f5..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/timer/timer16.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  16bit Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs306x.h>
-
-/* 16bit timer */
-#if CONFIG_H8300_TIMER16_CH == 0
-#define _16BASE        0xffff78
-#define _16IRQ 24
-#elif CONFIG_H8300_TIMER16_CH == 1
-#define _16BASE        0xffff80
-#define _16IRQ 28
-#elif CONFIG_H8300_TIMER16_CH == 2
-#define _16BASE        0xffff88
-#define _16IRQ 32
-#else
-#error Unknown timer channel.
-#endif
-
-#define TCR    0
-#define TIOR   1
-#define TCNT   2
-#define GRA    4
-#define GRB    6
-
-#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*10000 /* Timer input freq. */
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(CONFIG_H8300_TIMER16_CH, TISRA);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction timer16_irq = {
-       .name           = "timer-16",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {1, 2, 4, 8};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int div;
-       unsigned int cnt;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-
-       setup_irq(_16IRQ, &timer16_irq);
-
-       /* initialize timer */
-       ctrl_outb(0, TSTR);
-       ctrl_outb(CCLR0 | div, _16BASE + TCR);
-       ctrl_outw(cnt, _16BASE + GRA);
-       ctrl_bset(4 + CONFIG_H8300_TIMER16_CH, TISRA);
-       ctrl_bset(CONFIG_H8300_TIMER16_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c
deleted file mode 100644 (file)
index 505f341..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/cpu/timer/timer8.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  8bit Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/profile.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/timer.h>
-#if defined(CONFIG_CPU_H8300H)
-#include <asm/regs306x.h>
-#endif
-#if defined(CONFIG_CPU_H8S)
-#include <asm/regs267x.h>
-#endif
-
-/* 8bit timer x2 */
-#define CMFA   6
-
-#if defined(CONFIG_H8300_TIMER8_CH0)
-#define _8BASE _8TCR0
-#ifdef CONFIG_CPU_H8300H
-#define _8IRQ  36
-#endif
-#ifdef CONFIG_CPU_H8S
-#define _8IRQ  72
-#endif
-#elif defined(CONFIG_H8300_TIMER8_CH2)
-#ifdef CONFIG_CPU_H8300H
-#define _8BASE _8TCR2
-#define _8IRQ  40
-#endif
-#endif
-
-#ifndef _8BASE
-#error Unknown timer channel.
-#endif
-
-#define _8TCR  0
-#define _8TCSR 2
-#define TCORA  4
-#define TCORB  6
-#define _8TCNT 8
-
-#define CMIEA  0x40
-#define CCLR_CMA 0x08
-#define CKS2   0x04
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
- */
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(CMFA, _8BASE + _8TCSR);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction timer8_irq = {
-       .name           = "timer-8",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {8, 64, 8192};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int div;
-       unsigned int cnt;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-       div++;
-
-       setup_irq(_8IRQ, &timer8_irq);
-
-#if defined(CONFIG_CPU_H8S)
-       /* Timer module enable */
-       ctrl_bclr(0, MSTPCRL);
-#endif
-
-       /* initialize timer */
-       ctrl_outw(cnt, _8BASE + TCORA);
-       ctrl_outw(0x0000, _8BASE + _8TCSR);
-       ctrl_outw((CMIEA|CCLR_CMA|CKS2) << 8 | div,
-                 _8BASE + _8TCR);
-}
diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c
deleted file mode 100644 (file)
index 0350f62..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/timer/tpu.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  TPU Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs267x.h>
-
-/* TPU */
-#if CONFIG_H8300_TPU_CH == 0
-#define TPUBASE        0xffffd0
-#define TPUIRQ 40
-#elif CONFIG_H8300_TPU_CH == 1
-#define TPUBASE        0xffffe0
-#define TPUIRQ 48
-#elif CONFIG_H8300_TPU_CH == 2
-#define TPUBASE        0xfffff0
-#define TPUIRQ 52
-#elif CONFIG_H8300_TPU_CH == 3
-#define TPUBASE        0xfffe80
-#define TPUIRQ 56
-#elif CONFIG_H8300_TPU_CH == 4
-#define TPUBASE        0xfffe90
-#define TPUIRQ 64
-#else
-#error Unknown timer channel.
-#endif
-
-#define _TCR   0
-#define _TMDR  1
-#define _TIOR  2
-#define _TIER  4
-#define _TSR   5
-#define _TCNT  6
-#define _GRA   8
-#define _GRB   10
-
-#define CCLR0  0x20
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(0, TPUBASE + _TSR);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction tpu_irq = {
-       .name           = "tpu",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {
-#if CONFIG_H8300_TPU_CH == 0
-       1,4,16,64,0,0,0,0,
-#elif (CONFIG_H8300_TPU_CH == 1) || (CONFIG_H8300_TPU_CH == 5)
-       1,4,16,64,0,0,256,0,
-#elif (CONFIG_H8300_TPU_CH == 2) || (CONFIG_H8300_TPU_CH == 4)
-       1,4,16,64,0,0,0,1024,
-#elif CONFIG_H8300_TPU_CH == 3
-       1,4,16,64,0,1024,256,4096,
-#endif
-};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int cnt;
-       unsigned int div;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-
-       setup_irq(TPUIRQ, &tpu_irq);
-
-       /* TPU module enabled */
-       ctrl_bclr(3, MSTPCRH);
-
-       ctrl_outb(0, TSTR);
-       ctrl_outb(CCLR0 | div, TPUBASE + _TCR);
-       ctrl_outb(0, TPUBASE + _TMDR);
-       ctrl_outw(0, TPUBASE + _TIOR);
-       ctrl_outb(0x01, TPUBASE + _TIER);
-       ctrl_outw(cnt, TPUBASE + _GRA);
-       ctrl_bset(CONFIG_H8300_TPU_CH, TSTR);
-}
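
All four deleted timer drivers (itu.c, timer16.c, timer8.c, tpu.c) lean on calc_param(cnt, div, divide_rate, 0x10000) from asm/timer.h, which is not part of this hunk, so the sketch below is only an assumption about what such a helper must compute: pick the smallest prescaler in divide_rate[] whose per-tick count still fits the 16-bit compare register. The 20 MHz input clock, the HZ value, and the helper name are likewise assumptions, not values taken from the deleted code.

#include <stdio.h>

#define HZ 100UL                                /* assumed tick rate */

static unsigned long timer_freq = 20000000UL;   /* assumed timer input clock */

/* Hedged sketch of a calc_param()-style selection, not the real macro. */
static int pick_timer_params(const int *divide_rate, int nr_rates,
                             unsigned long limit,
                             unsigned int *cnt, unsigned int *div)
{
        int i;

        for (i = 0; i < nr_rates; i++) {
                unsigned long ticks;

                if (divide_rate[i] == 0)        /* unused slots, as in tpu.c */
                        continue;
                ticks = timer_freq / divide_rate[i] / HZ;
                if (ticks <= limit) {
                        *cnt = (unsigned int)ticks;
                        *div = (unsigned int)i; /* the drivers OR this into TCR */
                        return 0;
                }
        }
        return -1;                              /* nothing slow enough */
}

int main(void)
{
        static const int divide_rate[] = {1, 2, 4, 8};  /* as in itu.c/timer16.c */
        unsigned int cnt, div;

        if (pick_timer_params(divide_rate, 4, 0x10000, &cnt, &div) == 0)
                printf("prescaler index %u, compare-match count %u\n", div, cnt);
        return 0;
}
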
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
deleted file mode 100644 (file)
index cfe494d..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * linux/arch/h8300/boot/traps.c -- general exception handling code
- * H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp>
- * 
- * Cloned from Linux/m68k.
- *
- * No original Copyright holder listed,
- * Probable original (C) Roman Zippel (assigned DJD, 1999)
- *
- * Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/bug.h>
-
-#include <asm/irq.h>
-#include <asm/traps.h>
-#include <asm/page.h>
-
-static DEFINE_SPINLOCK(die_lock);
-
-/*
- * this must be called very early as the kernel might
- * use some instructions that are emulated on the 060
- */
-
-void __init base_trap_init(void)
-{
-}
-
-void __init trap_init (void)
-{
-}
-
-asmlinkage void set_esp0 (unsigned long ssp)
-{
-       current->thread.esp0 = ssp;
-}
-
-/*
- *     Generic dumping code. Used for panic and debug.
- */
-
-static void dump(struct pt_regs *fp)
-{
-       unsigned long   *sp;
-       unsigned char   *tp;
-       int             i;
-
-       printk("\nCURRENT PROCESS:\n\n");
-       printk("COMM=%s PID=%d\n", current->comm, current->pid);
-       if (current->mm) {
-               printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
-                       (int) current->mm->start_code,
-                       (int) current->mm->end_code,
-                       (int) current->mm->start_data,
-                       (int) current->mm->end_data,
-                       (int) current->mm->end_data,
-                       (int) current->mm->brk);
-               printk("USER-STACK=%08x  KERNEL-STACK=%08lx\n\n",
-                       (int) current->mm->start_stack,
-                       (int) PAGE_SIZE+(unsigned long)current);
-       }
-
-       show_regs(fp);
-       printk("\nCODE:");
-       tp = ((unsigned char *) fp->pc) - 0x20;
-       for (sp = (unsigned long *) tp, i = 0; (i < 0x40);  i += 4) {
-               if ((i % 0x10) == 0)
-                       printk("\n%08x: ", (int) (tp + i));
-               printk("%08x ", (int) *sp++);
-       }
-       printk("\n");
-
-       printk("\nKERNEL STACK:");
-       tp = ((unsigned char *) fp) - 0x40;
-       for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
-               if ((i % 0x10) == 0)
-                       printk("\n%08x: ", (int) (tp + i));
-               printk("%08x ", (int) *sp++);
-       }
-       printk("\n");
-       if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE))
-                printk("(Possibly corrupted stack page??)\n");
-
-       printk("\n\n");
-}
-
-void die(const char *str, struct pt_regs *fp, unsigned long err)
-{
-       static int diecount;
-
-       oops_enter();
-
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       report_bug(fp->pc, fp);
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++diecount);
-       dump(fp);
-
-       spin_unlock_irq(&die_lock);
-       do_exit(SIGSEGV);
-}
-
-extern char _start, _etext;
-#define check_kernel_text(addr) \
-        ((addr >= (unsigned long)(&_start)) && \
-         (addr <  (unsigned long)(&_etext))) 
-
-static int kstack_depth_to_print = 24;
-
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
-       unsigned long *stack,  addr;
-       int i;
-
-       if (esp == NULL)
-               esp = (unsigned long *) &esp;
-
-       stack = esp;
-
-       printk("Stack from %08lx:", (unsigned long)stack);
-       for (i = 0; i < kstack_depth_to_print; i++) {
-               if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0)
-                       break;
-               if (i % 8 == 0)
-                       printk("\n       ");
-               printk(" %08lx", *stack++);
-       }
-
-       printk("\nCall Trace:");
-       i = 0;
-       stack = esp;
-       while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) {
-               addr = *stack++;
-               /*
-                * If the address is either in the text segment of the
-                * kernel, or in the region which contains vmalloc'ed
-                * memory, it *may* be the address of a calling
-                * routine; if so, print it so that someone tracing
-                * down the cause of the crash will be able to figure
-                * out the call path that was taken.
-                */
-               if (check_kernel_text(addr)) {
-                       if (i % 4 == 0)
-                               printk("\n       ");
-                       printk(" [<%08lx>]", addr);
-                       i++;
-               }
-       }
-       printk("\n");
-}
-
-void show_trace_task(struct task_struct *tsk)
-{
-       show_stack(tsk,(unsigned long *)tsk->thread.esp0);
-}
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
deleted file mode 100644 (file)
index 3253fed..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
-
-/* target memory map */
-#ifdef CONFIG_H8300H_GENERIC
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x400000
-#endif
-
-#ifdef CONFIG_H8300H_AKI3068NET
-#define ROMTOP  0x000000
-#define ROMSIZE 0x080000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x200000
-#endif
-
-#ifdef CONFIG_H8300H_H8MAX
-#define ROMTOP  0x000000
-#define ROMSIZE 0x080000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x200000
-#endif
-
-#ifdef CONFIG_H8300H_SIM
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x400000
-#endif
-
-#ifdef CONFIG_H8S_SIM
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x800000
-#endif
-
-#ifdef CONFIG_H8S_EDOSK2674
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x800000
-#endif
-
-#if defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)
-INPUT(romfs.o)
-#endif
-
-_jiffies = _jiffies_64 + 4;
-
-ENTRY(__start)
-
-SECTIONS
-{
-#if defined(CONFIG_ROMKERNEL)
-       . = ROMTOP; 
-       .vectors :
-       {
-       __vector = . ;
-               *(.vectors*)
-       }
-#else
-       . = RAMTOP; 
-       .bootvec :      
-       {
-               *(.bootvec)
-       }
-#endif
-        .text :
-       {
-       _text = .;
-#if defined(CONFIG_ROMKERNEL)
-       *(.int_redirect)
-#endif
-       __stext = . ;
-       TEXT_TEXT
-       SCHED_TEXT
-       LOCK_TEXT
-       __etext = . ;
-       }
-       EXCEPTION_TABLE(16)
-
-       RODATA
-#if defined(CONFIG_ROMKERNEL)
-       SECURITY_INIT
-#endif
-       ROEND = .; 
-#if defined(CONFIG_ROMKERNEL)
-       . = RAMTOP;
-       .data : AT(ROEND)
-#else
-       .data : 
-#endif
-       {
-       __sdata = . ;
-       ___data_start = . ;
-
-       INIT_TASK_DATA(0x2000)
-       . = ALIGN(0x4) ;
-               DATA_DATA
-       . = ALIGN(0x4) ;
-               *(.data.*)      
-
-       . = ALIGN(0x4) ;
-       ___init_begin = .;
-       __sinittext = .; 
-               INIT_TEXT
-       __einittext = .; 
-               INIT_DATA
-       . = ALIGN(0x4) ;
-       INIT_SETUP(0x4)
-       ___setup_start = .;
-               *(.init.setup)
-       . = ALIGN(0x4) ;
-       ___setup_end = .;
-       INIT_CALLS
-       CON_INITCALL
-               EXIT_TEXT
-               EXIT_DATA
-       INIT_RAM_FS
-       . = ALIGN(0x4) ;
-       ___init_end = .;
-       __edata = . ;
-       }
-#if defined(CONFIG_RAMKERNEL)
-       SECURITY_INIT
-#endif
-       __begin_data = LOADADDR(.data);
-        .bss : 
-        {
-       . = ALIGN(0x4) ;
-       __sbss = . ;
-       ___bss_start = . ;
-               *(.bss*)
-       . = ALIGN(0x4) ;
-               *(COMMON)
-       . = ALIGN(0x4) ;
-       ___bss_stop = . ;
-       __ebss = . ;
-       __end = . ;
-       __ramstart = .;
-       }
-        .romfs :       
-       {
-               *(.romfs*)
-       }
-       . = RAMTOP+RAMSIZE;
-        .dummy :
-        {
-       COMMAND_START = . - 0x200 ;
-       __ramend = . ;
-       }
-
-       DISCARDS
-}
diff --git a/arch/h8300/lib/Makefile b/arch/h8300/lib/Makefile
deleted file mode 100644 (file)
index 1577f50..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for H8/300-specific library files.
-#
-
-lib-y  = ashrdi3.o checksum.o memcpy.o memset.o abs.o romfs.o
diff --git a/arch/h8300/lib/abs.S b/arch/h8300/lib/abs.S
deleted file mode 100644 (file)
index ddd1fb3..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-;;; abs.S
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-       .text
-.global _abs
-
-;;; int abs(int n)
-_abs:
-       mov.l   er0,er0
-       bpl     1f
-       neg.l   er0
-1:
-       rts
-       
diff --git a/arch/h8300/lib/ashrdi3.c b/arch/h8300/lib/ashrdi3.c
deleted file mode 100644 (file)
index 78efb65..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
-/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING.  If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA.  */
-
-#define BITS_PER_UNIT 8
-
-typedef         int SItype     __attribute__ ((mode (SI)));
-typedef unsigned int USItype   __attribute__ ((mode (SI)));
-typedef                 int DItype     __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
-
-struct DIstruct {SItype high, low;};
-
-typedef union
-{
-  struct DIstruct s;
-  DItype ll;
-} DIunion;
-
-DItype
-__ashrdi3 (DItype u, word_type b)
-{
-  DIunion w;
-  word_type bm;
-  DIunion uu;
-
-  if (b == 0)
-    return u;
-
-  uu.ll = u;
-
-  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
-  if (bm <= 0)
-    {
-      /* w.s.high = 1..1 or 0..0 */
-      w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
-      w.s.low = uu.s.high >> -bm;
-    }
-  else
-    {
-      USItype carries = (USItype)uu.s.high << bm;
-      w.s.high = uu.s.high >> b;
-      w.s.low = ((USItype)uu.s.low >> b) | carries;
-    }
-
-  return w.ll;
-}
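
__ashrdi3() above is the libgcc helper the compiler calls for a 64-bit arithmetic right shift on this 32-bit target: it splits the operand into SImode halves, shifts each, and carries the vacated high bits (or the sign) into the low word. Below is a standalone check of the same split-word logic against the native 64-bit shift (local names, not gcc's runtime; it relies on the usual arithmetic behaviour of signed right shifts):

#include <stdio.h>
#include <stdint.h>

#define WORD_BITS 32

static int64_t ashrdi3_sketch(int64_t u, int b)
{
        int32_t high = (int32_t)(u >> 32);      /* sign-carrying half */
        uint32_t low = (uint32_t)u;
        int32_t rhigh;
        uint32_t rlow;

        if (b == 0)
                return u;

        if (b >= WORD_BITS) {
                rhigh = high >> (WORD_BITS - 1);        /* all sign bits */
                rlow = (uint32_t)(high >> (b - WORD_BITS));
        } else {
                uint32_t carries = (uint32_t)high << (WORD_BITS - b);
                rhigh = high >> b;
                rlow = (low >> b) | carries;
        }
        return (int64_t)(((uint64_t)(uint32_t)rhigh << 32) | rlow);
}

int main(void)
{
        int64_t v = -0x123456789abcdefLL;
        int b;

        for (b = 1; b < 64; b++)
                if (ashrdi3_sketch(v, b) != (v >> b))
                        printf("mismatch at shift %d\n", b);
        printf("checked shifts 1..63\n");
        return 0;
}
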
diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c
deleted file mode 100644 (file)
index bdc5b03..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IP/TCP/UDP checksumming routines
- *
- * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Tom May, <ftom@netcom.com>
- *             Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- *             Lots of code moved from tcp.c and ip.c; see those files
- *             for more names.
- *
- * 03/02/96    Jes Sorensen, Andreas Schwab, Roman Hodek:
- *             Fixed some nasty bugs, causing some horrible crashes.
- *             A: At some points, the sum (%0) was used as
- *             length-counter instead of the length counter
- *             (%1). Thanks to Roman Hodek for pointing this out.
- *             B: GCC seems to mess up if one uses too many
- *             data-registers to hold input values and one tries to
- *             specify d0 and d1 as scratch registers. Letting gcc choose these
- *      registers itself solves the problem.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
-   of the assembly has to go. */
-
-#include <net/checksum.h>
-#include <linux/module.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
-       /* add up 16-bit and 16-bit for 16+c bit */
-       x = (x & 0xffff) + (x >> 16);
-       /* add up carry.. */
-       x = (x & 0xffff) + (x >> 16);
-       return x;
-}
-
-static unsigned long do_csum(const unsigned char * buff, int len)
-{
-       int odd, count;
-       unsigned long result = 0;
-
-       if (len <= 0)
-               goto out;
-       odd = 1 & (unsigned long) buff;
-       if (odd) {
-               result = *buff;
-               len--;
-               buff++;
-       }
-       count = len >> 1;               /* nr of 16-bit words.. */
-       if (count) {
-               if (2 & (unsigned long) buff) {
-                       result += *(unsigned short *) buff;
-                       count--;
-                       len -= 2;
-                       buff += 2;
-               }
-               count >>= 1;            /* nr of 32-bit words.. */
-               if (count) {
-                       unsigned long carry = 0;
-                       do {
-                               unsigned long w = *(unsigned long *) buff;
-                               count--;
-                               buff += 4;
-                               result += carry;
-                               result += w;
-                               carry = (w > result);
-                       } while (count);
-                       result += carry;
-                       result = (result & 0xffff) + (result >> 16);
-               }
-               if (len & 2) {
-                       result += *(unsigned short *) buff;
-                       buff += 2;
-               }
-       }
-       if (len & 1)
-               result += (*buff << 8);
-       result = from32to16(result);
-       if (odd)
-               result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
-       return result;
-}
-
-/*
- *     This is a version of ip_compute_csum() optimized for IP headers,
- *     which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-       return (__force __sum16)~do_csum(iph,ihl*4);
-}
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-/*
- * Egads...  That thing apparently assumes that *all* checksums it ever sees will
- * be folded.  Very likely a bug.
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-       unsigned int result = do_csum(buff, len);
-
-       /* add in old sum, and carry.. */
-       result += (__force u32)sum;
-       /* 16+c bits -> 16 bits */
-       result = (result & 0xffff) + (result >> 16);
-       return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
-       return (__force __sum16)~do_csum(buff,len);
-}
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
-                           __wsum sum, int *csum_err)
-{
-       if (csum_err) *csum_err = 0;
-       memcpy(dst, (__force const void *)src, len);
-       return csum_partial(dst, len, sum);
-}
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
-       memcpy(dst, src, len);
-       return csum_partial(dst, len, sum);
-}
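
do_csum() and from32to16() above accumulate the Internet ones'-complement sum a word at a time and repeatedly fold the carries back into 16 bits; ip_fast_csum() and ip_compute_csum() are just the complement of that sum. Below is a minimal standalone rendering of the folding, restricted to byte streams summed 16 bits at a time to keep it short (illustration only, not the kernel's csum code):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t fold32to16(uint32_t x)
{
        x = (x & 0xffff) + (x >> 16);   /* add the high half into the low half */
        x = (x & 0xffff) + (x >> 16);   /* add the carry that may generate */
        return (uint16_t)x;
}

static uint16_t csum_sketch(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
        if (len & 1)                    /* odd trailing byte, zero padded */
                sum += (uint32_t)(buf[len - 1] << 8);
        return (uint16_t)~fold32to16(sum);      /* final complement */
}

int main(void)
{
        /* Classic 20-byte IPv4 header with its checksum field (bytes 10-11)
         * zeroed; the correct checksum for it is 0xb1e6. */
        static const uint8_t hdr[20] = {
                0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                0xac, 0x10, 0x0a, 0x0c,
        };

        printf("checksum = 0x%04x\n", (unsigned)csum_sketch(hdr, sizeof(hdr)));
        return 0;
}
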
diff --git a/arch/h8300/lib/memcpy.S b/arch/h8300/lib/memcpy.S
deleted file mode 100644 (file)
index cad325e..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-;;; memcpy.S
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-
-       .text
-.global _memcpy
-
-;;; void *memcpy(void *to, void *from, size_t n)
-_memcpy:
-       mov.l   er2,er2
-       bne     1f
-       rts     
-1:     
-       ;; address check
-       bld     #0,r0l
-       bxor    #0,r1l
-       bcs     4f
-       mov.l   er4,@-sp
-       mov.l   er0,@-sp
-       btst    #0,r0l
-       beq     1f
-       ;; (aligned even) odd address
-       mov.b   @er1,r3l
-       mov.b   r3l,@er0
-       adds    #1,er1
-       adds    #1,er0
-       dec.l   #1,er2
-       beq     3f
-1:     
-       ;; n < sizeof(unsigned long) check
-       sub.l   er4,er4
-       adds    #4,er4          ; loop count check value
-       cmp.l   er4,er2
-       blo     2f
-       ;; unsigned long copy
-1:     
-       mov.l   @er1,er3
-       mov.l   er3,@er0
-       adds    #4,er0
-       adds    #4,er1
-       subs    #4,er2
-       cmp.l   er4,er2
-       bcc     1b      
-       ;; rest
-2:     
-       mov.l   er2,er2
-       beq     3f
-1:     
-       mov.b   @er1,r3l
-       mov.b   r3l,@er0
-       adds    #1,er1
-       adds    #1,er0
-       dec.l   #1,er2
-       bne     1b
-3:
-       mov.l   @sp+,er0
-       mov.l   @sp+,er4
-       rts
-
-       ;; odd <- even / even <- odd
-4:     
-       mov.l   er4,er3
-       mov.l   er2,er4
-       mov.l   er5,er2
-       mov.l   er1,er5
-       mov.l   er6,er1
-       mov.l   er0,er6
-1:
-       eepmov.w
-       mov.w   r4,r4
-       bne     1b
-       dec.w   #1,e4
-       bpl     1b
-       mov.l   er1,er6
-       mov.l   er2,er5
-       mov.l   er3,er4
-       rts
diff --git a/arch/h8300/lib/memset.S b/arch/h8300/lib/memset.S
deleted file mode 100644 (file)
index 4549a64..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/* memset.S */
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-       .text
-
-.global        _memset
-
-;;void *memset(*ptr, int c, size_t count)
-;; ptr = er0
-;; c   = er1(r1l)
-;; count = er2
-_memset:
-       btst    #0,r0l
-       beq     2f
-
-       ;; odd address
-1:
-       mov.b   r1l,@er0
-       adds    #1,er0
-       dec.l   #1,er2
-       beq     6f
-
-       ;; even address
-2:
-       mov.l   er2,er3
-       cmp.l   #4,er2
-       blo     4f
-       ;; count>=4 -> count/4
-#if defined(__H8300H__)
-       shlr.l  er2
-       shlr.l  er2
-#endif
-#if defined(__H8300S__)
-       shlr.l  #2,er2
-#endif
-       ;; byte -> long
-       mov.b   r1l,r1h
-       mov.w   r1,e1
-3:
-       mov.l   er1,@er0
-       adds    #4,er0
-       dec.l   #1,er2
-       bne     3b
-4:
-       ;; count % 4
-       and.b   #3,r3l
-       beq     6f
-5:
-       mov.b   r1l,@er0
-       adds    #1,er0
-       dec.b   r3l
-       bne     5b
-6:
-       rts
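
memset.S above stores a single leading byte when the destination is odd, replicates the fill byte through a 32-bit register (r1l -> r1 -> er1) for the bulk of the buffer, and then finishes the count % 4 tail byte by byte. The same structure in portable C, as an illustration rather than a drop-in replacement:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void *memset_sketch(void *ptr, int c, size_t count)
{
        uint8_t *p = ptr;
        uint32_t pattern = (uint8_t)c;

        pattern |= pattern << 8;
        pattern |= pattern << 16;               /* byte -> word -> long replication */

        if (count && ((uintptr_t)p & 1)) {      /* odd start address */
                *p++ = (uint8_t)c;
                count--;
        }
        while (count >= 4) {                    /* long stores for the bulk */
                memcpy(p, &pattern, 4);         /* sidesteps alignment rules in C */
                p += 4;
                count -= 4;
        }
        while (count--)                         /* count % 4 tail */
                *p++ = (uint8_t)c;
        return ptr;
}

int main(void)
{
        char buf[11];

        memset_sketch(buf, 'x', sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        printf("%s\n", buf);    /* prints ten 'x' characters */
        return 0;
}
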
diff --git a/arch/h8300/lib/romfs.S b/arch/h8300/lib/romfs.S
deleted file mode 100644 (file)
index 68910d8..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/* romfs move to __ebss */
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-
-#define BLKOFFSET 512
-
-       .text
-.globl __move_romfs
-_romfs_sig_len = 8
-
-__move_romfs:  
-       mov.l   #__sbss,er0
-       mov.l   #_romfs_sig,er1
-       mov.b   #_romfs_sig_len,r3l
-1:                                     /* check romfs image */
-       mov.b   @er0+,r2l
-       mov.b   @er1+,r2h
-       cmp.b   r2l,r2h
-       bne     2f
-       dec.b   r3l
-       bne     1b
-
-       /* find romfs image */
-       mov.l   @__sbss+8,er0           /* romfs length(be) */
-       mov.l   #__sbss,er1
-       add.l   er0,er1                 /* romfs image end */
-       mov.l   #__ebss,er2
-       add.l   er0,er2                 /* destination address */
-#if defined(CONFIG_INTELFLASH)
-       add.l   #BLKOFFSET,er2
-#endif
-       adds    #2,er0
-       adds    #1,er0
-       shlr    er0
-       shlr    er0                     /* transfer length */
-1:
-       mov.l   @er1,er3                /* copy image */
-       mov.l   er3,@er2
-       subs    #4,er1
-       subs    #4,er2
-       dec.l   #1,er0
-       bpl     1b
-2:
-       rts
-
-       .section        .rodata
-_romfs_sig:    
-       .ascii  "-rom1fs-"
-
-       .end
diff --git a/arch/h8300/mm/Makefile b/arch/h8300/mm/Makefile
deleted file mode 100644 (file)
index 5f4bc42..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux h8300-specific parts of the memory manager.
-#
-
-obj-y   := init.o fault.o memory.o kmap.o
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
deleted file mode 100644 (file)
index 4725359..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  linux/arch/h8300/mm/fault.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
- *  Copyright (C) 2000  Lineo, Inc.  (www.lineo.com) 
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/mm/fault.c
- *  linux/arch/m68k/mm/fault.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- */
-
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-
-#include <asm/pgtable.h>
-
-/*
- * This routine handles page faults.  It determines the problem, and
- * then passes it off to one of the appropriate routines.
- *
- * error_code:
- *     bit 0 == 0 means no page found, 1 means protection fault
- *     bit 1 == 0 means read, 1 means write
- *
- * If this routine detects a bad access, it returns 1, otherwise it
- * returns 0.
- */
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
-                             unsigned long error_code)
-{
-#ifdef DEBUG
-       printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
-               regs->sr, regs->pc, address, error_code);
-#endif
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-       if ((unsigned long) address < PAGE_SIZE) {
-               printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-       } else
-               printk(KERN_ALERT "Unable to handle kernel access");
-       printk(" at virtual address %08lx\n",address);
-       if (!user_mode(regs))
-               die("Oops", regs, error_code);
-       do_exit(SIGKILL);
-
-       return 1;
-}
-
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
deleted file mode 100644 (file)
index 6c1251e..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  linux/arch/h8300/mm/init.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
- *                      Kenneth Albanowski <kjahds@kjahds.com>,
- *  Copyright (C) 2000  Lineo, Inc.  (www.lineo.com) 
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/mm/init.c
- *  linux/arch/m68k/mm/init.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- *
- *  JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
- *  DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-#undef DEBUG
-
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-static unsigned long empty_bad_page_table;
-
-static unsigned long empty_bad_page;
-
-unsigned long empty_zero_page;
-
-extern unsigned long rom_length;
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- * The parameters are pointers to where to stick the starting and ending
- * addresses of available kernel virtual memory.
- */
-void __init paging_init(void)
-{
-       /*
-        * Make sure start_mem is page aligned,  otherwise bootmem and
-        * page_alloc get different views of the world.
-        */
-#ifdef DEBUG
-       unsigned long start_mem = PAGE_ALIGN(memory_start);
-#endif
-       unsigned long end_mem   = memory_end & PAGE_MASK;
-
-#ifdef DEBUG
-       printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
-               start_mem, end_mem);
-#endif
-
-       /*
-        * Initialize the bad page table and bad page to point
-        * to a couple of allocated pages.
-        */
-       empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
-       /*
-        * Set up SFC/DFC registers (user data space).
-        */
-       set_fs (USER_DS);
-
-#ifdef DEBUG
-       printk ("before free_area_init\n");
-
-       printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
-               start_mem, end_mem);
-#endif
-
-       {
-               unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
-               zones_size[ZONE_DMA]     = 0 >> PAGE_SHIFT;
-               zones_size[ZONE_NORMAL]  = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
-               zones_size[ZONE_HIGHMEM] = 0;
-#endif
-               free_area_init(zones_size);
-       }
-}
-
-void __init mem_init(void)
-{
-       unsigned long codesize = _etext - _stext;
-
-       pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
-
-       high_memory = (void *) (memory_end & PAGE_MASK);
-       max_mapnr = MAP_NR(high_memory);
-
-       /* this will put all low memory onto the freelists */
-       free_all_bootmem();
-
-       mem_init_print_info(NULL);
-       if (rom_length > 0 && rom_length > codesize)
-               pr_info("Memory available: %luK/%luK ROM\n",
-                       (rom_length - codesize) >> 10, rom_length >> 10);
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void
-free_initmem(void)
-{
-#ifdef CONFIG_RAMKERNEL
-       free_initmem_default(-1);
-#endif
-}
-
diff --git a/arch/h8300/mm/kmap.c b/arch/h8300/mm/kmap.c
deleted file mode 100644 (file)
index f79edcd..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *  linux/arch/h8300/mm/kmap.c
- *  
- *  Based on
- *  linux/arch/m68knommu/mm/kmap.c
- *
- *  Copyright (C) 2000 Lineo, <davidm@snapgear.com>
- *  Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/io.h>
-
-#undef DEBUG
-
-#define VIRT_OFFSET (0x01000000)
-
-/*
- * Map some physical address range into the kernel address space.
- */
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
-{
-       return (void *)(physaddr + VIRT_OFFSET);
-}
-
-/*
- * Unmap a ioremap()ed region again.
- */
-void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful.
- * It doesn't currently free pointer/page tables any more, but that
- * wasn't used anyway and might be added back later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
- */
-void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
-{
-}
diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c
deleted file mode 100644 (file)
index 06e3646..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  linux/arch/h8300/mm/memory.c
- *
- *  Copyright (C) 2002  Yoshinori Sato <ysato@users.sourceforge.jp>,
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/mm/memory.c
- *
- *  Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
- *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
- *
- *  Based on:
- *
- *  linux/arch/m68k/mm/memory.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/io.h>
-
-void cache_clear (unsigned long paddr, int len)
-{
-}
-
-
-void cache_push (unsigned long paddr, int len)
-{
-}
-
-void cache_push_v (unsigned long vaddr, int len)
-{
-}
-
-/*
- * Map some physical address range into the kernel address space.
- */
-
-unsigned long kernel_map(unsigned long paddr, unsigned long size,
-                        int nocacheflag, unsigned long *memavailp )
-{
-       return paddr;
-}
-
diff --git a/arch/h8300/platform/h8300h/Makefile b/arch/h8300/platform/h8300h/Makefile
deleted file mode 100644 (file)
index 420f73b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-# Reuse any files we can from the H8/300H
-#
-
-obj-y := irq.o ptrace_h8300h.o
diff --git a/arch/h8300/platform/h8300h/aki3068net/Makefile b/arch/h8300/platform/h8300h/aki3068net/Makefile
deleted file mode 100644 (file)
index b7ff780..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_ram.o
diff --git a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S b/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
deleted file mode 100644 (file)
index b2ad0f2..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       AE-3068 (aka. aki3068net)
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global _command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0xff,0xff
-       ;; P2DDR
-       .byte   0xff,0xff
-       ;; P3DDR
-       .byte   0xff,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x01,0x01
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x0c,0x0c
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x30,0x30
-
-__target_name: 
-       .asciz  "AE-3068"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8300h/generic/Makefile b/arch/h8300/platform/h8300h/generic/Makefile
deleted file mode 100644 (file)
index 2b12a17..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y :=  crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8300h/generic/crt0_ram.S b/arch/h8300/platform/h8300h/generic/crt0_ram.S
deleted file mode 100644 (file)
index 5ab7d9c..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/generic/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       AE-3068 (aka. aki3068net)
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global _command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-#if defined(CONFIG_BLK_DEV_BLKMEM)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-
-__target_name: 
-       .asciz  "generic"
diff --git a/arch/h8300/platform/h8300h/generic/crt0_rom.S b/arch/h8300/platform/h8300h/generic/crt0_rom.S
deleted file mode 100644 (file)
index dda1dfa..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/generic/crt0_rom.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       generic
- *  Memory Layout     :        ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-       .section .text
-       .file   "crt0_rom.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #__ramend,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy .data */
-#if !defined(CONFIG_H8300H_SIM)
-       /* copy .data */
-       mov.l   #__begin_data,er5
-       mov.l   #__sdata,er6
-       mov.l   #__edata,er4
-       sub.l   er6,er4
-       shlr.l  er4
-       shlr.l  er4
-1:     
-       mov.l   @er5+,er0
-       mov.l   er0,@er6
-       adds    #4,er6
-       dec.l   #1,er4
-       bne     1b      
-#endif
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #__command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* linux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-
-       .section .rodata
-__target_name: 
-       .asciz  "generic"
-       
-       .section .bss
-__command_line:        
-       .space  512
-
-       /* interrupt vector */
-       .section .vectors,"ax"
-       .long   __start
-vector =       1
-       .rept   64-1
-       .long   _interrupt_redirect_table+vector*4
-vector =       vector + 1
-       .endr
diff --git a/arch/h8300/platform/h8300h/h8max/Makefile b/arch/h8300/platform/h8300h/h8max/Makefile
deleted file mode 100644 (file)
index b7ff780..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_ram.o
diff --git a/arch/h8300/platform/h8300h/h8max/crt0_ram.S b/arch/h8300/platform/h8300h/h8max/crt0_ram.S
deleted file mode 100644 (file)
index 6a0d4e2..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/h8max/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       H8MAX
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global _command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0xff,0xff
-       ;; P2DDR
-       .byte   0xff,0xff
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x01,0x01
-       ;; P6DDR
-       .byte   0xf6,0xf6
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0xee,0xee
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x30,0x30
-
-__target_name: 
-       .asciz  "H8MAX"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8300h/irq.c b/arch/h8300/platform/h8300h/irq.c
deleted file mode 100644 (file)
index 0a50353..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * H8/300H-dependent interrupt handling.
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/gpio-internal.h>
-#include <asm/regs306x.h>
-
-const int __initconst h8300_saved_vectors[] = {
-#if defined(CONFIG_GDB_DEBUG)
-       TRAP3_VEC,      /* TRAPA #3 is GDB breakpoint */
-#endif
-       -1,
-};
-
-const h8300_vector __initconst h8300_trap_table[] = {
-       0, 0, 0, 0, 0, 0, 0, 0,
-       system_call,
-       0,
-       0,
-       trace_break,
-};
-
-int h8300_enable_irq_pin(unsigned int irq)
-{
-       int bitmask;
-       if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
-               return 0;
-
-       /* initialize IRQ pin */
-       bitmask = 1 << (irq - EXT_IRQ0);
-       switch(irq) {
-       case EXT_IRQ0:
-       case EXT_IRQ1:
-       case EXT_IRQ2:
-       case EXT_IRQ3:
-               if (H8300_GPIO_RESERVE(H8300_GPIO_P8, bitmask) == 0)
-                       return -EBUSY;
-               H8300_GPIO_DDR(H8300_GPIO_P8, bitmask, H8300_GPIO_INPUT);
-               break;
-       case EXT_IRQ4:
-       case EXT_IRQ5:
-               if (H8300_GPIO_RESERVE(H8300_GPIO_P9, bitmask) == 0)
-                       return -EBUSY;
-               H8300_GPIO_DDR(H8300_GPIO_P9, bitmask, H8300_GPIO_INPUT);
-               break;
-       }
-
-       return 0;
-}
-
-void h8300_disable_irq_pin(unsigned int irq)
-{
-       int bitmask;
-       if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
-               return;
-
-       /* disable interrupt & release IRQ pin */
-       bitmask = 1 << (irq - EXT_IRQ0);
-       switch(irq) {
-       case EXT_IRQ0:
-       case EXT_IRQ1:
-       case EXT_IRQ2:
-       case EXT_IRQ3:
-               *(volatile unsigned char *)IER &= ~bitmask;
-               H8300_GPIO_FREE(H8300_GPIO_P8, bitmask);
-               break ;
-       case EXT_IRQ4:
-       case EXT_IRQ5:
-               *(volatile unsigned char *)IER &= ~bitmask;
-               H8300_GPIO_FREE(H8300_GPIO_P9, bitmask);
-               break;
-       }
-}
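Editorial note: h8300_enable_irq_pin() above reserves the GPIO bit for an external IRQ and switches it to input; IRQ0-IRQ3 sit on port 8, IRQ4-IRQ5 on port 9, and the bit within the port is simply irq - EXT_IRQ0. A self-contained sketch of that mapping follows; the EXT_IRQ0 and port values here are assumptions chosen for the demo, not the arch constants, and the real code also reserves the pin before touching the DDR.

#include <stdio.h>

enum { EXT_IRQ0 = 12, PORT_P8 = 8, PORT_P9 = 9 };   /* assumed demo values */

static int irq_to_pin(unsigned int irq, int *port, unsigned int *mask)
{
        if (irq < EXT_IRQ0 || irq > EXT_IRQ0 + 5)
                return -1;                          /* not an external IRQ */
        *mask = 1u << (irq - EXT_IRQ0);             /* same mask as the code above */
        *port = (irq - EXT_IRQ0) < 4 ? PORT_P8 : PORT_P9;
        return 0;
}

int main(void)
{
        for (unsigned int irq = EXT_IRQ0; irq <= EXT_IRQ0 + 5; irq++) {
                int port; unsigned int mask;
                if (irq_to_pin(irq, &port, &mask) == 0)
                        printf("IRQ %u -> port %d, mask 0x%02x\n", irq, port, mask);
        }
        return 0;
}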
diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c
deleted file mode 100644 (file)
index 4f1ed02..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/ptrace_h8300h.c
- *    ptrace cpu depend helper functions
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-
-#define CCR_MASK 0x6f    /* mode/imask not set */
-#define BREAKINST 0x5730 /* trapa #3 */
-
-/* Mapping from PT_xxx to the stack offset at which the register is
-   saved.  Notice that usp has no stack-slot and needs to be treated
-   specially (see get_reg/put_reg below). */
-static const int h8300_register_offset[] = {
-       PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4),
-       PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0),
-       PT_REG(ccr), PT_REG(pc)
-};
-
-/* read register */
-long h8300_get_reg(struct task_struct *task, int regno)
-{
-       switch (regno) {
-       case PT_USP:
-               return task->thread.usp + sizeof(long)*2;
-       case PT_CCR:
-           return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-       default:
-           return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
-       }
-}
-
-/* write register */
-int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
-       unsigned short oldccr;
-       switch (regno) {
-       case PT_USP:
-               task->thread.usp = data - sizeof(long)*2;
-       case PT_CCR:
-               oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-               oldccr &= ~CCR_MASK;
-               data &= CCR_MASK;
-               data |= oldccr;
-               *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       default:
-               *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       }
-       return 0;
-}
-
-/* disable singlestep */
-void user_disable_single_step(struct task_struct *child)
-{
-       if((long)child->thread.breakinfo.addr != -1L) {
-               *child->thread.breakinfo.addr = child->thread.breakinfo.inst;
-               child->thread.breakinfo.addr = (unsigned short *)-1L;
-       }
-}
-
-/* calculate next pc */
-enum jump_type {none,    /* normal instruction */
-               jabs,    /* absolute address jump */
-               ind,     /* indirect address jump */
-               ret,     /* return from subroutine */
-               reg,     /* register indexed jump */
-               relb,    /* pc relative jump (byte offset) */
-               relw,    /* pc relative jump (word offset) */
-               };
-
-/* opcode decode table define
-   ptn: opcode pattern
-   msk: opcode bitmask
-   len: instruction length (<0 next table index)
-   jmp: jump operation mode */
-struct optable {
-       unsigned char bitpattern;
-       unsigned char bitmask;
-       signed char length;
-       signed char type;
-} __attribute__((aligned(1),packed));
-
-#define OPTABLE(ptn,msk,len,jmp)   \
-        {                          \
-               .bitpattern = ptn, \
-               .bitmask    = msk, \
-               .length     = len, \
-               .type       = jmp, \
-       }
-
-static const struct optable optable_0[] = {
-       OPTABLE(0x00,0xff, 1,none), /* 0x00 */
-       OPTABLE(0x01,0xff,-1,none), /* 0x01 */
-       OPTABLE(0x02,0xfe, 1,none), /* 0x02-0x03 */
-       OPTABLE(0x04,0xee, 1,none), /* 0x04-0x05/0x14-0x15 */
-       OPTABLE(0x06,0xfe, 1,none), /* 0x06-0x07 */
-       OPTABLE(0x08,0xea, 1,none), /* 0x08-0x09/0x0c-0x0d/0x18-0x19/0x1c-0x1d */
-       OPTABLE(0x0a,0xee, 1,none), /* 0x0a-0x0b/0x1a-0x1b */
-       OPTABLE(0x0e,0xee, 1,none), /* 0x0e-0x0f/0x1e-0x1f */
-       OPTABLE(0x10,0xfc, 1,none), /* 0x10-0x13 */
-       OPTABLE(0x16,0xfe, 1,none), /* 0x16-0x17 */
-       OPTABLE(0x20,0xe0, 1,none), /* 0x20-0x3f */
-       OPTABLE(0x40,0xf0, 1,relb), /* 0x40-0x4f */
-       OPTABLE(0x50,0xfc, 1,none), /* 0x50-0x53 */
-       OPTABLE(0x54,0xfd, 1,ret ), /* 0x54/0x56 */
-       OPTABLE(0x55,0xff, 1,relb), /* 0x55 */
-       OPTABLE(0x57,0xff, 1,none), /* 0x57 */
-       OPTABLE(0x58,0xfb, 2,relw), /* 0x58/0x5c */
-       OPTABLE(0x59,0xfb, 1,reg ), /* 0x59/0x5b */
-       OPTABLE(0x5a,0xfb, 2,jabs), /* 0x5a/0x5e */
-       OPTABLE(0x5b,0xfb, 2,ind ), /* 0x5b/0x5f */
-       OPTABLE(0x60,0xe8, 1,none), /* 0x60-0x67/0x70-0x77 */
-       OPTABLE(0x68,0xfa, 1,none), /* 0x68-0x69/0x6c-0x6d */
-       OPTABLE(0x6a,0xfe,-2,none), /* 0x6a-0x6b */
-       OPTABLE(0x6e,0xfe, 2,none), /* 0x6e-0x6f */
-       OPTABLE(0x78,0xff, 4,none), /* 0x78 */
-       OPTABLE(0x79,0xff, 2,none), /* 0x79 */
-       OPTABLE(0x7a,0xff, 3,none), /* 0x7a */
-       OPTABLE(0x7b,0xff, 2,none), /* 0x7b */
-       OPTABLE(0x7c,0xfc, 2,none), /* 0x7c-0x7f */
-       OPTABLE(0x80,0x80, 1,none), /* 0x80-0xff */
-};
-
-static const struct optable optable_1[] = {
-       OPTABLE(0x00,0xff,-3,none), /* 0x0100 */
-       OPTABLE(0x40,0xf0,-3,none), /* 0x0140-0x14f */
-       OPTABLE(0x80,0xf0, 1,none), /* 0x0180-0x018f */
-       OPTABLE(0xc0,0xc0, 2,none), /* 0x01c0-0x01ff */
-};
-
-static const struct optable optable_2[] = {
-       OPTABLE(0x00,0x20, 2,none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */
-       OPTABLE(0x20,0x20, 3,none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */
-};
-
-static const struct optable optable_3[] = {
-       OPTABLE(0x69,0xfb, 2,none), /* 0x010069/0x01006d/014069/0x01406d */
-       OPTABLE(0x6b,0xff,-4,none), /* 0x01006b/0x01406b */
-       OPTABLE(0x6f,0xff, 3,none), /* 0x01006f/0x01406f */
-       OPTABLE(0x78,0xff, 5,none), /* 0x010078/0x014078 */
-};
-
-static const struct optable optable_4[] = {
-       OPTABLE(0x00,0x78, 3,none), /* 0x0100690?/0x01006d0?/0140690/0x01406d0?/0x0100698?/0x01006d8?/0140698?/0x01406d8? */
-       OPTABLE(0x20,0x78, 4,none), /* 0x0100692?/0x01006d2?/0140692/0x01406d2?/0x010069a?/0x01006da?/014069a?/0x01406da? */
-};
-
-static const struct optables_list {
-       const struct optable *ptr;
-       int size;
-} optables[] = {
-#define OPTABLES(no)                                                   \
-        {                                                              \
-               .ptr  = optable_##no,                                  \
-               .size = sizeof(optable_##no) / sizeof(struct optable), \
-       }
-       OPTABLES(0),
-       OPTABLES(1),
-       OPTABLES(2),
-       OPTABLES(3),
-       OPTABLES(4),
-
-};
-
-const unsigned char condmask[] = {
-       0x00,0x40,0x01,0x04,0x02,0x08,0x10,0x20
-};
-
-static int isbranch(struct task_struct *task,int reson)
-{
-       unsigned char cond = h8300_get_reg(task, PT_CCR);
-       /* encode complex conditions */
-       /* B4: N^V
-          B5: Z|(N^V)
-          B6: C|Z */
-       __asm__("bld #3,%w0\n\t"
-               "bxor #1,%w0\n\t"
-               "bst #4,%w0\n\t"
-               "bor #2,%w0\n\t"
-               "bst #5,%w0\n\t"
-               "bld #2,%w0\n\t"
-               "bor #0,%w0\n\t"
-               "bst #6,%w0\n\t"
-               :"=&r"(cond)::"cc");
-       cond &= condmask[reson >> 1];
-       if (!(reson & 1))
-               return cond == 0;
-       else
-               return cond != 0;
-}
-
-static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc)
-{
-       const struct optable *op;
-       unsigned char *fetch_p;
-       unsigned char inst;
-       unsigned long addr;
-       unsigned long *sp;
-       int op_len,regno;
-       op = optables[0].ptr;
-       op_len = optables[0].size;
-       fetch_p = (unsigned char *)pc;
-       inst = *fetch_p++;
-       do {
-               if ((inst & op->bitmask) == op->bitpattern) {
-                       if (op->length < 0) {
-                               op = optables[-op->length].ptr;
-                               op_len = optables[-op->length].size + 1;
-                               inst = *fetch_p++;
-                       } else {
-                               switch (op->type) {
-                               case none:
-                                       return pc + op->length;
-                               case jabs:
-                                       addr = *(unsigned long *)pc;
-                                       return (unsigned short *)(addr & 0x00ffffff);
-                               case ind:
-                                       addr = *pc & 0xff;
-                                       return (unsigned short *)(*(unsigned long *)addr);
-                               case ret:
-                                       sp = (unsigned long *)h8300_get_reg(child, PT_USP);
-                                       /* user stack frames
-                                          |   er0  | temporary saved
-                                          +--------+
-                                          |   exp  | exception stack frames
-                                          +--------+
-                                          | ret pc | userspace return address
-                                       */
-                                       return (unsigned short *)(*(sp+2) & 0x00ffffff);
-                               case reg:
-                                       regno = (*pc >> 4) & 0x07;
-                                       if (regno == 0)
-                                               addr = h8300_get_reg(child, PT_ER0);
-                                       else
-                                               addr = h8300_get_reg(child, regno-1+PT_ER1);
-                                       return (unsigned short *)addr;
-                               case relb:
-                                       if (inst == 0x55 || isbranch(child,inst & 0x0f))
-                                               pc = (unsigned short *)((unsigned long)pc +
-                                                                      ((signed char)(*fetch_p)));
-                                       return pc+1; /* skip myself */
-                               case relw:
-                                       if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4))
-                                               pc = (unsigned short *)((unsigned long)pc +
-                                                                      ((signed short)(*(pc+1))));
-                                       return pc+2; /* skip myself */
-                               }
-                       }
-               } else
-                       op++;
-       } while(--op_len > 0);
-       return NULL;
-}
-
-/* Set breakpoint(s) to simulate a single step from the current PC.  */
-
-void user_enable_single_step(struct task_struct *child)
-{
-       unsigned short *nextpc;
-       nextpc = getnextpc(child,(unsigned short *)h8300_get_reg(child, PT_PC));
-       child->thread.breakinfo.addr = nextpc;
-       child->thread.breakinfo.inst = *nextpc;
-       *nextpc = BREAKINST;
-}
-
-asmlinkage void trace_trap(unsigned long bp)
-{
-       if ((unsigned long)current->thread.breakinfo.addr == bp) {
-               user_disable_single_step(current);
-               force_sig(SIGTRAP,current);
-       } else
-               force_sig(SIGILL,current);
-}
-
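Editorial note: user_enable_single_step() above emulates single-stepping in software. getnextpc() walks the opcode tables to work out where execution will resume, the word found there is saved in thread.breakinfo, and BREAKINST (trapa #3) is written in its place; trace_trap() later restores it. The sketch below shows only the plant/restore pattern, with a dummy decoder standing in for getnextpc() and local bookkeeping standing in for the breakinfo fields.

#include <stdint.h>
#include <stdio.h>

#define BREAK_OPCODE 0x5730u            /* trapa #3, as in BREAKINST above */

struct step_state {
        uint16_t *addr;                 /* where the breakpoint was planted */
        uint16_t  saved;                /* original instruction word */
};

static uint16_t *decode_next_pc(uint16_t *pc)
{
        /* Placeholder: the real getnextpc() decodes branches, returns and
         * register-indirect jumps; here we assume a one-word fall-through. */
        return pc + 1;
}

static void plant_step_breakpoint(struct step_state *st, uint16_t *pc)
{
        uint16_t *next = decode_next_pc(pc);

        st->addr  = next;
        st->saved = *next;              /* remember the overwritten word */
        *next     = BREAK_OPCODE;       /* trap once the child reaches it */
}

static void remove_step_breakpoint(struct step_state *st)
{
        if (st->addr) {
                *st->addr = st->saved;  /* put the original instruction back */
                st->addr  = NULL;
        }
}

int main(void)
{
        uint16_t text[4] = { 0, 0, 0, 0 };      /* stand-in instruction memory */
        struct step_state st = { 0 };

        plant_step_breakpoint(&st, &text[0]);
        printf("patched word 1: 0x%04x\n", text[1]);
        remove_step_breakpoint(&st);
        printf("restored word 1: 0x%04x\n", text[1]);
        return 0;
}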
diff --git a/arch/h8300/platform/h8s/Makefile b/arch/h8300/platform/h8s/Makefile
deleted file mode 100644 (file)
index bf12418..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-# Reuse any files we can from the H8S
-#
-
-obj-y := ints_h8s.o ptrace_h8s.o
diff --git a/arch/h8300/platform/h8s/edosk2674/Makefile b/arch/h8300/platform/h8s/edosk2674/Makefile
deleted file mode 100644 (file)
index 8e34972..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
deleted file mode 100644 (file)
index 5ed191b..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       EDOSK-2674
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-                       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-       ldc     #0x00,exr
-
-       /* Peripheral Setup */
-       bclr    #4,@INTCR:8     /* interrupt mode 2 */
-       bset    #5,@INTCR:8
-       bclr    #0,@IER+1:16
-       bset    #1,@ISCRL+1:16  /* IRQ0 Positive Edge */
-       bclr    #0,@ISCRL+1:16
-
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   er5,er6
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    #2,er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       ;;      used,ddr
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x3f,0x3a
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; P7DDR
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0xff,0xff
-       ;; PBDDR
-       .byte   0xff,0x00
-       ;; PCDDR
-       .byte   0xff,0x00
-       ;; PDDDR
-       .byte   0xff,0x00
-       ;; PEDDR
-       .byte   0xff,0x00
-       ;; PFDDR
-       .byte   0xff,0xff
-       ;; PGDDR
-       .byte   0x0f,0x0f
-       ;; PHDDR
-       .byte   0x0f,0x0f
-
-__target_name: 
-       .asciz  "EDOSK-2674"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S b/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
deleted file mode 100644 (file)
index 06d1d7f..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       EDOSK-2674
- *  Memory Layout     :        ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-               
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-       .section .text
-       .file   "crt0_rom.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #__ramend,sp
-       ldc     #0x80,ccr
-       ldc     #0,exr
-       
-       /* Peripheral Setup */
-;BSC/GPIO setup
-       mov.l   #init_regs,er0
-       mov.w   #0xffff,e2
-1:
-       mov.w   @er0+,r2
-       beq     2f
-       mov.w   @er0+,r1
-       mov.b   r1l,@er2
-       bra     1b
-
-2:
-;SDRAM setup
-#define SDRAM_SMR 0x400040
-
-       mov.b   #0,r0l
-       mov.b   r0l,@DRACCR:16
-       mov.w   #0x188,r0
-       mov.w   r0,@REFCR:16
-       mov.w   #0x85b4,r0
-       mov.w   r0,@DRAMCR:16
-       mov.b   #0,r1l
-       mov.b   r1l,@SDRAM_SMR
-       mov.w   #0x84b4,r0
-       mov.w   r0,@DRAMCR:16
-;special thanks to Arizona Cooperative Power
-       
-       /* copy .data */
-       mov.l   #__begin_data,er5
-       mov.l   #__sdata,er6
-       mov.l   #__edata,er4
-       sub.l   er6,er4
-       shlr.l  #2,er4
-1:     
-       mov.l   @er5+,er0
-       mov.l   er0,@er6
-       adds    #4,er6
-       dec.l   #1,er4
-       bne     1b      
-
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr.l  #2,er4          
-       sub.l   er0,er0
-1:
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #__command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* linux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-#define INIT_REGS_DATA(REGS,DATA) \
-       .word   ((REGS) & 0xffff),DATA
-
-init_regs:
-INIT_REGS_DATA(ASTCR,0xff)
-INIT_REGS_DATA(RDNCR,0x00)
-INIT_REGS_DATA(ABWCR,0x80)
-INIT_REGS_DATA(WTCRAH,0x27)
-INIT_REGS_DATA(WTCRAL,0x77)
-INIT_REGS_DATA(WTCRBH,0x71)
-INIT_REGS_DATA(WTCRBL,0x22)
-INIT_REGS_DATA(CSACRH,0x80)
-INIT_REGS_DATA(CSACRL,0x80)
-INIT_REGS_DATA(BROMCRH,0xa0)
-INIT_REGS_DATA(BROMCRL,0xa0)
-INIT_REGS_DATA(P3DDR,0x3a)
-INIT_REGS_DATA(P3ODR,0x06)
-INIT_REGS_DATA(PADDR,0xff)
-INIT_REGS_DATA(PFDDR,0xfe)
-INIT_REGS_DATA(PGDDR,0x0f)
-INIT_REGS_DATA(PHDDR,0x0f)
-INIT_REGS_DATA(PFCR0,0xff)
-INIT_REGS_DATA(PFCR2,0x0d)
-INIT_REGS_DATA(ITSR, 0x00)
-INIT_REGS_DATA(ITSR+1,0x3f)
-INIT_REGS_DATA(INTCR,0x20)
-               
-       .word   0
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; P7DDR
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-       ;; PCDDR
-       .byte   0x00,0x00
-       ;; PDDDR
-       .byte   0x00,0x00
-       ;; PEDDR
-       .byte   0x00,0x00
-       ;; PFDDR
-       .byte   0x00,0x00
-       ;; PGDDR
-       .byte   0x00,0x00
-       ;; PHDDR
-       .byte   0x00,0x00
-
-       .section .rodata
-__target_name: 
-       .asciz  "EDOSK-2674"
-       
-       .section .bss
-__command_line:        
-       .space  512
-
-       /* interrupt vector */
-       .section .vectors,"ax"
-       .long   __start
-       .long   __start
-vector =       2
-       .rept   126
-       .long   _interrupt_redirect_table+vector*4
-vector =       vector + 1
-       .endr
diff --git a/arch/h8300/platform/h8s/generic/Makefile b/arch/h8300/platform/h8s/generic/Makefile
deleted file mode 100644 (file)
index 44b4685..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y =  crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8s/generic/crt0_ram.S b/arch/h8300/platform/h8s/generic/crt0_ram.S
deleted file mode 100644 (file)
index 7018915..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/generic/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       generic
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-                       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-       ldc     #0x00,exr
-
-       /* Peripheral Setup */
-       bclr    #4,@INTCR:8     /* interrupt mode 2 */
-       bset    #5,@INTCR:8
-
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   er5,er6
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    #2,er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       ;;      used,ddr
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; P7DDR
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-       ;; PCDDR
-       .byte   0x00,0x00
-       ;; PDDDR
-       .byte   0x00,0x00
-       ;; PEDDR
-       .byte   0x00,0x00
-       ;; PFDDR
-       .byte   0x00,0x00
-       ;; PGDDR
-       .byte   0x00,0x00
-       ;; PHDDR
-       .byte   0x00,0x00
-
-__target_name: 
-       .asciz  "generic"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8s/generic/crt0_rom.S b/arch/h8300/platform/h8s/generic/crt0_rom.S
deleted file mode 100644 (file)
index 623ba78..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/generic/crt0_rom.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       generic
- *  Memory Layout     :        ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-       .section .text
-       .file   "crt0_rom.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #__ramend,sp
-       ldc     #0x80,ccr
-       ldc     #0,exr
-       bclr    #4,@INTCR:8
-       bset    #5,@INTCR:8     /* Interrupt mode 2 */
-       
-       /* Peripheral Setup */
-       
-       /* copy .data */
-#if !defined(CONFIG_H8S_SIM)
-       mov.l   #__begin_data,er5
-       mov.l   #__sdata,er6
-       mov.l   #__edata,er4
-       sub.l   er6,er4
-       shlr.l  #2,er4
-1:     
-       mov.l   @er5+,er0
-       mov.l   er0,@er6
-       adds    #4,er6
-       dec.l   #1,er4
-       bne     1b      
-#endif
-
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr.l  #2,er4          
-       sub.l   er0,er0
-1:
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* linux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-       ;; PCDDR
-       .byte   0x00,0x00
-       ;; PDDDR
-       .byte   0x00,0x00
-       ;; PEDDR
-       .byte   0x00,0x00
-       ;; PFDDR
-       .byte   0x00,0x00
-       ;; PGDDR
-       .byte   0x00,0x00
-       ;; PHDDR
-       .byte   0x00,0x00
-
-       .section .rodata
-__target_name: 
-       .asciz  "generic"
-       
-       .section .bss
-__command_line:        
-       .space  512
-
-       /* interrupt vector */
-       .section .vectors,"ax"
-       .long   __start
-       .long   __start
-vector =       2
-       .rept   126-1
-       .long   _interrupt_redirect_table+vector*4
-vector =       vector + 1
-       .endr
diff --git a/arch/h8300/platform/h8s/irq.c b/arch/h8300/platform/h8s/irq.c
deleted file mode 100644 (file)
index f3a5511..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/irq.c
- * Interrupt handling CPU variants
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/gpio-internal.h>
-#include <asm/regs267x.h>
-
-/* saved vector list */
-const int __initconst h8300_saved_vectors[] = {
-#if defined(CONFIG_GDB_DEBUG)
-       TRACE_VEC,
-       TRAP3_VEC,
-#endif
-       -1
-};
-
-/* trap entry table */
-const H8300_VECTOR __initconst h8300_trap_table[] = {
-       0,0,0,0,0,
-       trace_break,  /* TRACE */
-       0,0,
-       system_call,  /* TRAPA #0 */
-       0,0,0,0,0,0,0
-};
-
-/* IRQ pin assignment */
-struct irq_pins {
-       unsigned char port_no;
-       unsigned char bit_no;
-} __attribute__((aligned(1),packed));
-/* ISTR = 0 */
-static const struct irq_pins irq_assign_table0[16]={
-        {H8300_GPIO_P5,H8300_GPIO_B0},{H8300_GPIO_P5,H8300_GPIO_B1},
-       {H8300_GPIO_P5,H8300_GPIO_B2},{H8300_GPIO_P5,H8300_GPIO_B3},
-       {H8300_GPIO_P5,H8300_GPIO_B4},{H8300_GPIO_P5,H8300_GPIO_B5},
-       {H8300_GPIO_P5,H8300_GPIO_B6},{H8300_GPIO_P5,H8300_GPIO_B7},
-       {H8300_GPIO_P6,H8300_GPIO_B0},{H8300_GPIO_P6,H8300_GPIO_B1},
-       {H8300_GPIO_P6,H8300_GPIO_B2},{H8300_GPIO_P6,H8300_GPIO_B3},
-       {H8300_GPIO_P6,H8300_GPIO_B4},{H8300_GPIO_P6,H8300_GPIO_B5},
-       {H8300_GPIO_PF,H8300_GPIO_B1},{H8300_GPIO_PF,H8300_GPIO_B2},
-};
-/* ISTR = 1 */
-static const struct irq_pins irq_assign_table1[16]={
-       {H8300_GPIO_P8,H8300_GPIO_B0},{H8300_GPIO_P8,H8300_GPIO_B1},
-       {H8300_GPIO_P8,H8300_GPIO_B2},{H8300_GPIO_P8,H8300_GPIO_B3},
-       {H8300_GPIO_P8,H8300_GPIO_B4},{H8300_GPIO_P8,H8300_GPIO_B5},
-       {H8300_GPIO_PH,H8300_GPIO_B2},{H8300_GPIO_PH,H8300_GPIO_B3},
-       {H8300_GPIO_P2,H8300_GPIO_B0},{H8300_GPIO_P2,H8300_GPIO_B1},
-       {H8300_GPIO_P2,H8300_GPIO_B2},{H8300_GPIO_P2,H8300_GPIO_B3},
-       {H8300_GPIO_P2,H8300_GPIO_B4},{H8300_GPIO_P2,H8300_GPIO_B5},
-       {H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7},
-};
-
-/* IRQ to GPIO pin translation */
-#define IRQ_GPIO_MAP(irqbit,irq,port,bit)                        \
-do {                                                             \
-       if (*(volatile unsigned short *)ITSR & irqbit) {          \
-               port = irq_assign_table1[irq - EXT_IRQ0].port_no; \
-               bit  = irq_assign_table1[irq - EXT_IRQ0].bit_no;  \
-       } else {                                                  \
-               port = irq_assign_table0[irq - EXT_IRQ0].port_no; \
-               bit  = irq_assign_table0[irq - EXT_IRQ0].bit_no;  \
-       }                                                         \
-} while(0)
-
-int h8300_enable_irq_pin(unsigned int irq)
-{
-       if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) {
-               unsigned short ptn = 1 << (irq - EXT_IRQ0);
-               unsigned int port_no,bit_no;
-               IRQ_GPIO_MAP(ptn, irq, port_no, bit_no);
-               if (H8300_GPIO_RESERVE(port_no, bit_no) == 0)
-                       return -EBUSY;                   /* pin already in use */
-               H8300_GPIO_DDR(port_no, bit_no, H8300_GPIO_INPUT);
-               *(volatile unsigned short *)ISR &= ~ptn; /* ISR clear */
-       }
-
-       return 0;
-}
-
-void h8300_disable_irq_pin(unsigned int irq)
-{
-       if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) {
-               /* disable interrupt & release IRQ pin */
-               unsigned short ptn = 1 << (irq - EXT_IRQ0);
-               unsigned short port_no,bit_no;
-               *(volatile unsigned short *)ISR &= ~ptn;
-               *(volatile unsigned short *)IER &= ~ptn;
-               IRQ_GPIO_MAP(ptn, irq, port_no, bit_no);
-               H8300_GPIO_FREE(port_no, bit_no);
-       }
-}
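Editorial note: IRQ_GPIO_MAP above is a do { } while (0) macro that picks one of two pin tables depending on the ITSR bit for the IRQ. The same selection can be written as a small function, which is easier to read and type-check; the reduced, self-contained sketch below uses two-entry tables instead of sixteen and abstracts the ITSR read into a flag, so it is an illustration of the idea rather than a drop-in replacement.

#include <stdio.h>

struct irq_pins {
        unsigned char port_no;
        unsigned char bit_no;
};

/* Two-entry stand-ins for irq_assign_table0/irq_assign_table1. */
static const struct irq_pins table0[] = { { 5, 0 }, { 5, 1 } };
static const struct irq_pins table1[] = { { 8, 0 }, { 8, 1 } };

/* Function form of IRQ_GPIO_MAP: the ITSR bit selects which table applies. */
static struct irq_pins irq_gpio_pin(unsigned int idx, int itsr_bit_set)
{
        return itsr_bit_set ? table1[idx] : table0[idx];
}

int main(void)
{
        struct irq_pins pin = irq_gpio_pin(1, 1);

        printf("port %u, bit %u\n", pin.port_no, pin.bit_no);
        return 0;
}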
diff --git a/arch/h8300/platform/h8s/ptrace_h8s.c b/arch/h8300/platform/h8s/ptrace_h8s.c
deleted file mode 100644 (file)
index c058ab1..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/ptrace_h8s.c
- *    ptrace cpu depend helper functions
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-
-#define CCR_MASK  0x6f
-#define EXR_TRACE 0x80
-
-/* Mapping from PT_xxx to the stack offset at which the register is
-   saved.  Notice that usp has no stack-slot and needs to be treated
-   specially (see get_reg/put_reg below). */
-static const int h8300_register_offset[] = {
-       PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4),
-       PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0),
-       PT_REG(ccr), PT_REG(pc),  0,           PT_REG(exr)
-};
-
-/* read register */
-long h8300_get_reg(struct task_struct *task, int regno)
-{
-       switch (regno) {
-       case PT_USP:
-               return task->thread.usp + sizeof(long)*2 + 2;
-       case PT_CCR:
-       case PT_EXR:
-           return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-       default:
-           return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
-       }
-}
-
-/* write register */
-int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
-       unsigned short oldccr;
-       switch (regno) {
-       case PT_USP:
-               task->thread.usp = data - sizeof(long)*2 - 2;
-       case PT_CCR:
-               oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-               oldccr &= ~CCR_MASK;
-               data &= CCR_MASK;
-               data |= oldccr;
-               *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       case PT_EXR:
-               /* modifying EXR is not supported */
-               return -EIO;
-       default:
-               *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       }
-       return 0;
-}
-
-/* disable singlestep */
-void user_disable_single_step(struct task_struct *child)
-{
-       *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) &= ~EXR_TRACE;
-}
-
-/* enable singlestep */
-void user_enable_single_step(struct task_struct *child)
-{
-       *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) |= EXR_TRACE;
-}
-
-asmlinkage void trace_trap(unsigned long bp)
-{
-       (void)bp;
-       force_sig(SIGTRAP,current);
-}
-
index 7740ab10a17192cb0596d0ee442282d549b3ba33..b10d61bc0f2ad4e0d721b25b40b0a205619fc7b7 100644 (file)
@@ -6,6 +6,7 @@ menu "Processor type and features"
 
 config IA64
        bool
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select PCI if (!IA64_HP_SIM)
        select ACPI if (!IA64_HP_SIM)
        select PM if (!IA64_HP_SIM)
index 556d0701a155351e844960aa00b51c55c820a3b9..c25302fb48d95636b59670efa423a59ff2d055c0 100644 (file)
@@ -85,4 +85,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_IA64_SOCKET_H */
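Editorial note: this hunk (and the matching m32r one below) adds the SO_MAX_PACING_RATE socket option value. From user space it caps the kernel's per-socket pacing rate in bytes per second; the minimal usage sketch below assumes a kernel and libc new enough to accept the option, with error handling trimmed.

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47   /* value from the ia64/m32r hunks in this merge */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        unsigned int rate = 1000000;    /* bytes per second, roughly a 1 MB/s cap */

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                                 &rate, sizeof(rate)) < 0)
                perror("SO_MAX_PACING_RATE");
        return 0;
}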
index 5eb71d22c3d5901030eec5c9ead78dc1279d815e..59d52e3aef125b79b67afdaf1fcffd2b892367ff 100644 (file)
@@ -882,40 +882,10 @@ __init void prefill_possible_map(void)
                set_cpu_possible(i, true);
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj;
-       struct acpi_madt_local_sapic *lsapic;
        cpumask_t tmp_map;
-       int cpu, physid;
-
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-               return -EINVAL;
-
-       if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
-
-       obj = buffer.pointer;
-       if (obj->type != ACPI_TYPE_BUFFER)
-       {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
-       if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
-           (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       physid = ((lsapic->id << 8) | (lsapic->eid));
-
-       kfree(buffer.pointer);
-       buffer.length = ACPI_ALLOCATE_BUFFER;
-       buffer.pointer = NULL;
+       int cpu;
 
        cpumask_complement(&tmp_map, cpu_present_mask);
        cpu = cpumask_first(&tmp_map);
@@ -934,9 +904,9 @@ static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       return _acpi_map_lsapic(handle, pcpu);
+       return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
index 24be7c8da86ad3cbbdee6be0933fe302c9adaf02..52966650114f3198df49c109c02d70ae82d8adcb 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_M32R_SOCKET_H */
index 311a300d48cca82b82a5faa0bf6abbaee42cacef..75f25a8e300170ce85130da8fd2f739faa983059 100644 (file)
@@ -1,6 +1,7 @@
 config M68K
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
        select HAVE_IDE
        select HAVE_AOUT if MMU
        select HAVE_DEBUG_BUGVERBOSE
index 697d50393dd07d8db81b73e22544aa3fc325d24d..47365b1ccbecfeb9c87f807ad5ab345649572114 100644 (file)
@@ -85,7 +85,7 @@ static int fd_request_irq(void)
 {
        if(MACH_IS_Q40)
                return request_irq(FLOPPY_IRQ, floppy_hardint,
-                                  IRQF_DISABLED, "floppy", floppy_hardint);
+                                  0, "floppy", floppy_hardint);
        else if(MACH_IS_SUN3X)
                return sun3xflop_request_irq();
        return -ENXIO;
index 95231e2f9d646efb00181fbcac41d1cbc637002c..a02ea3a7bb20a0299e0f1bd2bf2306c484a4bc92 100644 (file)
@@ -207,7 +207,7 @@ static int sun3xflop_request_irq(void)
        if(!once) {
                once = 1;
                error = request_irq(FLOPPY_IRQ, sun3xflop_hardint,
-                                   IRQF_DISABLED, "floppy", NULL);
+                                   0, "floppy", NULL);
                return ((error == 0) ? 0 : -1);
        } else return 0;
 }
index 639c731568b0046d2150af8da76c991a55a45855..3fadc4a93d977ec96456bf194b7a34a97daa495e 100644 (file)
@@ -3,3 +3,10 @@
 #else
 #include <asm/uaccess_mm.h>
 #endif
+
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned(x, ptr)   __get_user((x), (ptr))
+#define __put_user_unaligned(x, ptr)   __put_user((x), (ptr))
+#endif
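Editorial note: the uaccess.h hunk gives m68k __get_user_unaligned()/__put_user_unaligned(); on CPUs without unaligned access they fall back to the byte-wise asm-generic helpers, otherwise they map straight to __get_user()/__put_user(). A hypothetical in-kernel sketch of a caller follows; the function and pointer names are made up, and the caller is assumed to have already validated the range with access_ok().

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: read a possibly misaligned 32-bit value from user space. */
static int read_unaligned_u32(const u32 __user *uptr, u32 *out)
{
        u32 val;

        /* Expands to __get_user() where the CPU copes with unaligned loads,
         * or to the asm-generic byte-wise helper where it does not. */
        if (__get_user_unaligned(val, uptr))
                return -EFAULT;

        *out = val;
        return 0;
}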
index ec30acbfe6db0507a74e2495db5ea24b31ca0e8d..99a98698bc959035fb89c2d004a02800abcf81a4 100644 (file)
@@ -70,7 +70,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 static struct irqaction m68328_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = hw_tick,
 };
 
index 0570741e5500b221003e3295f4596673cfa53b4b..d493ac43fe3f900fe1b6ecd999e3452e96bdbfd3 100644 (file)
@@ -59,7 +59,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 static struct irqaction m68360_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = hw_tick,
 };
 
index e8f3b97b0f7706ddcbc8dfa33bd412d4c4f84c20..493b3111d4c12b6d96e139a2e96afb84d022f558 100644 (file)
@@ -118,7 +118,7 @@ static irqreturn_t pit_tick(int irq, void *dummy)
 
 static struct irqaction pit_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = pit_tick,
 };
 
index bb5a25ada84872420e1ac518e85ac647dc5aa818..831a08cf6f40d7e6e6c28594035a75e0c6a8b1c7 100644 (file)
@@ -51,7 +51,7 @@ irqreturn_t mcfslt_profile_tick(int irq, void *dummy)
 
 static struct irqaction mcfslt_profile_irq = {
        .name    = "profile timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = mcfslt_profile_tick,
 };
 
@@ -93,7 +93,7 @@ static irqreturn_t mcfslt_tick(int irq, void *dummy)
 
 static struct irqaction mcfslt_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = mcfslt_tick,
 };
 
index d06068e457643fc804f0b068679da230e640becb..cd496a20fcc7ced7b802e705badcae2417eb2256 100644 (file)
@@ -83,7 +83,7 @@ static irqreturn_t mcftmr_tick(int irq, void *dummy)
 
 static struct irqaction mcftmr_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = mcftmr_tick,
 };
 
@@ -171,7 +171,7 @@ irqreturn_t coldfire_profile_tick(int irq, void *dummy)
 
 static struct irqaction coldfire_profile_irq = {
        .name    = "profile timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = coldfire_profile_tick,
 };
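Editorial note: the m68k/ColdFire hunks above all drop IRQF_DISABLED; the core has long run every handler with interrupts disabled, so the flag was a no-op and was being removed tree-wide. A sketch of what a registration looks like without it is below; the IRQ number and names are placeholders, not taken from these files.

#include <linux/interrupt.h>
#include <linux/irqreturn.h>

#define DEMO_TIMER_IRQ 64               /* placeholder IRQ number */

static irqreturn_t demo_tick(int irq, void *dev_id)
{
        /* acknowledge the hardware and run the periodic work here */
        return IRQ_HANDLED;
}

static int demo_timer_init(void)
{
        /* IRQF_TIMER is still passed; IRQF_DISABLED is simply gone. */
        return request_irq(DEMO_TIMER_IRQ, demo_tick, IRQF_TIMER,
                           "demo timer", NULL);
}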
 
index b82f82b743199ac4ff5b54d8cdbe6cc8c1246078..8370114e78aa4cea65a3ab5c44abbd19c4d39236 100644 (file)
@@ -1,5 +1,6 @@
 config MICROBLAZE
        def_bool y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_FUNCTION_TRACER
index d9d81c2192534b48062fb3f4923ee8ed994dcc7a..6e239123d6fe05e4a4ab2118f76816971c9a522a 100644 (file)
@@ -20,7 +20,6 @@ platforms += mti-sead3
 platforms += netlogic
 platforms += pmcs-msp71xx
 platforms += pnx833x
-platforms += powertv
 platforms += ralink
 platforms += rb532
 platforms += sgi-ip22
index f75ab4a2f2460a0d5652cbcacf25c2b68a2d24c3..04957828d1b2e19915bcf0dd659d7319e0d66ae0 100644 (file)
@@ -1,6 +1,7 @@
 config MIPS
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_CONTEXT_TRACKING
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IDE
@@ -8,6 +9,7 @@ config MIPS
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_ARCH_KGDB
+       select HAVE_ARCH_TRACEHOOK
        select ARCH_HAVE_CUSTOM_GPIO_H
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -18,6 +20,7 @@ config MIPS
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_DEBUG_KMEMLEAK
+       select HAVE_SYSCALL_TRACEPOINTS
        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
        select RTC_LIB if !MACH_LOONGSON
@@ -146,6 +149,7 @@ config MIPS_COBALT
        select CSRC_R4K
        select CEVT_GT641XX
        select DMA_NONCOHERENT
+       select EARLY_PRINTK_8250 if EARLY_PRINTK
        select HW_HAS_PCI
        select I8253
        select I8259
@@ -412,23 +416,6 @@ config PMC_MSP
          of integrated peripherals, interfaces and DSPs in addition to
          a variety of MIPS cores.
 
-config POWERTV
-       bool "Cisco PowerTV"
-       select BOOT_ELF32
-       select CEVT_R4K
-       select CPU_MIPSR2_IRQ_VI
-       select CPU_MIPSR2_IRQ_EI
-       select CSRC_POWERTV
-       select DMA_NONCOHERENT
-       select HW_HAS_PCI
-       select SYS_HAS_CPU_MIPS32_R2
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select SYS_SUPPORTS_BIG_ENDIAN
-       select SYS_SUPPORTS_HIGHMEM
-       select USB_OHCI_LITTLE_ENDIAN
-       help
-         This enables support for the Cisco PowerTV Platform.
-
 config RALINK
        bool "Ralink based machines"
        select CEVT_R4K
@@ -811,7 +798,6 @@ source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
 source "arch/mips/pmcs-msp71xx/Kconfig"
-source "arch/mips/powertv/Kconfig"
 source "arch/mips/ralink/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
 source "arch/mips/sibyte/Kconfig"
@@ -890,9 +876,6 @@ config CSRC_BCM1480
 config CSRC_IOASIC
        bool
 
-config CSRC_POWERTV
-       bool
-
 config CSRC_R4K
        bool
 
@@ -1489,8 +1472,10 @@ config SYS_SUPPORTS_ZBOOT
        bool
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_LZ4
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
+       select HAVE_KERNEL_XZ
 
 config SYS_SUPPORTS_ZBOOT_UART16550
        bool
@@ -1977,6 +1962,7 @@ config MIPS_VPE_APSP_API
 config MIPS_CMP
        bool "MIPS CMP framework support"
        depends on SYS_SUPPORTS_MIPS_CMP
+       select SMP
        select SYNC_R4K
        select SYS_SUPPORTS_SMP
        select SYS_SUPPORTS_SCHED_SMT if SMP
index 37871f0de15eca8c817f38b6dcdbb2b3b9c056b4..b147e7038ff0cb26042555ee6d10eafd8d070ca2 100644 (file)
@@ -20,6 +20,14 @@ config EARLY_PRINTK
          doesn't cooperate with an X server. You should normally say N here,
          unless you want to debug such a crash.
 
+config EARLY_PRINTK_8250
+       bool "8250/16550 and compatible serial early printk driver"
+       depends on EARLY_PRINTK
+       default n
+       help
+         If you say Y here, it will be possible to use an 8250/16550 serial
+         port as the boot console.
+
 config CMDLINE_BOOL
        bool "Built-in kernel command line"
        default n
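Editorial note: EARLY_PRINTK_8250 above selects a polled 16550-style console for very early boot. The usual shape of such a driver is a putc that spins on the line-status register before writing the transmit register; the rough, self-contained sketch below assumes a memory-mapped base address and byte-wide register spacing (real boards often need a register shift and a different base), so treat every constant here as an assumption.

#include <stdint.h>

#define UART_THR  0x00          /* transmit holding register */
#define UART_LSR  0x05          /* line status register */
#define LSR_THRE  0x20          /* transmitter holding register empty */

/* Assumed memory-mapped 16550 base with byte-wide register spacing. */
static volatile uint8_t *const uart = (volatile uint8_t *)0x1fd003f8;

static void early_uart_putc(char c)
{
        while (!(uart[UART_LSR] & LSR_THRE))
                ;                       /* wait until the transmitter has room */
        uart[UART_THR] = (uint8_t)c;
}

static void early_uart_puts(const char *s)
{
        while (*s) {
                if (*s == '\n')
                        early_uart_putc('\r');  /* serial consoles want CR-LF */
                early_uart_putc(*s++);
        }
}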
index ca8f8340d75f535b1cad5c1c8eed14bbc71f9634..de300b9936076faad63463c884687d9e19ea914f 100644 (file)
@@ -285,15 +285,19 @@ endif
 # Other need ECOFF, so we build a 32-bit ELF binary for them which we then
 # convert to ECOFF using elf2ecoff.
 #
+quiet_cmd_32 = OBJCOPY $@
+       cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
 vmlinux.32: vmlinux
-       $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
+       $(call cmd,32)
 
 #
 # The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
 # ELF files from 32-bit files by conversion.
 #
+quiet_cmd_64 = OBJCOPY $@
+       cmd_64 = $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
 vmlinux.64: vmlinux
-       $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
+       $(call cmd,64)
 
 all:   $(all-y)
 
@@ -302,10 +306,16 @@ $(boot-y): $(vmlinux-32) FORCE
        $(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
                $(bootvars-y) arch/mips/boot/$@
 
+ifdef CONFIG_SYS_SUPPORTS_ZBOOT
 # boot/compressed
 $(bootz-y): $(vmlinux-32) FORCE
        $(Q)$(MAKE) $(build)=arch/mips/boot/compressed \
                $(bootvars-y) 32bit-bfd=$(32bit-bfd) $@
+else
+vmlinuz: FORCE
+       @echo '   CONFIG_SYS_SUPPORTS_ZBOOT is not enabled'
+       /bin/false
+endif
 
 
 CLEAN_FILES += vmlinux.32 vmlinux.64
index 4a9baa9f63300cab298d913009c3d840d8adbc12..9969dbab19e36e3f2bd1210c93840bc85bfd7f16 100644 (file)
@@ -276,7 +276,7 @@ static struct platform_device mtx1_pci_host = {
        .resource       = alchemy_pci_host_res,
 };
 
-static struct __initdata platform_device * mtx1_devs[] = {
+static struct platform_device *mtx1_devs[] __initdata = {
        &mtx1_pci_host,
        &mtx1_gpio_leds,
        &mtx1_wdt,
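Editorial note: the board-mtx1.c hunk fixes where __initdata sits: the section attribute belongs after the array declarator, not between struct and the type name, and the stray space in the pointer declarator goes away too. The corrected pattern is shown below with hypothetical names (foo_dev/foo_devs are placeholders, not from the tree).

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device foo_dev;  /* assume a device defined in this file */

/* Wrong:   static struct __initdata platform_device *foo_devs[] = { ... };
 * Correct: the attribute follows the declarator it annotates.              */
static struct platform_device *foo_devs[] __initdata = {
        &foo_dev,
};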
index c76a90f7866427724d74a4df340ab3de9fdf6d9d..bac19dc43d1ddf6c101ffafaa18c590ccde8d523 100644 (file)
@@ -59,7 +59,7 @@ void __init board_setup(void)
                ret = -ENODEV;
        }
        if (ret)
-               panic("cannot initialize board support\n");
+               panic("cannot initialize board support");
 }
 
 int __init db1235_arch_init(void)
index c3b04c929f29b552014013f253d18e8500faae5b..516225d207eeed9cd934a00c8665cc936061fc1d 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ar933x_uart_platform.h>
 #include "common.h"
 #include "dev-common.h"
 
@@ -68,15 +67,11 @@ static struct resource ar933x_uart_resources[] = {
        },
 };
 
-static struct ar933x_uart_platform_data ar933x_uart_data;
 static struct platform_device ar933x_uart_device = {
        .name           = "ar933x-uart",
        .id             = -1,
        .resource       = ar933x_uart_resources,
        .num_resources  = ARRAY_SIZE(ar933x_uart_resources),
-       .dev = {
-               .platform_data  = &ar933x_uart_data,
-       },
 };
 
 void __init ath79_register_uart(void)
@@ -93,7 +88,6 @@ void __init ath79_register_uart(void)
                ath79_uart_data[0].uartclk = uart_clk_rate;
                platform_device_register(&ath79_uart_device);
        } else if (soc_is_ar933x()) {
-               ar933x_uart_data.uartclk = uart_clk_rate;
                platform_device_register(&ar933x_uart_device);
        } else {
                BUG();
index f3bf6d5bfb9d9430023d001e98c7468fa342b1eb..c52daf9b05c638afbe4062ea204bb6522c15ccd1 100644 (file)
@@ -4,4 +4,5 @@
 #
 
 obj-y                          += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y                          += board.o
 obj-$(CONFIG_BCM47XX_SSB)      += wgt634u.o
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
new file mode 100644 (file)
index 0000000..f3f6bfe
--- /dev/null
@@ -0,0 +1,309 @@
+#include <linux/export.h>
+#include <linux/string.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx_nvram.h>
+
+struct bcm47xx_board_type {
+       const enum bcm47xx_board board;
+       const char *name;
+};
+
+struct bcm47xx_board_type_list1 {
+       struct bcm47xx_board_type board;
+       const char *value1;
+};
+
+struct bcm47xx_board_type_list2 {
+       struct bcm47xx_board_type board;
+       const char *value1;
+       const char *value2;
+};
+
+struct bcm47xx_board_type_list3 {
+       struct bcm47xx_board_type board;
+       const char *value1;
+       const char *value2;
+       const char *value3;
+};
+
+struct bcm47xx_board_store {
+       enum bcm47xx_board board;
+       char name[BCM47XX_BOARD_MAX_NAME];
+};
+
+/* model_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_name[] __initconst = {
+       {{BCM47XX_BOARD_DLINK_DIR130, "D-Link DIR-130"}, "DIR-130"},
+       {{BCM47XX_BOARD_DLINK_DIR330, "D-Link DIR-330"}, "DIR-330"},
+       { {0}, 0},
+};
+
+/* model_no */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_no[] __initconst = {
+       {{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "WL700"},
+       { {0}, 0},
+};
+
+/* machine_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_machine_name[] __initconst = {
+       {{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "WRTSL54GS"},
+       { {0}, 0},
+};
+
+/* hardware_version */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = {
+       {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16-"},
+       {{BCM47XX_BOARD_ASUS_WL320GE, "Asus WL320GE"}, "WL320G-"},
+       {{BCM47XX_BOARD_ASUS_WL330GE, "Asus WL330GE"}, "WL330GE-"},
+       {{BCM47XX_BOARD_ASUS_WL500GD, "Asus WL500GD"}, "WL500gd-"},
+       {{BCM47XX_BOARD_ASUS_WL500GPV1, "Asus WL500GP V1"}, "WL500gp-"},
+       {{BCM47XX_BOARD_ASUS_WL500GPV2, "Asus WL500GP V2"}, "WL500GPV2-"},
+       {{BCM47XX_BOARD_ASUS_WL500W, "Asus WL500W"}, "WL500gW-"},
+       {{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
+       {{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
+       {{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
+       { {0}, 0},
+};
+
+/* productid */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_productid[] __initconst = {
+       {{BCM47XX_BOARD_ASUS_RTAC66U, "Asus RT-AC66U"}, "RT-AC66U"},
+       {{BCM47XX_BOARD_ASUS_RTN10, "Asus RT-N10"}, "RT-N10"},
+       {{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RT-N10D"},
+       {{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RT-N10U"},
+       {{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"},
+       {{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RT-N12B1"},
+       {{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RT-N12C1"},
+       {{BCM47XX_BOARD_ASUS_RTN12D1, "Asus RT-N12D1"}, "RT-N12D1"},
+       {{BCM47XX_BOARD_ASUS_RTN12HP, "Asus RT-N12HP"}, "RT-N12HP"},
+       {{BCM47XX_BOARD_ASUS_RTN15U, "Asus RT-N15U"}, "RT-N15U"},
+       {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16"},
+       {{BCM47XX_BOARD_ASUS_RTN53, "Asus RT-N53"}, "RT-N53"},
+       {{BCM47XX_BOARD_ASUS_RTN66U, "Asus RT-N66U"}, "RT-N66U"},
+       {{BCM47XX_BOARD_ASUS_WL300G, "Asus WL300G"}, "WL300g"},
+       {{BCM47XX_BOARD_ASUS_WLHDD, "Asus WLHDD"}, "WLHDD"},
+       { {0}, 0},
+};
+
+/* ModelId */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_ModelId[] __initconst = {
+       {{BCM47XX_BOARD_DELL_TM2300, "Dell WX-5565"}, "WX-5565"},
+       {{BCM47XX_BOARD_MOTOROLA_WE800G, "Motorola WE800G"}, "WE800G"},
+       {{BCM47XX_BOARD_MOTOROLA_WR850GP, "Motorola WR850GP"}, "WR850GP"},
+       {{BCM47XX_BOARD_MOTOROLA_WR850GV2V3, "Motorola WR850G"}, "WR850G"},
+       { {0}, 0},
+};
+
+/* melco_id or buffalo_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_melco_id[] __initconst = {
+       {{BCM47XX_BOARD_BUFFALO_WBR2_G54, "Buffalo WBR2-G54"}, "29bb0332"},
+       {{BCM47XX_BOARD_BUFFALO_WHR2_A54G54, "Buffalo WHR2-A54G54"}, "290441dd"},
+       {{BCM47XX_BOARD_BUFFALO_WHR_G125, "Buffalo WHR-G125"}, "32093"},
+       {{BCM47XX_BOARD_BUFFALO_WHR_G54S, "Buffalo WHR-G54S"}, "30182"},
+       {{BCM47XX_BOARD_BUFFALO_WHR_HP_G54, "Buffalo WHR-HP-G54"}, "30189"},
+       {{BCM47XX_BOARD_BUFFALO_WLA2_G54L, "Buffalo WLA2-G54L"}, "29129"},
+       {{BCM47XX_BOARD_BUFFALO_WZR_G300N, "Buffalo WZR-G300N"}, "31120"},
+       {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54, "Buffalo WZR-RS-G54"}, "30083"},
+       {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP, "Buffalo WZR-RS-G54HP"}, "30103"},
+       { {0}, 0},
+};
+
+/* boot_hw_model, boot_hw_ver */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_boot_hw[] __initconst = {
+       /* like WRT160N v3.0 */
+       {{BCM47XX_BOARD_CISCO_M10V1, "Cisco M10"}, "M10", "1.0"},
+       /* like WRT310N v2.0 */
+       {{BCM47XX_BOARD_CISCO_M20V1, "Cisco M20"}, "M20", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E900V1, "Linksys E900 V1"}, "E900", "1.0"},
+       /* like WRT160N v3.0 */
+       {{BCM47XX_BOARD_LINKSYS_E1000V1, "Linksys E1000 V1"}, "E100", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E1000V2, "Linksys E1000 V2"}, "E1000", "2.0"},
+       {{BCM47XX_BOARD_LINKSYS_E1000V21, "Linksys E1000 V2.1"}, "E1000", "2.1"},
+       {{BCM47XX_BOARD_LINKSYS_E1200V2, "Linksys E1200 V2"}, "E1200", "2.0"},
+       {{BCM47XX_BOARD_LINKSYS_E2000V1, "Linksys E2000 V1"}, "Linksys E2000", "1.0"},
+       /* like WRT610N v2.0 */
+       {{BCM47XX_BOARD_LINKSYS_E3000V1, "Linksys E3000 V1"}, "E300", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E3200V1, "Linksys E3200 V1"}, "E3200", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E4200V1, "Linksys E4200 V1"}, "E4200", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT150NV11, "Linksys WRT150N V1.1"}, "WRT150N", "1.1"},
+       {{BCM47XX_BOARD_LINKSYS_WRT150NV1, "Linksys WRT150N V1"}, "WRT150N", "1"},
+       {{BCM47XX_BOARD_LINKSYS_WRT160NV1, "Linksys WRT160N V1"}, "WRT160N", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT160NV3, "Linksys WRT160N V3"}, "WRT160N", "3.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT300NV11, "Linksys WRT300N V1.1"}, "WRT300N", "1.1"},
+       {{BCM47XX_BOARD_LINKSYS_WRT310NV1, "Linksys WRT310N V1"}, "WRT310N", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT310NV2, "Linksys WRT310N V2"}, "WRT310N", "2.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT54G3GV2, "Linksys WRT54G3GV2-VF"}, "WRT54G3GV2-VF", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT610NV1, "Linksys WRT610N V1"}, "WRT610N", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT610NV2, "Linksys WRT610N V2"}, "WRT610N", "2.0"},
+       { {0}, 0},
+};
+
+/* board_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
+       {{BCM47XX_BOARD_NETGEAR_WGR614V8, "Netgear WGR614 V8"}, "U12H072T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WGR614V9, "Netgear WGR614 V9"}, "U12H094T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3300, "Netgear WNDR3300"}, "U12H093T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3400V1, "Netgear WNDR3400 V1"}, "U12H155T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3400V2, "Netgear WNDR3400 V2"}, "U12H187T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3400VCNA, "Netgear WNDR3400 Vcna"}, "U12H155T01_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3700V3, "Netgear WNDR3700 V3"}, "U12H194T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR4000, "Netgear WNDR4000"}, "U12H181T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR4500V1, "Netgear WNDR4500 V1"}, "U12H189T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR4500V2, "Netgear WNDR4500 V2"}, "U12H224T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR2000, "Netgear WNR2000"}, "U12H114T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "U12H136T99_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500U, "Netgear WNR3500U"}, "U12H136T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500V2, "Netgear WNR3500 V2"}, "U12H127T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500V2VC, "Netgear WNR3500 V2vc"}, "U12H127T70_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR834BV2, "Netgear WNR834B V2"}, "U12H081T00_NETGEAR"},
+       { {0}, 0},
+};
+
+/* boardtype, boardnum, boardrev */
+static const
+struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
+       {{BCM47XX_BOARD_HUAWEI_E970, "Huawei E970"}, "0x048e", "0x5347", "0x11"},
+       {{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
+       {{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
+       { {0}, 0},
+};
+
+static const
+struct bcm47xx_board_type bcm47xx_board_unknown[] __initconst = {
+       {BCM47XX_BOARD_UNKNOWN, "Unknown Board"},
+};
+
+static struct bcm47xx_board_store bcm47xx_board = {BCM47XX_BOARD_NO, "Unknown Board"};
+
+static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
+{
+       char buf1[30];
+       char buf2[30];
+       char buf3[30];
+       const struct bcm47xx_board_type_list1 *e1;
+       const struct bcm47xx_board_type_list2 *e2;
+       const struct bcm47xx_board_type_list3 *e3;
+
+       if (bcm47xx_nvram_getenv("model_name", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_model_name; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("model_no", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_model_no; e1->value1; e1++) {
+                       if (strstarts(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("machine_name", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_machine_name; e1->value1; e1++) {
+                       if (strstarts(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_hardware_version; e1->value1; e1++) {
+                       if (strstarts(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("productid", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_productid; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("ModelId", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_ModelId; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("melco_id", buf1, sizeof(buf1)) >= 0 ||
+           bcm47xx_nvram_getenv("buffalo_id", buf1, sizeof(buf1)) >= 0) {
+               /* Buffalo hardware: check the id against known devices */
+               for (e1 = bcm47xx_board_list_melco_id; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("boot_hw_model", buf1, sizeof(buf1)) >= 0 &&
+           bcm47xx_nvram_getenv("boot_hw_ver", buf2, sizeof(buf2)) >= 0) {
+               for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+                       if (!strcmp(buf1, e2->value1) &&
+                           !strcmp(buf2, e2->value2))
+                               return &e2->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("board_id", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_board_id; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+           bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0 &&
+           bcm47xx_nvram_getenv("boardrev", buf3, sizeof(buf3)) >= 0) {
+               for (e3 = bcm47xx_board_list_board; e3->value1; e3++) {
+                       if (!strcmp(buf1, e3->value1) &&
+                           !strcmp(buf2, e3->value2) &&
+                           !strcmp(buf3, e3->value3))
+                               return &e3->board;
+               }
+       }
+       return bcm47xx_board_unknown;
+}
+
+void __init bcm47xx_board_detect(void)
+{
+       int err;
+       char buf[10];
+       const struct bcm47xx_board_type *board_detected;
+
+       if (bcm47xx_board.board != BCM47XX_BOARD_NO)
+               return;
+
+       /* check if the nvram is available */
+       err = bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf));
+
+       /* init of nvram failed, probably too early now */
+       if (err == -ENXIO)
+               return;
+
+       board_detected = bcm47xx_board_get_nvram();
+       bcm47xx_board.board = board_detected->board;
+       strlcpy(bcm47xx_board.name, board_detected->name,
+               BCM47XX_BOARD_MAX_NAME);
+}
+
+enum bcm47xx_board bcm47xx_board_get(void)
+{
+       return bcm47xx_board.board;
+}
+EXPORT_SYMBOL(bcm47xx_board_get);
+
+const char *bcm47xx_board_get_name(void)
+{
+       return bcm47xx_board.name;
+}
+EXPORT_SYMBOL(bcm47xx_board_get_name);
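A minimal, hypothetical consumer of the two accessors exported above (the chosen board constant and the log messages are illustrative assumptions, not part of this patch):

#include <linux/init.h>
#include <linux/printk.h>
#include <bcm47xx_board.h>

static void __init example_apply_board_quirks(void)
{
	/* Log the detected board and special-case one model (illustrative). */
	pr_info("bcm47xx: detected board: %s\n", bcm47xx_board_get_name());

	if (bcm47xx_board_get() == BCM47XX_BOARD_ASUS_WL500GPV2)
		pr_info("bcm47xx: applying WL500GP V2 specific setup\n");
}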
index cc40b74940f5f6531e4e74cfcf4c665a7b13a9fd..b4c585b1c62eb6279504daea7e1f3a2e7d0b777d 100644 (file)
@@ -190,3 +190,23 @@ int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
        return -ENOENT;
 }
 EXPORT_SYMBOL(bcm47xx_nvram_getenv);
+
+int bcm47xx_nvram_gpio_pin(const char *name)
+{
+       int i, err;
+       char nvram_var[10];
+       char buf[30];
+
+       for (i = 0; i < 16; i++) {
+               err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
+               if (err <= 0)
+                       continue;
+               err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
+               if (err <= 0)
+                       continue;
+               if (!strcmp(name, buf))
+                       return i;
+       }
+       return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
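bcm47xx_nvram_gpio_pin() walks the nvram variables gpio0..gpio15 and returns the number of the pin whose value matches the requested name. A hedged usage sketch (the "power" value and the GPIO label are assumptions for illustration, not taken from this patch):

#include <linux/init.h>
#include <linux/gpio.h>
#include <bcm47xx_nvram.h>

static int __init example_power_led_init(void)
{
	/* Assumes the device's nvram contains something like "gpio3=power". */
	int pin = bcm47xx_nvram_gpio_pin("power");

	if (pin < 0)
		return pin;	/* -ENOENT when no gpioN variable matches */

	return gpio_request_one(pin, GPIOF_OUT_INIT_LOW, "bcm47xx:power");
}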
index 8c155afb1299e7eda1a413cf4fe4fff3e3d2f093..5cba318bc1cd8e1ce32a8535ecbf79acf1000bf5 100644 (file)
 #include <asm/bootinfo.h>
 #include <asm/fw/cfe/cfe_api.h>
 #include <asm/fw/cfe/cfe_error.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
 
 static int cfe_cons_handle;
 
+static u16 get_chip_id(void)
+{
+       switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+       case BCM47XX_BUS_TYPE_SSB:
+               return bcm47xx_bus.ssb.chip_id;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+       case BCM47XX_BUS_TYPE_BCMA:
+               return bcm47xx_bus.bcma.bus.chipinfo.id;
+#endif
+       }
+       return 0;
+}
+
 const char *get_system_type(void)
 {
-       return "Broadcom BCM47XX";
+       static char buf[50];
+       u16 chip_id = get_chip_id();
+
+       snprintf(buf, sizeof(buf),
+                (chip_id > 0x9999) ? "Broadcom BCM%d (%s)" :
+                                     "Broadcom BCM%04X (%s)",
+                chip_id, bcm47xx_board_get_name());
+
+       return buf;
 }
 
 void prom_putchar(char c)
index b2246cd9ca1283eeccddbc8309c49eca28364c9a..1f30571968e78194ad6338b4f0ac3cb3b64332bc 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/time.h>
 #include <bcm47xx.h>
 #include <bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
 
 union bcm47xx_bus bcm47xx_bus;
 EXPORT_SYMBOL(bcm47xx_bus);
@@ -221,6 +222,7 @@ void __init plat_mem_setup(void)
        _machine_restart = bcm47xx_machine_restart;
        _machine_halt = bcm47xx_machine_halt;
        pm_power_off = bcm47xx_machine_halt;
+       bcm47xx_board_detect();
 }
 
 static int __init bcm47xx_register_bus_complete(void)
index 536374dcba78995981b2a0bc04ab7ae62941aa00..2c85d9254b5e91d017e6a36351294e0c7b931d74 100644 (file)
 #include <linux/ssb/ssb.h>
 #include <asm/time.h>
 #include <bcm47xx.h>
+#include <bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
 
 void __init plat_time_init(void)
 {
        unsigned long hz = 0;
+       u16 chip_id = 0;
+       char buf[10];
+       int len;
+       enum bcm47xx_board board = bcm47xx_board_get();
 
        /*
         * Use deterministic values for initial counter interrupt
@@ -43,15 +49,32 @@ void __init plat_time_init(void)
 #ifdef CONFIG_BCM47XX_SSB
        case BCM47XX_BUS_TYPE_SSB:
                hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
+               chip_id = bcm47xx_bus.ssb.chip_id;
                break;
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
        case BCM47XX_BUS_TYPE_BCMA:
                hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
+               chip_id = bcm47xx_bus.bcma.bus.chipinfo.id;
                break;
 #endif
        }
 
+       if (chip_id == 0x5354) {
+               len = bcm47xx_nvram_getenv("clkfreq", buf, sizeof(buf));
+               if (len >= 0 && !strncmp(buf, "200", 4))
+                       hz = 100000000;
+       }
+
+       switch (board) {
+       case BCM47XX_BOARD_ASUS_WL520GC:
+       case BCM47XX_BOARD_ASUS_WL520GU:
+               hz = 100000000;
+               break;
+       default:
+               break;
+       }
+
        if (!hz)
                hz = 100000000;
 
index 0048c08978965428a32a03bf211c3f86909c12fb..ca0c343c9ea5ed024b87a5e314b4118d9ecff04e 100644 (file)
@@ -37,6 +37,10 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)                += $(obj)/uart-alchemy.o
 endif
 
+ifdef CONFIG_KERNEL_XZ
+vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o
+endif
+
 targets += vmlinux.bin
 OBJCOPYFLAGS_vmlinux.bin := $(OBJCOPYFLAGS) -O binary -R .comment -S
 $(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
@@ -44,8 +48,10 @@ $(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
 
 tool_$(CONFIG_KERNEL_GZIP)    = gzip
 tool_$(CONFIG_KERNEL_BZIP2)   = bzip2
+tool_$(CONFIG_KERNEL_LZ4)     = lz4
 tool_$(CONFIG_KERNEL_LZMA)    = lzma
 tool_$(CONFIG_KERNEL_LZO)     = lzo
+tool_$(CONFIG_KERNEL_XZ)      = xzkern
 
 targets += vmlinux.bin.z
 $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
index 2c9573098c0dab7889895de68c2dae5bb9ad9ba8..a8c6fd6a440667cf58c35d4655862783de7d271f 100644 (file)
@@ -43,7 +43,8 @@ void error(char *x)
 /* activate the code for pre-boot environment */
 #define STATIC static
 
-#ifdef CONFIG_KERNEL_GZIP
+#if defined(CONFIG_KERNEL_GZIP) || defined(CONFIG_KERNEL_XZ) || \
+       defined(CONFIG_KERNEL_LZ4)
 void *memcpy(void *dest, const void *src, size_t n)
 {
        int i;
@@ -54,6 +55,8 @@ void *memcpy(void *dest, const void *src, size_t n)
                d[i] = s[i];
        return dest;
 }
+#endif
+#ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/decompress_inflate.c"
 #endif
 
@@ -70,6 +73,10 @@ void *memset(void *s, int c, size_t n)
 #include "../../../../lib/decompress_bunzip2.c"
 #endif
 
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZMA
 #include "../../../../lib/decompress_unlzma.c"
 #endif
@@ -78,6 +85,10 @@ void *memset(void *s, int c, size_t n)
 #include "../../../../lib/decompress_unlzo.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 void decompress_kernel(unsigned long boot_heap_start)
 {
        unsigned long zimage_start, zimage_size;
index 8e6b07ca2f5e6924b68b8ce177aa7c9e7119e80e..5a33409c7f63b09bf7599ed77a4dd762c99f8ead 100644 (file)
@@ -8,6 +8,9 @@
 
 OUTPUT_ARCH(mips)
 ENTRY(start)
+PHDRS {
+       text PT_LOAD FLAGS(7); /* RWX */
+}
 SECTIONS
 {
        /* Text and read-only data */
@@ -15,7 +18,7 @@ SECTIONS
        .text : {
                *(.text)
                *(.rodata)
-       }
+       }: text
        /* End of text section */
 
        /* Writable data */
index b212ae12e5ac7dc8ca35dfb24324ba638353f85e..331b837cec57d284d704282aec779056c7f623be 100644 (file)
@@ -999,7 +999,7 @@ void __init plat_mem_setup(void)
 
        if (total == 0)
                panic("Unable to allocate memory from "
-                     "cvmx_bootmem_phy_alloc\n");
+                     "cvmx_bootmem_phy_alloc");
 }
 
 /*
@@ -1081,7 +1081,7 @@ void __init device_tree_init(void)
        /* Copy the default tree from init memory. */
        initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
        if (initial_boot_params == NULL)
-               panic("Could not allocate initial_boot_params\n");
+               panic("Could not allocate initial_boot_params");
        memcpy(initial_boot_params, fdt, dt_size);
 
        if (do_prune) {
index 61a334ac43ac7c52565713e4a45ed882da57132d..558e94977942033dc8247bcc510ebb705aa9698a 100644 (file)
@@ -5,5 +5,4 @@
 obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o
 
 obj-$(CONFIG_PCI)              += pci.o
-obj-$(CONFIG_EARLY_PRINTK)     += console.o
 obj-$(CONFIG_MTD_PHYSMAP)      += mtd.o
diff --git a/arch/mips/cobalt/console.c b/arch/mips/cobalt/console.c
deleted file mode 100644 (file)
index d1ba701..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * (C) P. Horton 2006
- */
-#include <linux/io.h>
-#include <linux/serial_reg.h>
-
-#include <cobalt.h>
-
-#define UART_BASE      ((void __iomem *)CKSEG1ADDR(0x1c800000))
-
-void prom_putchar(char c)
-{
-       if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
-               return;
-
-       while (!(readb(UART_BASE + UART_LSR) & UART_LSR_THRE))
-               ;
-
-       writeb(c, UART_BASE + UART_TX);
-}
index ec3b2c417f7cc42b1d3390355cd1067547664022..9a8c2fe8d3345a95a644787668e118bbfa51fedb 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/gt64120.h>
 
 #include <cobalt.h>
@@ -112,6 +113,8 @@ void __init prom_init(void)
        }
 
        add_memory_region(0x0, memsz, BOOT_MEM_RAM);
+
+       setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
 }
 
 void __init prom_free_prom_memory(void)
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
deleted file mode 100644 (file)
index 7fda0ce..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-CONFIG_POWERTV=y
-CONFIG_BOOTLOADER_FAMILY="R2"
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE=""
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_GZIP is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_PCSPKR_PLATFORM is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_PNP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
-# CONFIG_IPV6_SIT is not set
-CONFIG_IPV6_TUNNEL=y
-CONFIG_NETFILTER=y
-# CONFIG_BRIDGE_NETFILTER is not set
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_BRIDGE=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_TBF=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=32768
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_ATA=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_WLAN is not set
-CONFIG_USB_RTL8150=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
-# CONFIG_VGA_ARB is not set
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_CP210X=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_FUSE_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_EARLY_PRINTK is not set
-CONFIG_CMDLINE_BOOL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
index 22afed16ccde293a741e88c9f2fd9f56b733ccd1..41a2fa1fa12e549f75f094505ca044a19b56088d 100644 (file)
  *            7        FPU/R4k timer
  *
  * We handle the IRQ according to _our_ priority (see setup.c),
- * then we just return.         If multiple IRQs are pending then we will
+ * then we just return.  If multiple IRQs are pending then we will
  * just take another exception, big deal.
  */
                .align  5
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,cpu_mask_nr_tbl
+                PTR_LA t1,cpu_mask_nr_tbl
 1:             lw      t2,(t1)
                nop
                and     t2,t0
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,asic_mask_nr_tbl
+                PTR_LA t1,asic_mask_nr_tbl
 2:             lw      t2,(t1)
                nop
                and     t2,t0
                FEXPORT(cpu_all_int)            # HALT, timers, software junk
                li      a0,DEC_CPU_IRQ_BASE
                srl     t0,CAUSEB_IP
-               li      t1,CAUSEF_IP>>CAUSEB_IP # mask
+               li      t1,CAUSEF_IP>>CAUSEB_IP # mask
                b       1f
                 li     t2,4                    # nr of bits / 2
 
index 4b3e3a4375a6756b2af743c3f163d50055fb2c45..e04d973ce5aa24cd832771b417b0b5ce23f14ac4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     DEC I/O ASIC interrupts.
  *
- *     Copyright (c) 2002, 2003  Maciej W. Rozycki
+ *     Copyright (c) 2002, 2003, 2013  Maciej W. Rozycki
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
@@ -51,22 +51,51 @@ static struct irq_chip ioasic_irq_type = {
        .irq_unmask = unmask_ioasic_irq,
 };
 
-void clear_ioasic_dma_irq(unsigned int irq)
+static void clear_ioasic_dma_irq(struct irq_data *d)
 {
        u32 sir;
 
-       sir = ~(1 << (irq - ioasic_irq_base));
+       sir = ~(1 << (d->irq - ioasic_irq_base));
        ioasic_write(IO_REG_SIR, sir);
+       fast_iob();
 }
 
 static struct irq_chip ioasic_dma_irq_type = {
        .name = "IO-ASIC-DMA",
-       .irq_ack = ack_ioasic_irq,
+       .irq_ack = clear_ioasic_dma_irq,
        .irq_mask = mask_ioasic_irq,
-       .irq_mask_ack = ack_ioasic_irq,
        .irq_unmask = unmask_ioasic_irq,
+       .irq_eoi = clear_ioasic_dma_irq,
 };
 
+/*
+ * I/O ASIC implements two kinds of DMA interrupts, informational and
+ * error interrupts.
+ *
+ * The former do not stop DMA and should be cleared as soon as possible
+ * so that if they retrigger before the handler has completed, usually as
+ * a side effect of actions taken by the handler, then they are reissued.
+ * These use the `handle_edge_irq' handler that clears the request right
+ * away.
+ *
+ * The latter stop DMA and do not resume it until the interrupt has been
+ * cleared.  This cannot be done until after a corrective action has been
+ * taken and this also means they will not retrigger.  Therefore they use
+ * the `handle_fasteoi_irq' handler that only clears the request on the
+ * way out.  Because MIPS processor interrupt inputs, one of which the I/O
+ * ASIC is cascaded to, are level-triggered, it is recommended that error
+ * DMA interrupt action handlers are registered with the IRQF_ONESHOT flag
+ * set so that they are run with the interrupt line masked.
+ *
+ * This mask has `1' bits in the positions of informational interrupts.
+ */
+#define IO_IRQ_DMA_INFO                                                        \
+       (IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) |                              \
+        IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) |                              \
+        IO_IRQ_MASK(IO_INR_ISDN_TXDMA) |                               \
+        IO_IRQ_MASK(IO_INR_ISDN_RXDMA) |                               \
+        IO_IRQ_MASK(IO_INR_ASC_DMA))
+
 void __init init_ioasic_irqs(int base)
 {
        int i;
@@ -79,7 +108,9 @@ void __init init_ioasic_irqs(int base)
                irq_set_chip_and_handler(i, &ioasic_irq_type,
                                         handle_level_irq);
        for (; i < base + IO_IRQ_LINES; i++)
-               irq_set_chip(i, &ioasic_dma_irq_type);
+               irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
+                                        1 << (i - base) & IO_IRQ_DMA_INFO ?
+                                        handle_edge_irq : handle_fasteoi_irq);
 
        ioasic_irq_base = base;
 }
index c0d1522d448f91591e06131454598232f931198a..8c8498159e434fa039f7ecae3e290ee0c31d6677 100644 (file)
@@ -14,7 +14,7 @@
 
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save. */
+/* Number of static registers we save.  */
 #define O32_STATC      11
 /* Frame size for both of the above.  */
 #define O32_FRAMESZ    (4 * O32_ARGC + SZREG * O32_STATC)
index 468f665de7bb731e5d8cf71dcef5c1461c146e48..4e1761e0a09af73a8e9477e09e0ce34e7a981a0e 100644 (file)
@@ -104,7 +104,7 @@ void __init prom_init(void)
        if (prom_is_rex(magic))
                rex_clear_cache();
 
-       /* Register the early console.  */
+       /* Register the early console.  */
        register_prom_console();
 
        /* Were we compiled with the right CPU option? */
index 0aadac74290031dffd4d49a3d80de9054118e93a..8c62316f22f438b4b9cc18b23585f3e0ad8b7258 100644 (file)
@@ -22,7 +22,7 @@ volatile unsigned long mem_err;               /* So we know an error occurred */
 
 /*
  * Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
- * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
+ * off the end of real memory.  Only suitable for the 2100/3100's (PMAX).
  */
 
 #define CHUNK_SIZE 0x400000
index 741cb4235bde2106e2e152bcd1caa2ca63c020b9..56e6e2c23683b328c22f51831908414750cf710a 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(ioasic_base);
 /*
  * IRQ routing and priority tables.  Priorites are set as follows:
  *
- *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
+ *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
  *
  * MEMORY      CPU     CPU     CPU     ASIC    CPU     CPU
  * RTC         CPU     CPU     CPU     ASIC    CPU     CPU
@@ -413,7 +413,7 @@ static void __init dec_init_kn02(void)
 
 /*
  * Machine-specific initialisation for KN02-BA, aka DS5000/1xx
- * (xx = 20, 25, 33), aka 3min.         Also applies to KN04(-BA), aka
+ * (xx = 20, 25, 33), aka 3min.  Also applies to KN04(-BA), aka
  * DS5000/150, aka 4min.
  */
 static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
index 68f37e3eccc7f5032e98a7aa854eeceeb1783ace..c75025f27c201f02a9b85f65c2fdbbc529768325 100644 (file)
 /*
  * Cache Operations available on all MIPS processors with R4000-style caches
  */
-#define Index_Invalidate_I     0x00
-#define Index_Writeback_Inv_D  0x01
-#define Index_Load_Tag_I       0x04
-#define Index_Load_Tag_D       0x05
-#define Index_Store_Tag_I      0x08
-#define Index_Store_Tag_D      0x09
-#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I       0x00
-#else
-#define Hit_Invalidate_I       0x10
-#endif
-#define Hit_Invalidate_D       0x11
-#define Hit_Writeback_Inv_D    0x15
+#define Index_Invalidate_I             0x00
+#define Index_Writeback_Inv_D          0x01
+#define Index_Load_Tag_I               0x04
+#define Index_Load_Tag_D               0x05
+#define Index_Store_Tag_I              0x08
+#define Index_Store_Tag_D              0x09
+#define Hit_Invalidate_I               0x10
+#define Hit_Invalidate_D               0x11
+#define Hit_Writeback_Inv_D            0x15
 
 /*
  * R4000-specific cacheops
  */
-#define Create_Dirty_Excl_D    0x0d
-#define Fill                   0x14
-#define Hit_Writeback_I                0x18
-#define Hit_Writeback_D                0x19
+#define Create_Dirty_Excl_D            0x0d
+#define Fill                           0x14
+#define Hit_Writeback_I                        0x18
+#define Hit_Writeback_D                        0x19
 
 /*
  * R4000SC and R4400SC-specific cacheops
  */
-#define Index_Invalidate_SI    0x02
-#define Index_Writeback_Inv_SD 0x03
-#define Index_Load_Tag_SI      0x06
-#define Index_Load_Tag_SD      0x07
-#define Index_Store_Tag_SI     0x0A
-#define Index_Store_Tag_SD     0x0B
-#define Create_Dirty_Excl_SD   0x0f
-#define Hit_Invalidate_SI      0x12
-#define Hit_Invalidate_SD      0x13
-#define Hit_Writeback_Inv_SD   0x17
-#define Hit_Writeback_SD       0x1b
-#define Hit_Set_Virtual_SI     0x1e
-#define Hit_Set_Virtual_SD     0x1f
+#define Index_Invalidate_SI            0x02
+#define Index_Writeback_Inv_SD         0x03
+#define Index_Load_Tag_SI              0x06
+#define Index_Load_Tag_SD              0x07
+#define Index_Store_Tag_SI             0x0A
+#define Index_Store_Tag_SD             0x0B
+#define Create_Dirty_Excl_SD           0x0f
+#define Hit_Invalidate_SI              0x12
+#define Hit_Invalidate_SD              0x13
+#define Hit_Writeback_Inv_SD           0x17
+#define Hit_Writeback_SD               0x1b
+#define Hit_Set_Virtual_SI             0x1e
+#define Hit_Set_Virtual_SD             0x1f
 
 /*
  * R5000-specific cacheops
  */
-#define R5K_Page_Invalidate_S  0x17
+#define R5K_Page_Invalidate_S          0x17
 
 /*
  * RM7000-specific cacheops
  */
-#define Page_Invalidate_T      0x16
-#define Index_Store_Tag_T      0x0a
-#define Index_Load_Tag_T       0x06
+#define Page_Invalidate_T              0x16
+#define Index_Store_Tag_T              0x0a
+#define Index_Load_Tag_T               0x06
 
 /*
  * R10000-specific cacheops
  * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
  * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
  */
-#define Index_Writeback_Inv_S  0x03
-#define Index_Load_Tag_S       0x07
-#define Index_Store_Tag_S      0x0B
-#define Hit_Invalidate_S       0x13
-#define Cache_Barrier          0x14
-#define Hit_Writeback_Inv_S    0x17
-#define Index_Load_Data_I      0x18
-#define Index_Load_Data_D      0x19
-#define Index_Load_Data_S      0x1b
-#define Index_Store_Data_I     0x1c
-#define Index_Store_Data_D     0x1d
-#define Index_Store_Data_S     0x1f
+#define Index_Writeback_Inv_S          0x03
+#define Index_Load_Tag_S               0x07
+#define Index_Store_Tag_S              0x0B
+#define Hit_Invalidate_S               0x13
+#define Cache_Barrier                  0x14
+#define Hit_Writeback_Inv_S            0x17
+#define Index_Load_Data_I              0x18
+#define Index_Load_Data_D              0x19
+#define Index_Load_Data_S              0x1b
+#define Index_Store_Data_I             0x1c
+#define Index_Store_Data_D             0x1d
+#define Index_Store_Data_S             0x1f
+
+/*
+ * Loongson2-specific cacheops
+ */
+#define Hit_Invalidate_I_Loongson23    0x00
 
 #endif /* __ASM_CACHEOPS_H */
index 51680d15ca8ec2b2971365573506b39f473170aa..d445d060e346ab689a177bf584cfebd5b9dfca51 100644 (file)
 
 /*
  * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
  * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels
  * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
  */
index a6e505a0e44b4b119a6967fd5f8938ffb211a73e..be4d62a5a10ee203c8d2c9b16dc4ffda9551a493 100644 (file)
@@ -31,8 +31,6 @@ static inline u32 ioasic_read(unsigned int reg)
        return ioasic_base[reg / 4];
 }
 
-extern void clear_ioasic_dma_irq(unsigned int irq);
-
 extern void init_ioasic_irqs(int base);
 
 extern int dec_ioasic_clocksource_init(void);
index a8665a7611c2af3141313c8f21ad32eeae128dc2..8bd95971fe2dc98030565c6d41766ae1eb2b966a 100644 (file)
@@ -40,7 +40,7 @@
 #define IOASIC_FLOPPY  (11*IOASIC_SLOT_SIZE)   /* FDC (maxine) */
 #define IOASIC_SCSI    (12*IOASIC_SLOT_SIZE)   /* ASC SCSI */
 #define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE)   /* FDC DMA (maxine) */
-#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE)  /* ??? */
+#define IOASIC_SCSI_DMA        (14*IOASIC_SLOT_SIZE)   /* ??? */
 #define IOASIC_RES_15  (15*IOASIC_SLOT_SIZE)   /* unused? */
 
 
index 0eb3241de7060883d6b72e2878da258d9b41caf2..88d9ffd742588b41c99975943c7769fb75f18d25 100644 (file)
 /*
  * System Control & Status Register bits.
  */
-#define KN01_CSR_MNFMOD                (1<<15) /* MNFMOD manufacturing jumper */
-#define KN01_CSR_STATUS                (1<<14) /* self-test result status output */
-#define KN01_CSR_PARDIS                (1<<13) /* parity error disable */
-#define KN01_CSR_CRSRTST       (1<<12) /* PCC test output */
-#define KN01_CSR_MONO          (1<<11) /* mono/color fb SIMM installed */
-#define KN01_CSR_MEMERR                (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_MNFMOD                (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS                (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS                (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST       (1<<12) /* PCC test output */
+#define KN01_CSR_MONO          (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR                (1<<10) /* write timeout error status & ack*/
 #define KN01_CSR_VINT          (1<<9)  /* PCC area detect #2 status & ack */
 #define KN01_CSR_TXDIS         (1<<8)  /* DZ11 transmit disable */
 #define KN01_CSR_VBGTRG                (1<<2)  /* blue DAC voltage over green (r/o) */
index 69dc2a9a2d0f5602d91c4b83306f6f0952572a52..92c0fe2560997e901908eac961e8b27d34097f46 100644 (file)
@@ -68,7 +68,7 @@
 #define KN03CA_IO_SSR_ISDN_RST (1<<12)         /* ~ISDN (Am79C30A) reset */
 
 #define KN03CA_IO_SSR_FLOPPY_RST (1<<7)                /* ~FDC (82077) reset */
-#define KN03CA_IO_SSR_VIDEO_RST (1<<6)         /* ~framebuffer reset */
+#define KN03CA_IO_SSR_VIDEO_RST        (1<<6)          /* ~framebuffer reset */
 #define KN03CA_IO_SSR_AB_RST   (1<<5)          /* ACCESS.bus reset */
 #define KN03CA_IO_SSR_RES_4    (1<<4)          /* unused */
 #define KN03CA_IO_SSR_RES_3    (1<<4)          /* unused */
index 446577712bee23d45be3f9375a6d8ee8f2806a2e..c0ead63138453c04d3b20918fbcfb1e1c02c5c3c 100644 (file)
@@ -49,7 +49,7 @@
 
 #ifdef CONFIG_64BIT
 
-#define prom_is_rex(magic)     1       /* KN04 and KN05 are REX PROMs.  */
+#define prom_is_rex(magic)     1       /* KN04 and KN05 are REX PROMs.  */
 
 #else /* !CONFIG_64BIT */
 
index cf3ae2480b1d25f5dfd77e8e328e5745b413e2be..a66359ef4ece770e3ed1a6518e96b79e733b4afa 100644 (file)
@@ -331,6 +331,7 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs)                  \
        dump_task_fpu(tsk, elf_fpregs)
 
+#define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
 /* This yields a mask that user programs can use to figure out what
index 4d6d77ed9b9d679cd955e2fafe2dbc16527db65c..e194f957ca8c42a0af82f0621c425308a25e4d53 100644 (file)
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:\tnop\n\t"
+       asm_volatile_goto("1:\tnop\n\t"
                "nop\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h b/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h
deleted file mode 100644 (file)
index 6cb30f2..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- *  Platform data definition for Atheros AR933X UART
- *
- *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#ifndef _AR933X_UART_PLATFORM_H
-#define _AR933X_UART_PLATFORM_H
-
-struct ar933x_uart_platform_data {
-       unsigned        uartclk;
-};
-
-#endif /* _AR933X_UART_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
new file mode 100644 (file)
index 0000000..00867dd
--- /dev/null
@@ -0,0 +1,110 @@
+#ifndef __BCM47XX_BOARD_H
+#define __BCM47XX_BOARD_H
+
+enum bcm47xx_board {
+       BCM47XX_BOARD_ASUS_RTAC66U,
+       BCM47XX_BOARD_ASUS_RTN10,
+       BCM47XX_BOARD_ASUS_RTN10D,
+       BCM47XX_BOARD_ASUS_RTN10U,
+       BCM47XX_BOARD_ASUS_RTN12,
+       BCM47XX_BOARD_ASUS_RTN12B1,
+       BCM47XX_BOARD_ASUS_RTN12C1,
+       BCM47XX_BOARD_ASUS_RTN12D1,
+       BCM47XX_BOARD_ASUS_RTN12HP,
+       BCM47XX_BOARD_ASUS_RTN15U,
+       BCM47XX_BOARD_ASUS_RTN16,
+       BCM47XX_BOARD_ASUS_RTN53,
+       BCM47XX_BOARD_ASUS_RTN66U,
+       BCM47XX_BOARD_ASUS_WL300G,
+       BCM47XX_BOARD_ASUS_WL320GE,
+       BCM47XX_BOARD_ASUS_WL330GE,
+       BCM47XX_BOARD_ASUS_WL500GD,
+       BCM47XX_BOARD_ASUS_WL500GPV1,
+       BCM47XX_BOARD_ASUS_WL500GPV2,
+       BCM47XX_BOARD_ASUS_WL500W,
+       BCM47XX_BOARD_ASUS_WL520GC,
+       BCM47XX_BOARD_ASUS_WL520GU,
+       BCM47XX_BOARD_ASUS_WL700GE,
+       BCM47XX_BOARD_ASUS_WLHDD,
+
+       BCM47XX_BOARD_BELKIN_F7D4301,
+
+       BCM47XX_BOARD_BUFFALO_WBR2_G54,
+       BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
+       BCM47XX_BOARD_BUFFALO_WHR_G125,
+       BCM47XX_BOARD_BUFFALO_WHR_G54S,
+       BCM47XX_BOARD_BUFFALO_WHR_HP_G54,
+       BCM47XX_BOARD_BUFFALO_WLA2_G54L,
+       BCM47XX_BOARD_BUFFALO_WZR_G300N,
+       BCM47XX_BOARD_BUFFALO_WZR_RS_G54,
+       BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP,
+
+       BCM47XX_BOARD_CISCO_M10V1,
+       BCM47XX_BOARD_CISCO_M20V1,
+
+       BCM47XX_BOARD_DELL_TM2300,
+
+       BCM47XX_BOARD_DLINK_DIR130,
+       BCM47XX_BOARD_DLINK_DIR330,
+
+       BCM47XX_BOARD_HUAWEI_E970,
+
+       BCM47XX_BOARD_LINKSYS_E900V1,
+       BCM47XX_BOARD_LINKSYS_E1000V1,
+       BCM47XX_BOARD_LINKSYS_E1000V2,
+       BCM47XX_BOARD_LINKSYS_E1000V21,
+       BCM47XX_BOARD_LINKSYS_E1200V2,
+       BCM47XX_BOARD_LINKSYS_E2000V1,
+       BCM47XX_BOARD_LINKSYS_E3000V1,
+       BCM47XX_BOARD_LINKSYS_E3200V1,
+       BCM47XX_BOARD_LINKSYS_E4200V1,
+       BCM47XX_BOARD_LINKSYS_WRT150NV1,
+       BCM47XX_BOARD_LINKSYS_WRT150NV11,
+       BCM47XX_BOARD_LINKSYS_WRT160NV1,
+       BCM47XX_BOARD_LINKSYS_WRT160NV3,
+       BCM47XX_BOARD_LINKSYS_WRT300NV11,
+       BCM47XX_BOARD_LINKSYS_WRT310NV1,
+       BCM47XX_BOARD_LINKSYS_WRT310NV2,
+       BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
+       BCM47XX_BOARD_LINKSYS_WRT610NV1,
+       BCM47XX_BOARD_LINKSYS_WRT610NV2,
+       BCM47XX_BOARD_LINKSYS_WRTSL54GS,
+
+       BCM47XX_BOARD_MOTOROLA_WE800G,
+       BCM47XX_BOARD_MOTOROLA_WR850GP,
+       BCM47XX_BOARD_MOTOROLA_WR850GV2V3,
+
+       BCM47XX_BOARD_NETGEAR_WGR614V8,
+       BCM47XX_BOARD_NETGEAR_WGR614V9,
+       BCM47XX_BOARD_NETGEAR_WNDR3300,
+       BCM47XX_BOARD_NETGEAR_WNDR3400V1,
+       BCM47XX_BOARD_NETGEAR_WNDR3400V2,
+       BCM47XX_BOARD_NETGEAR_WNDR3400VCNA,
+       BCM47XX_BOARD_NETGEAR_WNDR3700V3,
+       BCM47XX_BOARD_NETGEAR_WNDR4000,
+       BCM47XX_BOARD_NETGEAR_WNDR4500V1,
+       BCM47XX_BOARD_NETGEAR_WNDR4500V2,
+       BCM47XX_BOARD_NETGEAR_WNR2000,
+       BCM47XX_BOARD_NETGEAR_WNR3500L,
+       BCM47XX_BOARD_NETGEAR_WNR3500U,
+       BCM47XX_BOARD_NETGEAR_WNR3500V2,
+       BCM47XX_BOARD_NETGEAR_WNR3500V2VC,
+       BCM47XX_BOARD_NETGEAR_WNR834BV2,
+
+       BCM47XX_BOARD_PHICOMM_M1,
+
+       BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
+
+       BCM47XX_BOARD_ZTE_H218N,
+
+       BCM47XX_BOARD_UNKNOWN,
+       BCM47XX_BOARD_NO,
+};
+
+#define BCM47XX_BOARD_MAX_NAME 30
+
+void bcm47xx_board_detect(void);
+enum bcm47xx_board bcm47xx_board_get(void);
+const char *bcm47xx_board_get_name(void);
+
+#endif /* __BCM47XX_BOARD_H */
index b8e7be8f34dd472076b7ffd10d7de2d14c11b638..36a3fc1aa3ae326def39379e03db8258c24d82d3 100644 (file)
@@ -48,4 +48,6 @@ static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
                printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
 }
 
+int bcm47xx_nvram_gpio_pin(const char *name);
+
 #endif /* __BCM47XX_NVRAM_H */
index 47fb247f9663b501796a7c255b32525c225cffed..f9f448650505bc95dc2b5e426c3acf4869ffbfc5 100644 (file)
@@ -52,23 +52,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 0;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-       BUG();
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 1;
 }
 
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       BUG();
-       return 0;
-}
-
 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..acce27f
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ *     CPU feature overrides for DECstation systems.  Two variations
+ *     are generally applicable.
+ *
+ *     Copyright (C) 2013  Maciej W. Rozycki
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+
+/* Generic ones first.  */
+#define cpu_has_tlb                    1
+#define cpu_has_tx39_cache             0
+#define cpu_has_fpu                    1
+#define cpu_has_divec                  0
+#define cpu_has_prefetch               0
+#define cpu_has_mcheck                 0
+#define cpu_has_ejtag                  0
+#define cpu_has_mips16                 0
+#define cpu_has_mdmx                   0
+#define cpu_has_mips3d                 0
+#define cpu_has_smartmips              0
+#define cpu_has_rixi                   0
+#define cpu_has_vtag_icache            0
+#define cpu_has_ic_fills_f_dc          0
+#define cpu_has_pindexed_dcache                0
+#define cpu_has_local_ebase            0
+#define cpu_icache_snoops_remote_store 1
+#define cpu_has_mips_4                 0
+#define cpu_has_mips_5                 0
+#define cpu_has_mips32r1               0
+#define cpu_has_mips32r2               0
+#define cpu_has_mips64r1               0
+#define cpu_has_mips64r2               0
+#define cpu_has_dsp                    0
+#define cpu_has_mipsmt                 0
+#define cpu_has_userlocal              0
+
+/* R3k-specific ones.  */
+#ifdef CONFIG_CPU_R3000
+#define cpu_has_4kex                   0
+#define cpu_has_3k_cache               1
+#define cpu_has_4k_cache               0
+#define cpu_has_32fpr                  0
+#define cpu_has_counter                        0
+#define cpu_has_watch                  0
+#define cpu_has_vce                    0
+#define cpu_has_cache_cdex_p           0
+#define cpu_has_cache_cdex_s           0
+#define cpu_has_llsc                   0
+#define cpu_has_dc_aliases             0
+#define cpu_has_mips_2                 0
+#define cpu_has_mips_3                 0
+#define cpu_has_nofpuex                        1
+#define cpu_has_inclusive_pcaches      0
+#define cpu_dcache_line_size()         4
+#define cpu_icache_line_size()         4
+#define cpu_scache_line_size()         0
+#endif /* CONFIG_CPU_R3000 */
+
+/* R4k-specific ones.  */
+#ifdef CONFIG_CPU_R4X00
+#define cpu_has_4kex                   1
+#define cpu_has_3k_cache               0
+#define cpu_has_4k_cache               1
+#define cpu_has_32fpr                  1
+#define cpu_has_counter                        1
+#define cpu_has_watch                  1
+#define cpu_has_vce                    1
+#define cpu_has_cache_cdex_p           1
+#define cpu_has_cache_cdex_s           1
+#define cpu_has_llsc                   1
+#define cpu_has_dc_aliases             (PAGE_SIZE < 0x4000)
+#define cpu_has_mips_2                 1
+#define cpu_has_mips_3                 1
+#define cpu_has_nofpuex                        0
+#define cpu_has_inclusive_pcaches      1
+#define cpu_dcache_line_size()         16
+#define cpu_icache_line_size()         16
+#define cpu_scache_line_size()         32
+#endif /* CONFIG_CPU_R4X00 */
+
+#endif /* __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H */
index 74cb99257d5b9b25dda5aec6fa1eaa1553612086..a9e8f6b62b0b9c9fa2a073f1aeb5c08f1d5a6894 100644 (file)
@@ -47,16 +47,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
index 06c441968e6ed4fcb31f6c71f506063d198bfb28..4ffddfdb50623bbbcbc2f5eec3ad9aabb7f78bcc 100644 (file)
@@ -58,16 +58,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 1;               /* IP27 non-coherent mode is unsupported */
index 073f0c4760ba8cc3cc7cf59380dfbc58c6a409bd..104cfbc3ed63291ec51a6e13152257516cd1d85c 100644 (file)
@@ -80,17 +80,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-       return;
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 0;               /* IP32 is non-coherent */
index 9fc1e9ad7038879840d73fc142d7a88b4ebde218..949003ef97b37d0d4023f3da08694c4409139a17 100644 (file)
@@ -48,16 +48,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 0;
index e1433055fe98b5f6e876313bbdb98b23a2021e75..aeb2c05d61456de8b0143984fe1c9626e8acae6d 100644 (file)
@@ -53,16 +53,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 0;
diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h
deleted file mode 100644 (file)
index b341108..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_ASIC_H
-#define _ASM_MACH_POWERTV_ASIC_H
-
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <asm/mach-powertv/asic_regs.h>
-
-#define DVR_CAPABLE    (1<<0)
-#define PCIE_CAPABLE   (1<<1)
-#define FFS_CAPABLE    (1<<2)
-#define DISPLAY_CAPABLE (1<<3)
-
-/* Platform Family types
- * For compitability, the new value must be added in the end */
-enum family_type {
-       FAMILY_8500,
-       FAMILY_8500RNG,
-       FAMILY_4500,
-       FAMILY_1500,
-       FAMILY_8600,
-       FAMILY_4600,
-       FAMILY_4600VZA,
-       FAMILY_8600VZB,
-       FAMILY_1500VZE,
-       FAMILY_1500VZF,
-       FAMILY_8700,
-       FAMILIES
-};
-
-/* Register maps for each ASIC */
-extern const struct register_map calliope_register_map;
-extern const struct register_map cronus_register_map;
-extern const struct register_map gaia_register_map;
-extern const struct register_map zeus_register_map;
-
-extern struct resource dvr_cronus_resources[];
-extern struct resource dvr_gaia_resources[];
-extern struct resource dvr_zeus_resources[];
-extern struct resource non_dvr_calliope_resources[];
-extern struct resource non_dvr_cronus_resources[];
-extern struct resource non_dvr_cronuslite_resources[];
-extern struct resource non_dvr_gaia_resources[];
-extern struct resource non_dvr_vz_calliope_resources[];
-extern struct resource non_dvr_vze_calliope_resources[];
-extern struct resource non_dvr_vzf_calliope_resources[];
-extern struct resource non_dvr_zeus_resources[];
-
-extern void powertv_platform_init(void);
-extern void platform_alloc_bootmem(void);
-extern enum asic_type platform_get_asic(void);
-extern enum family_type platform_get_family(void);
-extern int platform_supports_dvr(void);
-extern int platform_supports_ffs(void);
-extern int platform_supports_pcie(void);
-extern int platform_supports_display(void);
-extern void configure_platform(void);
-
-/* Platform Resources */
-#define ASIC_RESOURCE_GET_EXISTS 1
-extern struct resource *asic_resource_get(const char *name);
-extern void platform_release_memory(void *baddr, int size);
-
-/* USB configuration */
-struct usb_hcd;                        /* Forward reference */
-extern void platform_configure_usb_ehci(void);
-extern void platform_unconfigure_usb_ehci(void);
-extern void platform_configure_usb_ohci(void);
-extern void platform_unconfigure_usb_ohci(void);
-
-/* Resource for ASIC registers */
-extern struct resource asic_resource;
-extern int platform_usb_devices_init(struct platform_device **echi_dev,
-       struct platform_device **ohci_dev);
-
-/* Reboot Cause */
-extern void set_reboot_cause(char code, unsigned int data, unsigned int data2);
-extern void set_locked_reboot_cause(char code, unsigned int data,
-       unsigned int data2);
-
-enum sys_reboot_type {
-       sys_unknown_reboot = 0x00,      /* Unknown reboot cause */
-       sys_davic_change = 0x01,        /* Reboot due to change in DAVIC
-                                        * mode */
-       sys_user_reboot = 0x02,         /* Reboot initiated by user */
-       sys_system_reboot = 0x03,       /* Reboot initiated by OS */
-       sys_trap_reboot = 0x04,         /* Reboot due to a CPU trap */
-       sys_silent_reboot = 0x05,       /* Silent reboot */
-       sys_boot_ldr_reboot = 0x06,     /* Bootloader reboot */
-       sys_power_up_reboot = 0x07,     /* Power on bootup.  Older
-                                        * drivers may report as
-                                        * userReboot. */
-       sys_code_change = 0x08,         /* Reboot to take code change.
-                                        * Older drivers may report as
-                                        * userReboot. */
-       sys_hardware_reset = 0x09,      /* HW watchdog or front-panel
-                                        * reset button reset.  Older
-                                        * drivers may report as
-                                        * userReboot. */
-       sys_watchdogInterrupt = 0x0A    /* Pre-watchdog interrupt */
-};
-
-#endif /* _ASM_MACH_POWERTV_ASIC_H */
diff --git a/arch/mips/include/asm/mach-powertv/asic_reg_map.h b/arch/mips/include/asm/mach-powertv/asic_reg_map.h
deleted file mode 100644 (file)
index 20348e8..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- *                             asic_reg_map.h
- *
- * A macro-enclosed list of the elements for the register_map structure for
- * use in defining and manipulating the structure.
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-REGISTER_MAP_ELEMENT(eic_slow0_strt_add)
-REGISTER_MAP_ELEMENT(eic_cfg_bits)
-REGISTER_MAP_ELEMENT(eic_ready_status)
-REGISTER_MAP_ELEMENT(chipver3)
-REGISTER_MAP_ELEMENT(chipver2)
-REGISTER_MAP_ELEMENT(chipver1)
-REGISTER_MAP_ELEMENT(chipver0)
-REGISTER_MAP_ELEMENT(uart1_intstat)
-REGISTER_MAP_ELEMENT(uart1_inten)
-REGISTER_MAP_ELEMENT(uart1_config1)
-REGISTER_MAP_ELEMENT(uart1_config2)
-REGISTER_MAP_ELEMENT(uart1_divisorhi)
-REGISTER_MAP_ELEMENT(uart1_divisorlo)
-REGISTER_MAP_ELEMENT(uart1_data)
-REGISTER_MAP_ELEMENT(uart1_status)
-REGISTER_MAP_ELEMENT(int_stat_3)
-REGISTER_MAP_ELEMENT(int_stat_2)
-REGISTER_MAP_ELEMENT(int_stat_1)
-REGISTER_MAP_ELEMENT(int_stat_0)
-REGISTER_MAP_ELEMENT(int_config)
-REGISTER_MAP_ELEMENT(int_int_scan)
-REGISTER_MAP_ELEMENT(ien_int_3)
-REGISTER_MAP_ELEMENT(ien_int_2)
-REGISTER_MAP_ELEMENT(ien_int_1)
-REGISTER_MAP_ELEMENT(ien_int_0)
-REGISTER_MAP_ELEMENT(int_level_3_3)
-REGISTER_MAP_ELEMENT(int_level_3_2)
-REGISTER_MAP_ELEMENT(int_level_3_1)
-REGISTER_MAP_ELEMENT(int_level_3_0)
-REGISTER_MAP_ELEMENT(int_level_2_3)
-REGISTER_MAP_ELEMENT(int_level_2_2)
-REGISTER_MAP_ELEMENT(int_level_2_1)
-REGISTER_MAP_ELEMENT(int_level_2_0)
-REGISTER_MAP_ELEMENT(int_level_1_3)
-REGISTER_MAP_ELEMENT(int_level_1_2)
-REGISTER_MAP_ELEMENT(int_level_1_1)
-REGISTER_MAP_ELEMENT(int_level_1_0)
-REGISTER_MAP_ELEMENT(int_level_0_3)
-REGISTER_MAP_ELEMENT(int_level_0_2)
-REGISTER_MAP_ELEMENT(int_level_0_1)
-REGISTER_MAP_ELEMENT(int_level_0_0)
-REGISTER_MAP_ELEMENT(int_docsis_en)
-REGISTER_MAP_ELEMENT(mips_pll_setup)
-REGISTER_MAP_ELEMENT(fs432x4b4_usb_ctl)
-REGISTER_MAP_ELEMENT(test_bus)
-REGISTER_MAP_ELEMENT(crt_spare)
-REGISTER_MAP_ELEMENT(usb2_ohci_int_mask)
-REGISTER_MAP_ELEMENT(usb2_strap)
-REGISTER_MAP_ELEMENT(ehci_hcapbase)
-REGISTER_MAP_ELEMENT(ohci_hc_revision)
-REGISTER_MAP_ELEMENT(bcm1_bs_lmi_steer)
-REGISTER_MAP_ELEMENT(usb2_control)
-REGISTER_MAP_ELEMENT(usb2_stbus_obc)
-REGISTER_MAP_ELEMENT(usb2_stbus_mess_size)
-REGISTER_MAP_ELEMENT(usb2_stbus_chunk_size)
-REGISTER_MAP_ELEMENT(pcie_regs)
-REGISTER_MAP_ELEMENT(tim_ch)
-REGISTER_MAP_ELEMENT(tim_cl)
-REGISTER_MAP_ELEMENT(gpio_dout)
-REGISTER_MAP_ELEMENT(gpio_din)
-REGISTER_MAP_ELEMENT(gpio_dir)
-REGISTER_MAP_ELEMENT(watchdog)
-REGISTER_MAP_ELEMENT(front_panel)
-REGISTER_MAP_ELEMENT(misc_clk_ctl1)
-REGISTER_MAP_ELEMENT(misc_clk_ctl2)
-REGISTER_MAP_ELEMENT(crt_ext_ctl)
-REGISTER_MAP_ELEMENT(register_maps)
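The file above is the classic X-macro pattern: it is included several times, and each includer defines REGISTER_MAP_ELEMENT() so the single list expands once into struct fields and again into per-register code. A minimal, self-contained sketch of the same idea, with the register list trimmed and kept in a macro instead of a separate header so it compiles on its own:

#include <stdio.h>

#define DEMO_REG_LIST \
	DEMO_REG(chipver0) \
	DEMO_REG(int_config) \
	DEMO_REG(watchdog)

/* First expansion: one member per listed register. */
#define DEMO_REG(x) unsigned long x;
struct demo_register_map {
	DEMO_REG_LIST
};
#undef DEMO_REG

/* Second expansion: apply an offset to every member, as
 * register_map_offset_phys() does with the real list. */
static void demo_offset_all(struct demo_register_map *map, unsigned long off)
{
#define DEMO_REG(x) map->x += off;
	DEMO_REG_LIST
#undef DEMO_REG
}

int main(void)
{
	struct demo_register_map map = { 0x10, 0x20, 0x30 };

	demo_offset_all(&map, 0x09000000);	/* ZEUS_IO_BASE from asic_regs.h */
	printf("watchdog at %#lx\n", map.watchdog);	/* prints 0x9000030 */
	return 0;
}
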
diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h
deleted file mode 100644 (file)
index 06712ab..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef __ASM_MACH_POWERTV_ASIC_H_
-#define __ASM_MACH_POWERTV_ASIC_H_
-#include <linux/io.h>
-
-/* ASIC types */
-enum asic_type {
-       ASIC_UNKNOWN,
-       ASIC_ZEUS,
-       ASIC_CALLIOPE,
-       ASIC_CRONUS,
-       ASIC_CRONUSLITE,
-       ASIC_GAIA,
-       ASICS                   /* Number of supported ASICs */
-};
-
-/* hardcoded values read from Chip Version registers */
-#define CRONUS_10      0x0B4C1C20
-#define CRONUS_11      0x0B4C1C21
-#define CRONUSLITE_10  0x0B4C1C40
-
-#define NAND_FLASH_BASE                0x03000000
-#define CALLIOPE_IO_BASE       0x08000000
-#define GAIA_IO_BASE           0x09000000
-#define CRONUS_IO_BASE         0x09000000
-#define ZEUS_IO_BASE           0x09000000
-
-#define ASIC_IO_SIZE           0x01000000
-
-/* Definitions for backward compatibility */
-#define UART1_INTSTAT  uart1_intstat
-#define UART1_INTEN    uart1_inten
-#define UART1_CONFIG1  uart1_config1
-#define UART1_CONFIG2  uart1_config2
-#define UART1_DIVISORHI uart1_divisorhi
-#define UART1_DIVISORLO uart1_divisorlo
-#define UART1_DATA     uart1_data
-#define UART1_STATUS   uart1_status
-
-/* ASIC register enumeration */
-union register_map_entry {
-       unsigned long phys;
-       u32 *virt;
-};
-
-#define REGISTER_MAP_ELEMENT(x) union register_map_entry x;
-struct register_map {
-#include <asm/mach-powertv/asic_reg_map.h>
-};
-#undef REGISTER_MAP_ELEMENT
-
-/**
- * register_map_offset_phys - add an offset to the physical address
- * @map:       Pointer to the &struct register_map
- * @offset:    Value to add
- *
- * Only adds the base to non-zero physical addresses
- */
-static inline void register_map_offset_phys(struct register_map *map,
-       unsigned long offset)
-{
-#define REGISTER_MAP_ELEMENT(x)                do {                            \
-               if (map->x.phys != 0)                                   \
-                       map->x.phys += offset;                          \
-       } while (false);
-
-#include <asm/mach-powertv/asic_reg_map.h>
-#undef REGISTER_MAP_ELEMENT
-}
-
-/**
- * register_map_virtualize - Convert &register_map to virtual addresses
- * @map:       Pointer to &register_map to virtualize
- */
-static inline void register_map_virtualize(struct register_map *map)
-{
-#define REGISTER_MAP_ELEMENT(x)                do {                            \
-               map->x.virt = (!map->x.phys) ? NULL :                   \
-                       UNCAC_ADDR(phys_to_virt(map->x.phys));          \
-       } while (false);
-
-#include <asm/mach-powertv/asic_reg_map.h>
-#undef REGISTER_MAP_ELEMENT
-}
-
-extern struct register_map _asic_register_map;
-extern unsigned long asic_phy_base;
-
-/*
- * Macros to interface to registers through their ioremapped address
- * asic_reg_phys_addr  Returns the physical address of the given register
- * asic_reg_addr       Returns the iomapped virtual address of the given
- *                     register.
- */
-#define asic_reg_addr(x)       (_asic_register_map.x.virt)
-#define asic_reg_phys_addr(x)  (virt_to_phys((void *) CAC_ADDR(        \
-                                       (unsigned long) asic_reg_addr(x))))
-
-/*
- * The asic_reg macro is gone. It should be replaced by either asic_read or
- * asic_write, as appropriate.
- */
-
-#define asic_read(x)           readl(asic_reg_addr(x))
-#define asic_write(v, x)       writel(v, asic_reg_addr(x))
-
-extern void asic_irq_init(void);
-#endif
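A hedged sketch of how platform code could have gone through the map once register_map_virtualize() had run; uart1_status and uart1_data are register names from asic_reg_map.h, but the ready bit and the helper itself are illustrative assumptions, not taken from the removed drivers:

#include <asm/mach-powertv/asic.h>

#define DEMO_TX_READY	0x1	/* illustrative bit, not from a datasheet */

static void demo_uart_putc(char c)
{
	while (!(asic_read(uart1_status) & DEMO_TX_READY))
		;	/* busy-wait on the status register */
	asic_write(c, uart1_data);
}
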
diff --git a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h
deleted file mode 100644 (file)
index 58c76ec..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2010  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
-#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
-#define cpu_has_tlb                    1
-#define cpu_has_4kex                   1
-#define cpu_has_3k_cache               0
-#define cpu_has_4k_cache               1
-#define cpu_has_tx39_cache             0
-#define cpu_has_fpu                    0
-#define cpu_has_counter                        1
-#define cpu_has_watch                  1
-#define cpu_has_divec                  1
-#define cpu_has_vce                    0
-#define cpu_has_cache_cdex_p           0
-#define cpu_has_cache_cdex_s           0
-#define cpu_has_mcheck                 1
-#define cpu_has_ejtag                  1
-#define cpu_has_llsc                   1
-#define cpu_has_mips16                 0
-#define cpu_has_mdmx                   0
-#define cpu_has_mips3d                 0
-#define cpu_has_smartmips              0
-#define cpu_has_vtag_icache            0
-#define cpu_has_dc_aliases             0
-#define cpu_has_ic_fills_f_dc          0
-#define cpu_has_mips32r1               0
-#define cpu_has_mips32r2               1
-#define cpu_has_mips64r1               0
-#define cpu_has_mips64r2               0
-#define cpu_has_dsp                    0
-#define cpu_has_dsp2                   0
-#define cpu_has_mipsmt                 0
-#define cpu_has_userlocal              0
-#define cpu_has_nofpuex                        0
-#define cpu_has_64bits                 0
-#define cpu_has_64bit_zero_reg         0
-#define cpu_has_vint                   1
-#define cpu_has_veic                   1
-#define cpu_has_inclusive_pcaches      0
-
-#define cpu_dcache_line_size()         32
-#define cpu_icache_line_size()         32
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
deleted file mode 100644 (file)
index f831672..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Version from mach-generic modified to support PowerTV port
- * Portions Copyright (C) 2009 Cisco Systems, Inc.
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- *
- */
-
-#ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
-#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
-
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <asm/mach-powertv/asic.h>
-
-static inline bool is_kseg2(void *addr)
-{
-       return (unsigned long)addr >= KSEG2;
-}
-
-static inline unsigned long virt_to_phys_from_pte(void *addr)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-
-       unsigned long virt_addr = (unsigned long)addr;
-       unsigned long phys_addr = 0UL;
-
-       /* get the page global directory. */
-       pgd = pgd_offset_k(virt_addr);
-
-       if (!pgd_none(*pgd)) {
-               /* get the page upper directory */
-               pud = pud_offset(pgd, virt_addr);
-               if (!pud_none(*pud)) {
-                       /* get the page middle directory */
-                       pmd = pmd_offset(pud, virt_addr);
-                       if (!pmd_none(*pmd)) {
-                               /* get a pointer to the page table entry */
-                               ptep = pte_offset(pmd, virt_addr);
-                               pte = *ptep;
-                               /* check for a valid page */
-                               if (pte_present(pte)) {
-                                       /* get the physical address the page is
-                                        * referring to */
-                                       phys_addr = (unsigned long)
-                                               page_to_phys(pte_page(pte));
-                                       /* add the offset within the page */
-                                       phys_addr |= (virt_addr & ~PAGE_MASK);
-                               }
-                       }
-               }
-       }
-
-       return phys_addr;
-}
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-       size_t size)
-{
-       if (is_kseg2(addr))
-               return phys_to_dma(virt_to_phys_from_pte(addr));
-       else
-               return phys_to_dma(virt_to_phys(addr));
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       return phys_to_dma(page_to_phys(page));
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       return dma_to_phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 0;
-}
-
-#endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */
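The final step of virt_to_phys_from_pte() is plain address arithmetic: the physical page address recovered from the PTE is combined with the page offset of the virtual address. A self-contained sketch with made-up example values:

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_MASK	(~((1UL << DEMO_PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long virt_addr = 0xc0012345UL;	/* some KSEG2 address */
	unsigned long page_phys = 0x1a2b3000UL;	/* page_to_phys(pte_page(pte)) */
	unsigned long phys_addr = page_phys | (virt_addr & ~DEMO_PAGE_MASK);

	printf("phys = %#lx\n", phys_addr);	/* 0x1a2b3345 */
	return 0;
}
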
diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h
deleted file mode 100644 (file)
index 6c463be..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
-#define _ASM_MACH_POWERTV_INTERRUPTS_H_
-
-/*
- * Defines for all of the interrupt lines
- */
-
-/* Definitions for backward compatibility */
-#define kIrq_Uart1             irq_uart1
-
-#define ibase 0
-
-/*------------- Register: int_stat_3 */
-/* 126 unused (bit 31) */
-#define irq_asc2video          (ibase+126)     /* ASC 2 Video Interrupt */
-#define irq_asc1video          (ibase+125)     /* ASC 1 Video Interrupt */
-#define irq_comms_block_wd     (ibase+124)     /* Comms Block Watchdog Interrupt */
-#define irq_fdma_mailbox       (ibase+123)     /* FDMA Mailbox Output */
-#define irq_fdma_gp            (ibase+122)     /* FDMA GP Output */
-#define irq_mips_pic           (ibase+121)     /* MIPS Performance Counter
-                                                * Interrupt */
-#define irq_mips_timer         (ibase+120)     /* MIPS Timer Interrupt */
-#define irq_memory_protect     (ibase+119)     /* Memory Protection Interrupt
-                                                * -- Ored by glue logic inside
-                                                *  SPARC ILC (see
-                                                *  INT_MEM_PROT_STAT, below,
-                                                *  for individual interrupts)
-                                                */
-/* 118 unused (bit 22) */
-#define irq_sbag               (ibase+117)     /* SBAG Interrupt -- Ored by
-                                                * glue logic inside SPARC ILC
-                                                * (see INT_SBAG_STAT, below,
-                                                * for individual interrupts) */
-#define irq_qam_b_fec          (ibase+116)     /* QAM  B FEC Interrupt */
-#define irq_qam_a_fec          (ibase+115)     /* QAM A FEC Interrupt */
-/* 114 unused  (bit 18) */
-#define irq_mailbox            (ibase+113)     /* Mailbox Debug Interrupt  --
-                                                * Ored by glue logic inside
-                                                * SPARC ILC (see
-                                                * INT_MAILBOX_STAT, below, for
-                                                * individual interrupts) */
-#define irq_fuse_stat1         (ibase+112)     /* Fuse Status 1 */
-#define irq_fuse_stat2         (ibase+111)     /* Fuse Status 2 */
-#define irq_fuse_stat3         (ibase+110)     /* Blitter Interrupt / Fuse
-                                                * Status 3 */
-#define irq_blitter            (ibase+110)     /* Blitter Interrupt / Fuse
-                                                * Status 3 */
-#define irq_avc1_pp0           (ibase+109)     /* AVC Decoder #1 PP0
-                                                * Interrupt */
-#define irq_avc1_pp1           (ibase+108)     /* AVC Decoder #1 PP1
-                                                * Interrupt */
-#define irq_avc1_mbe           (ibase+107)     /* AVC Decoder #1 MBE
-                                                * Interrupt */
-#define irq_avc2_pp0           (ibase+106)     /* AVC Decoder #2 PP0
-                                                * Interrupt */
-#define irq_avc2_pp1           (ibase+105)     /* AVC Decoder #2 PP1
-                                                * Interrupt */
-#define irq_avc2_mbe           (ibase+104)     /* AVC Decoder #2 MBE
-                                                * Interrupt */
-#define irq_zbug_spi           (ibase+103)     /* Zbug SPI Slave Interrupt */
-#define irq_qam_mod2           (ibase+102)     /* QAM Modulator 2 DMA
-                                                * Interrupt */
-#define irq_ir_rx              (ibase+101)     /* IR RX 2 Interrupt */
-#define irq_aud_dsp2           (ibase+100)     /* Audio DSP #2 Interrupt */
-#define irq_aud_dsp1           (ibase+99)      /* Audio DSP #1 Interrupt */
-#define irq_docsis             (ibase+98)      /* DOCSIS Debug Interrupt */
-#define irq_sd_dvp1            (ibase+97)      /* SD DVP #1 Interrupt */
-#define irq_sd_dvp2            (ibase+96)      /* SD DVP #2 Interrupt */
-/*------------- Register: int_stat_2 */
-#define irq_hd_dvp             (ibase+95)      /* HD DVP Interrupt */
-#define kIrq_Prewatchdog       (ibase+94)      /* watchdog Pre-Interrupt */
-#define irq_timer2             (ibase+93)      /* Programmable Timer
-                                                * Interrupt 2 */
-#define irq_1394               (ibase+92)      /* 1394 Firewire Interrupt */
-#define irq_usbohci            (ibase+91)      /* USB 2.0 OHCI Interrupt */
-#define irq_usbehci            (ibase+90)      /* USB 2.0 EHCI Interrupt */
-#define irq_pciexp             (ibase+89)      /* PCI Express 0 Interrupt */
-#define irq_pciexp0            (ibase+89)      /* PCI Express 0 Interrupt */
-#define irq_afe1               (ibase+88)      /* AFE 1 Interrupt */
-#define irq_sata               (ibase+87)      /* SATA 1 Interrupt */
-#define irq_sata1              (ibase+87)      /* SATA 1 Interrupt */
-#define irq_dtcp               (ibase+86)      /* DTCP Interrupt */
-#define irq_pciexp1            (ibase+85)      /* PCI Express 1 Interrupt */
-/* 84 unused   (bit 20) */
-/* 83 unused   (bit 19) */
-/* 82 unused   (bit 18) */
-#define irq_sata2              (ibase+81)      /* SATA2 Interrupt */
-#define irq_uart2              (ibase+80)      /* UART2 Interrupt */
-#define irq_legacy_usb         (ibase+79)      /* Legacy USB Host ISR (1.1
-                                                * Host module) */
-#define irq_pod                        (ibase+78)      /* POD Interrupt */
-#define irq_slave_usb          (ibase+77)      /* Slave USB */
-#define irq_denc1              (ibase+76)      /* DENC #1 VTG Interrupt */
-#define irq_vbi_vtg            (ibase+75)      /* VBI VTG Interrupt */
-#define irq_afe2               (ibase+74)      /* AFE 2 Interrupt */
-#define irq_denc2              (ibase+73)      /* DENC #2 VTG Interrupt */
-#define irq_asc2               (ibase+72)      /* ASC #2 Interrupt */
-#define irq_asc1               (ibase+71)      /* ASC #1 Interrupt */
-#define irq_mod_dma            (ibase+70)      /* Modulator DMA Interrupt */
-#define irq_byte_eng1          (ibase+69)      /* Byte Engine Interrupt [1] */
-#define irq_byte_eng0          (ibase+68)      /* Byte Engine Interrupt [0] */
-/* 67 unused   (bit 03) */
-/* 66 unused   (bit 02) */
-/* 65 unused   (bit 01) */
-/* 64 unused   (bit 00) */
-/*------------- Register: int_stat_1 */
-/* 63 unused   (bit 31) */
-/* 62 unused   (bit 30) */
-/* 61 unused   (bit 29) */
-/* 60 unused   (bit 28) */
-/* 59 unused   (bit 27) */
-/* 58 unused   (bit 26) */
-/* 57 unused   (bit 25) */
-/* 56 unused   (bit 24) */
-#define irq_buf_dma_mem2mem    (ibase+55)      /* BufDMA Memory to Memory
-                                                * Interrupt */
-#define irq_buf_dma_usbtransmit (ibase+54)     /* BufDMA USB Transmit
-                                                * Interrupt */
-#define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Transmit
-                                                * Interrupt */
-#define irq_buf_dma_transmit_error (ibase+52)  /* BufDMA Transmit Error
-                                                * Interrupt */
-#define irq_buf_dma_usbrecv    (ibase+51)      /* BufDMA USB Receive
-                                                * Interrupt */
-#define irq_buf_dma_qpskpodrecv (ibase+50)     /* BufDMA QPSK/POD Receive
-                                                * Interrupt */
-#define irq_buf_dma_recv_error (ibase+49)      /* BufDMA Receive Error
-                                                * Interrupt */
-#define irq_qamdma_transmit_play (ibase+48)    /* QAMDMA Transmit/Play
-                                                * Interrupt */
-#define irq_qamdma_transmit_error (ibase+47)   /* QAMDMA Transmit Error
-                                                * Interrupt */
-#define irq_qamdma_recv2high   (ibase+46)      /* QAMDMA Receive 2 High
-                                                * (Chans 63-32) */
-#define irq_qamdma_recv2low    (ibase+45)      /* QAMDMA Receive 2 Low
-                                                * (Chans 31-0) */
-#define irq_qamdma_recv1high   (ibase+44)      /* QAMDMA Receive 1 High
-                                                * (Chans 63-32) */
-#define irq_qamdma_recv1low    (ibase+43)      /* QAMDMA Receive 1 Low
-                                                * (Chans 31-0) */
-#define irq_qamdma_recv_error  (ibase+42)      /* QAMDMA Receive Error
-                                                * Interrupt */
-#define irq_mpegsplice         (ibase+41)      /* MPEG Splice Interrupt */
-#define irq_deinterlace_rdy    (ibase+40)      /* Deinterlacer Frame Ready
-                                                * Interrupt */
-#define irq_ext_in0            (ibase+39)      /* External Interrupt irq_in0 */
-#define irq_gpio3              (ibase+38)      /* GP I/O IRQ 3 - From GP I/O
-                                                * Module */
-#define irq_gpio2              (ibase+37)      /* GP I/O IRQ 2 - From GP I/O
-                                                * Module (ABE_intN) */
-#define irq_pcrcmplt1          (ibase+36)      /* PCR Capture Complete  or
-                                                * Discontinuity 1 */
-#define irq_pcrcmplt2          (ibase+35)      /* PCR Capture Complete or
-                                                * Discontinuity 2 */
-#define irq_parse_peierr       (ibase+34)      /* PID Parser Error Detect
-                                                * (PEI) */
-#define irq_parse_cont_err     (ibase+33)      /* PID Parser continuity error
-                                                * detect */
-#define irq_ds1framer          (ibase+32)      /* DS1 Framer Interrupt */
-/*------------- Register: int_stat_0 */
-#define irq_gpio1              (ibase+31)      /* GP I/O IRQ 1 - From GP I/O
-                                                * Module */
-#define irq_gpio0              (ibase+30)      /* GP I/O IRQ 0 - From GP I/O
-                                                * Module */
-#define irq_qpsk_out_aloha     (ibase+29)      /* QPSK Output Slotted Aloha
-                                                * (chan 3) Transmission
-                                                * Completed OK */
-#define irq_qpsk_out_tdma      (ibase+28)      /* QPSK Output TDMA (chan 2)
-                                                * Transmission Completed OK */
-#define irq_qpsk_out_reserve   (ibase+27)      /* QPSK Output Reservation
-                                                * (chan 1) Transmission
-                                                * Completed OK */
-#define irq_qpsk_out_aloha_err (ibase+26)      /* QPSK Output Slotted Aloha
-                                                * (chan 3)Transmission
-                                                * completed with Errors. */
-#define irq_qpsk_out_tdma_err  (ibase+25)      /* QPSK Output TDMA (chan 2)
-                                                * Transmission completed with
-                                                * Errors. */
-#define irq_qpsk_out_rsrv_err  (ibase+24)      /* QPSK Output Reservation
-                                                * (chan 1) Transmission
-                                                * completed with Errors */
-#define irq_aloha_fail         (ibase+23)      /* Unsuccessful Resend of Aloha
-                                                * for N times. Aloha retry
-                                                * timeout for channel 3. */
-#define irq_timer1             (ibase+22)      /* Programmable Timer
-                                                * Interrupt */
-#define irq_keyboard           (ibase+21)      /* Keyboard Module Interrupt */
-#define irq_i2c                        (ibase+20)      /* I2C Module Interrupt */
-#define irq_spi                        (ibase+19)      /* SPI Module Interrupt */
-#define irq_irblaster          (ibase+18)      /* IR Blaster Interrupt */
-#define irq_splice_detect      (ibase+17)      /* PID Key Change Interrupt or
-                                                * Splice Detect Interrupt */
-#define irq_se_micro           (ibase+16)      /* Secure Micro I/F Module
-                                                * Interrupt */
-#define irq_uart1              (ibase+15)      /* UART Interrupt */
-#define irq_irrecv             (ibase+14)      /* IR Receiver Interrupt */
-#define irq_host_int1          (ibase+13)      /* Host-to-Host Interrupt 1 */
-#define irq_host_int0          (ibase+12)      /* Host-to-Host Interrupt 0 */
-#define irq_qpsk_hecerr                (ibase+11)      /* QPSK HEC Error Interrupt */
-#define irq_qpsk_crcerr                (ibase+10)      /* QPSK AAL-5 CRC Error
-                                                * Interrupt */
-/* 9 unused    (bit 09) */
-/* 8 unused    (bit 08) */
-#define irq_psicrcerr          (ibase+7)       /* QAM PSI CRC Error
-                                                * Interrupt */
-#define irq_psilength_err      (ibase+6)       /* QAM PSI Length Error
-                                                * Interrupt */
-#define irq_esfforward         (ibase+5)       /* ESF Interrupt Mark From
-                                                * Forward Path Reference -
-                                                * every 3ms when forward Mbits
-                                                * and forward slot control
-                                                * bytes are updated. */
-#define irq_esfreverse         (ibase+4)       /* ESF Interrupt Mark from
-                                                * Reverse Path Reference -
-                                                * delayed from forward mark by
-                                                * the ranging delay plus a
-                                                * fixed amount. When reverse
-                                                * Mbits and reverse slot
-                                                * control bytes are updated.
-                                                * Occurs every 3ms for 3.0M and
-                                                * 1.554 M upstream rates and
-                                                * every 6 ms for 256K upstream
-                                                * rate. */
-#define irq_aloha_timeout      (ibase+3)       /* Slotted-Aloha timeout on
-                                                * Channel 1. */
-#define irq_reservation                (ibase+2)       /* Partial (or Incremental)
-                                                * Reservation Message Completed
-                                                * or Slotted aloha verify for
-                                                * channel 1. */
-#define irq_aloha3             (ibase+1)       /* Slotted-Aloha Message Verify
-                                                * Interrupt or Reservation
-                                                * increment completed for
-                                                * channel 3. */
-#define irq_mpeg_d             (ibase+0)       /* MPEG Decoder Interrupt */
-#endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
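The numbering above follows the four status registers directly: int_stat_0 carries lines 0 to 31, int_stat_1 lines 32 to 63, int_stat_2 lines 64 to 95 and int_stat_3 lines 96 to 127, so a line number is ibase + 32 * register + bit. A small self-contained check:

#include <stdio.h>

#define DEMO_IBASE 0	/* matches "#define ibase 0" above */

static int demo_irq_number(int stat_reg, int bit)
{
	return DEMO_IBASE + 32 * stat_reg + bit;
}

int main(void)
{
	/* irq_usbehci is documented as (ibase+90): int_stat_2, bit 26 */
	printf("int_stat_2, bit 26 -> irq %d\n", demo_irq_number(2, 26));
	return 0;
}
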
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
deleted file mode 100644 (file)
index c86ef09..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- * Portions Copyright (C)  Cisco Systems, Inc.
- */
-#ifndef __ASM_MACH_POWERTV_IOREMAP_H
-#define __ASM_MACH_POWERTV_IOREMAP_H
-
-#include <linux/types.h>
-#include <linux/log2.h>
-#include <linux/compiler.h>
-
-#include <asm/pgtable-bits.h>
-#include <asm/addrspace.h>
-
-/* We're going to mess with bits, so get sizes */
-#define IOR_BPC                        8                       /* Bits per char */
-#define IOR_PHYS_BITS          (IOR_BPC * sizeof(phys_addr_t))
-#define IOR_DMA_BITS           (IOR_BPC * sizeof(dma_addr_t))
-
-/*
- * Define the granularity of physical/DMA mapping in terms of the number
- * of bits that defines the offset within a grain. These will be the
- * least significant bits of the address. The rest of a physical or DMA
- * address will be used to index into an appropriate table to find the
- * offset to add to the address to yield the corresponding DMA or physical
- * address, respectively.
- */
-#define IOR_LSBITS             22                      /* Bits in a grain */
-
-/*
- * Compute the number of most significant address bits after removing those
- * used for the offset within a grain and then compute the number of table
- * entries for the conversion.
- */
-#define IOR_PHYS_MSBITS                (IOR_PHYS_BITS - IOR_LSBITS)
-#define IOR_NUM_PHYS_TO_DMA    ((phys_addr_t) 1 << IOR_PHYS_MSBITS)
-
-#define IOR_DMA_MSBITS         (IOR_DMA_BITS - IOR_LSBITS)
-#define IOR_NUM_DMA_TO_PHYS    ((dma_addr_t) 1 << IOR_DMA_MSBITS)
-
-/*
- * Define data structures used as elements in the arrays for the conversion
- * between physical and DMA addresses. We do some slightly fancy math to
- * compute the width of the offset element of the conversion tables so
- * that we can have the smallest conversion tables. Next, round up the
- * sizes to the next higher power of two, i.e. the offset element will have
- * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
- * bits.  Finally, we compute a shift value that puts the most significant
- * bits of the offset into the most significant bits of the offset element.
- * This makes it more efficient on processors without barrel shifters and
- * easier to see the values if the conversion table is dumped in binary.
- */
-#define _IOR_OFFSET_WIDTH(n)   (1 << order_base_2(n))
-#define IOR_OFFSET_WIDTH(n) \
-       (_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))
-
-#define IOR_PHYS_OFFSET_BITS   IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
-#define IOR_PHYS_SHIFT         (IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)
-
-#define IOR_DMA_OFFSET_BITS    IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
-#define IOR_DMA_SHIFT          (IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
-
-struct ior_phys_to_dma {
-       dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
-               __aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
-};
-
-struct ior_dma_to_phys {
-       dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
-               __aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
-};
-
-extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
-extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
-
-static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
-{
-       return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
-}
-
-static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
-{
-       return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
-}
-
-/* These are not portable and should not be used in drivers. Drivers should
- * be using ioremap() and friends to map physical addresses to virtual
- * addresses and dma_map*() and friends to map virtual addresses into DMA
- * addresses and back.
- */
-static inline dma_addr_t phys_to_dma(phys_addr_t phys)
-{
-       return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(dma_addr_t dma)
-{
-       return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
-}
-
-extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
-       dma_addr_t size);
-
-/*
- * Allow physical addresses to be fixed up to help peripherals located
- * outside the low 32-bit range -- generic pass-through version.
- */
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
-{
-       return phys_addr;
-}
-
-/*
- * Handle the special case of addresses in the area aliased into the first
- * 512 MiB of the processor's physical address space. These turn into either
- * kseg0 or kseg1 addresses, depending on flags.
- */
-static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
-       unsigned long flags)
-{
-       phys_addr_t start_offset;
-       void __iomem *result = NULL;
-
-       /* Start by checking to see whether this is an aliased address */
-       start_offset = _dma_to_phys_offset_raw(start);
-
-       /*
-        * If:
-        * o    the memory is aliased into the first 512 MiB, and
-        * o    the start and end are in the same RAM bank, and
-        * o    we don't have a zero size or wrap around, and
-        * o    we are supposed to create an uncached mapping,
-        *      handle this as a kseg0 or kseg1 address
-        */
-       if (start_offset != 0) {
-               phys_addr_t last;
-               dma_addr_t dma_to_phys_offset;
-
-               last = start + size - 1;
-               dma_to_phys_offset =
-                       _dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;
-
-               if (dma_to_phys_offset == start_offset &&
-                       size != 0 && start <= last) {
-                       phys_t adjusted_start;
-                       adjusted_start = start + start_offset;
-                       if (flags == _CACHE_UNCACHED)
-                               result = (void __iomem *) (unsigned long)
-                                       CKSEG1ADDR(adjusted_start);
-                       else
-                               result = (void __iomem *) (unsigned long)
-                                       CKSEG0ADDR(adjusted_start);
-               }
-       }
-
-       return result;
-}
-
-static inline int plat_iounmap(const volatile void __iomem *addr)
-{
-       return 0;
-}
-#endif /* __ASM_MACH_POWERTV_IOREMAP_H */
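A self-contained sketch of the translation scheme described above: an address is split into a 4 MiB (2^22 byte) grain index and an offset inside the grain, and a per-grain table supplies the value to add when converting between physical and DMA addresses. The real tables hold narrow, shifted offsets to stay small; this sketch stores full offsets, and the table contents are invented:

#include <stdio.h>

#define DEMO_LSBITS	22	/* IOR_LSBITS: offset bits within a grain */
#define DEMO_GRAINS	16	/* enough grains for this example */

static unsigned long demo_phys_to_dma_off[DEMO_GRAINS];

static unsigned long demo_phys_to_dma(unsigned long phys)
{
	return phys + demo_phys_to_dma_off[phys >> DEMO_LSBITS];
}

int main(void)
{
	/* pretend grain 2 (phys 0x00800000-0x00bfffff) is aliased +0x20000000 */
	demo_phys_to_dma_off[2] = 0x20000000UL;

	printf("dma = %#lx\n", demo_phys_to_dma(0x00812345UL));	/* 0x20812345 */
	return 0;
}
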
diff --git a/arch/mips/include/asm/mach-powertv/irq.h b/arch/mips/include/asm/mach-powertv/irq.h
deleted file mode 100644 (file)
index 4bd5d0c..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_IRQ_H
-#define _ASM_MACH_POWERTV_IRQ_H
-#include <asm/mach-powertv/interrupts.h>
-
-#define MIPS_CPU_IRQ_BASE      ibase
-#define NR_IRQS                        127
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/powertv-clock.h b/arch/mips/include/asm/mach-powertv/powertv-clock.h
deleted file mode 100644 (file)
index 6f3e9a0..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-/*
- * Local definitions for the powertv PCI code
- */
-
-#ifndef _POWERTV_PCI_POWERTV_PCI_H_
-#define _POWERTV_PCI_POWERTV_PCI_H_
-extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int asic_pcie_init(void);
-
-extern int log_level;
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/war.h b/arch/mips/include/asm/mach-powertv/war.h
deleted file mode 100644 (file)
index c5651c8..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * This version for the PowerTV platform copied from the Malta version.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- */
-#ifndef __ASM_MACH_POWERTV_WAR_H
-#define __ASM_MACH_POWERTV_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR    0
-#define R4600_V1_HIT_CACHEOP_WAR       0
-#define R4600_V2_HIT_CACHEOP_WAR       0
-#define R5432_CP0_INTERRUPT_WAR                0
-#define BCM1250_M3_WAR                 0
-#define SIBYTE_1956_WAR                        0
-#define MIPS4K_ICACHE_REFILL_WAR       1
-#define MIPS_CACHE_SYNC_WAR            1
-#define TX49XX_ICACHE_INDEX_INV_WAR    0
-#define ICACHE_REFILLS_WORKAROUND_WAR  1
-#define R10000_LLSC_WAR                        0
-#define MIPS34K_MISSED_ITLB_WAR                0
-
-#endif /* __ASM_MACH_POWERTV_WAR_H */
index a02596cf1abd418a80653b5ef36bdda6cbbb4bbd..e33227998713e644a53e21c2e11c3e5fa174c970 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  *
  *  This program is free software; you can distribute it and/or modify it
  *  under the terms of the GNU General Public License (Version 2) as
 #ifndef __ASM_MIPS_BOARDS_PIIX4_H
 #define __ASM_MIPS_BOARDS_PIIX4_H
 
-/************************************************************************
- *  IO register offsets
- ************************************************************************/
-#define PIIX4_ICTLR1_ICW1      0x20
-#define PIIX4_ICTLR1_ICW2      0x21
-#define PIIX4_ICTLR1_ICW3      0x21
-#define PIIX4_ICTLR1_ICW4      0x21
-#define PIIX4_ICTLR2_ICW1      0xa0
-#define PIIX4_ICTLR2_ICW2      0xa1
-#define PIIX4_ICTLR2_ICW3      0xa1
-#define PIIX4_ICTLR2_ICW4      0xa1
-#define PIIX4_ICTLR1_OCW1      0x21
-#define PIIX4_ICTLR1_OCW2      0x20
-#define PIIX4_ICTLR1_OCW3      0x20
-#define PIIX4_ICTLR1_OCW4      0x20
-#define PIIX4_ICTLR2_OCW1      0xa1
-#define PIIX4_ICTLR2_OCW2      0xa0
-#define PIIX4_ICTLR2_OCW3      0xa0
-#define PIIX4_ICTLR2_OCW4      0xa0
-
-
-/************************************************************************
- *  Register encodings.
- ************************************************************************/
-#define PIIX4_OCW2_NSEOI       (0x1 << 5)
-#define PIIX4_OCW2_SEOI                (0x3 << 5)
-#define PIIX4_OCW2_RNSEOI      (0x5 << 5)
-#define PIIX4_OCW2_RAEOIS      (0x4 << 5)
-#define PIIX4_OCW2_RAEOIC      (0x0 << 5)
-#define PIIX4_OCW2_RSEOI       (0x7 << 5)
-#define PIIX4_OCW2_SP          (0x6 << 5)
-#define PIIX4_OCW2_NOP         (0x2 << 5)
-
-#define PIIX4_OCW2_SEL         (0x0 << 3)
-
-#define PIIX4_OCW2_ILS_0       0
-#define PIIX4_OCW2_ILS_1       1
-#define PIIX4_OCW2_ILS_2       2
-#define PIIX4_OCW2_ILS_3       3
-#define PIIX4_OCW2_ILS_4       4
-#define PIIX4_OCW2_ILS_5       5
-#define PIIX4_OCW2_ILS_6       6
-#define PIIX4_OCW2_ILS_7       7
-#define PIIX4_OCW2_ILS_8       0
-#define PIIX4_OCW2_ILS_9       1
-#define PIIX4_OCW2_ILS_10      2
-#define PIIX4_OCW2_ILS_11      3
-#define PIIX4_OCW2_ILS_12      4
-#define PIIX4_OCW2_ILS_13      5
-#define PIIX4_OCW2_ILS_14      6
-#define PIIX4_OCW2_ILS_15      7
-
-#define PIIX4_OCW3_SEL         (0x1 << 3)
-
-#define PIIX4_OCW3_IRR         0x2
-#define PIIX4_OCW3_ISR         0x3
+/* PIRQX Route Control */
+#define PIIX4_FUNC0_PIRQRC                     0x60
+#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE       (1 << 7)
+#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK          0xf
+#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX           16
+/* Top Of Memory */
+#define PIIX4_FUNC0_TOM                                0x69
+#define   PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK           0xf0
+/* Deterministic Latency Control */
+#define PIIX4_FUNC0_DLC                                0x82
+#define   PIIX4_FUNC0_DLC_USBPR_EN                     (1 << 2)
+#define   PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN           (1 << 1)
+#define   PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN       (1 << 0)
+
+/* IDE Timing */
+#define PIIX4_FUNC1_IDETIM_PRIMARY_LO          0x40
+#define PIIX4_FUNC1_IDETIM_PRIMARY_HI          0x41
+#define   PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN  (1 << 7)
+#define PIIX4_FUNC1_IDETIM_SECONDARY_LO                0x42
+#define PIIX4_FUNC1_IDETIM_SECONDARY_HI                0x43
+#define   PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN        (1 << 7)
 
 #endif /* __ASM_MIPS_BOARDS_PIIX4_H */
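A hedged sketch of how board fixup code could use the new PIRQX Route Control definitions to steer PIRQA onto a legacy IRQ; the pci_dev, the helper name and the mask handling are assumptions for illustration, not part of this patch:

#include <linux/pci.h>
#include <asm/mips-boards/piix4.h>

/* Route PIRQA (config offset PIIX4_FUNC0_PIRQRC) to the given ISA IRQ. */
static void demo_route_pirqa(struct pci_dev *pdev, u8 irq)
{
	u8 val;

	pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC, &val);
	val &= ~(PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE |
		 PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK);
	val |= irq & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK;
	pci_write_config_byte(pdev, PIIX4_FUNC0_PIRQRC, val);
}
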
index 3b29079b5424b4fe0f78588ab1b8821aef0a1237..e277bbad28713d3a510cf5e5dec337316f0a745a 100644 (file)
 #endif /* SMTC */
 #include <asm-generic/mm_hooks.h>
 
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)                                 \
 do {                                                                   \
        extern void tlbmiss_handler_setup_pgd(unsigned long);           \
        tlbmiss_handler_setup_pgd((unsigned long)(pgd));                \
 } while (0)
 
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 #define TLBMISS_HANDLER_SETUP()                                                \
        do {                                                            \
                TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);              \
-               write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
+               write_c0_xcontext((unsigned long) smp_processor_id() << \
+                                               SMP_CPUID_REGSHIFT);    \
        } while (0)
 
-#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
+#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
 
 /*
  * For the fast tlb miss handlers, we keep a per cpu array of pointers
@@ -47,21 +47,11 @@ do {                                                                        \
  */
 extern unsigned long pgd_current[];
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
-       pgd_current[smp_processor_id()] = (unsigned long)(pgd)
-
-#ifdef CONFIG_32BIT
 #define TLBMISS_HANDLER_SETUP()                                                \
-       write_c0_context((unsigned long) smp_processor_id() << 25);     \
+       write_c0_context((unsigned long) smp_processor_id() <<          \
+                                               SMP_CPUID_REGSHIFT);    \
        back_to_back_c0_hazard();                                       \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
-#endif
-#ifdef CONFIG_64BIT
-#define TLBMISS_HANDLER_SETUP()                                                \
-       write_c0_context((unsigned long) smp_processor_id() << 26);     \
-       back_to_back_c0_hazard();                                       \
-       TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
-#endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
index 5e6cd0947393295ea87cba5191c5a6fbf728c527..7bba9da110afab3f9c92eb02a52161ac23642d29 100644 (file)
@@ -81,7 +81,6 @@ static inline long regs_return_value(struct pt_regs *regs)
 
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 #define profile_pc(regs) instruction_pointer(regs)
-#define user_stack_pointer(r) ((r)->regs[29])
 
 extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
 extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
@@ -100,4 +99,17 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
        (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;      \
 })
 
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+       return regs->regs[29];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+       unsigned long val)
+{
+       regs->regs[29] = val;
+}
+
 #endif /* _ASM_PTRACE_H */
index a0b2650516ac9a2837a362f4c5768ce609767216..34d1a19171257ff8d8e602988615b8b8e2878ff9 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
 
 /*
@@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)
 static inline void flush_icache_line(unsigned long addr)
 {
        __iflush_prologue
-       cache_op(Hit_Invalidate_I, addr);
+       switch (boot_cpu_type()) {
+       case CPU_LOONGSON2:
+               cache_op(Hit_Invalidate_I_Loongson23, addr);
+               break;
+
+       default:
+               cache_op(Hit_Invalidate_I, addr);
+               break;
+       }
        __iflush_epilogue
 }
 
@@ -208,7 +217,15 @@ static inline void flush_scache_line(unsigned long addr)
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
-       protected_cache_op(Hit_Invalidate_I, addr);
+       switch (boot_cpu_type()) {
+       case CPU_LOONGSON2:
+               protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+               break;
+
+       default:
+               protected_cache_op(Hit_Invalidate_I, addr);
+               break;
+       }
 }
 
 /*
@@ -412,8 +429,8 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
-static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)       \
+static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                    unsigned long end)  \
 {                                                                      \
        unsigned long lsize = cpu_##desc##_line_size();                 \
@@ -432,13 +449,15 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
        __##pfx##flush_epilogue                                         \
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
+       protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
 #endif /* _ASM_R4KCACHE_H */
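The generated range functions walk the region one cache line at a time between line-aligned start and end addresses. A self-contained sketch of that walk with the cache op replaced by a printf; the 32-byte line size and the exact loop shape are assumptions, since the body of the macro is not fully shown in this hunk:

#include <stdio.h>

#define DEMO_LINE_SIZE 32UL	/* assumed cpu_icache_line_size() */

static void demo_blast_icache_range(unsigned long start, unsigned long end)
{
	unsigned long addr = start & ~(DEMO_LINE_SIZE - 1);
	unsigned long aend = (end - 1) & ~(DEMO_LINE_SIZE - 1);

	while (1) {
		printf("cache_op(Hit_Invalidate_I, %#lx)\n", addr);
		if (addr == aend)
			break;
		addr += DEMO_LINE_SIZE;
	}
}

int main(void)
{
	/* touches lines 0x1000, 0x1020 and 0x1040 */
	demo_blast_icache_range(0x1008, 0x1050);
	return 0;
}
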
index e26589ef36eee12a9d5a957edf0df6d59ae7217e..d7bfdeba9e845acfa83270f01bb280a687d785ad 100644 (file)
@@ -5,6 +5,14 @@
 
 extern void setup_early_printk(void);
 
+#ifdef CONFIG_EARLY_PRINTK_8250
+extern void setup_8250_early_printk_port(unsigned long base,
+       unsigned int reg_shift, unsigned int timeout);
+#else
+static inline void setup_8250_early_printk_port(unsigned long base,
+       unsigned int reg_shift, unsigned int timeout) {}
+#endif
+
 extern void set_handler(unsigned long offset, void *addr, unsigned long len);
 extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
 
index 23fc95e65673bcb5d907a77f83714c424092add6..4857e2c8df5ae2eae1ac4bcca27b7dca586842aa 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/asmmacro.h>
 #include <asm/mipsregs.h>
 #include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
 
 /*
  * For SMTC kernel, global IE should be left set, and interrupts
                .endm
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
-#define PTEBASE_SHIFT  19      /* TCBIND */
-#define CPU_ID_REG CP0_TCBIND
-#define CPU_ID_MFC0 mfc0
-#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
-#define PTEBASE_SHIFT  48      /* XCONTEXT */
-#define CPU_ID_REG CP0_XCONTEXT
-#define CPU_ID_MFC0 MFC0
-#else
-#define PTEBASE_SHIFT  23      /* CONTEXT */
-#define CPU_ID_REG CP0_CONTEXT
-#define CPU_ID_MFC0 MFC0
-#endif
                .macro  get_saved_sp    /* SMP variation */
-               CPU_ID_MFC0     k0, CPU_ID_REG
+               ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
 #else
                daddiu  k1, %hi(kernelsp)
                dsll    k1, 16
 #endif
-               LONG_SRL        k0, PTEBASE_SHIFT
+               LONG_SRL        k0, SMP_CPUID_PTRSHIFT
                LONG_ADDU       k1, k0
                LONG_L  k1, %lo(kernelsp)(k1)
                .endm
 
                .macro  set_saved_sp stackp temp temp2
-               CPU_ID_MFC0     \temp, CPU_ID_REG
-               LONG_SRL        \temp, PTEBASE_SHIFT
+               ASM_CPUID_MFC0  \temp, ASM_SMP_CPUID_REG
+               LONG_SRL        \temp, SMP_CPUID_PTRSHIFT
                LONG_S  \stackp, kernelsp(\temp)
                .endm
-#else
+#else /* !CONFIG_SMP */
                .macro  get_saved_sp    /* Uniprocessor variation */
 #ifdef CONFIG_CPU_JUMP_WORKAROUNDS
                /*
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..81c8913
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ *
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#ifndef __ASM_MIPS_SYSCALL_H
+#define __ASM_MIPS_SYSCALL_H
+
+#include <linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+                                 struct pt_regs *regs)
+{
+       return regs->regs[2];
+}
+
+static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
+       struct task_struct *task, struct pt_regs *regs, unsigned int n)
+{
+       unsigned long usp = regs->regs[29];
+
+       switch (n) {
+       case 0: case 1: case 2: case 3:
+               *arg = regs->regs[4 + n];
+
+               return 0;
+
+#ifdef CONFIG_32BIT
+       case 4: case 5: case 6: case 7:
+               return get_user(*arg, (int *)usp + 4 * n);
+#endif
+
+#ifdef CONFIG_64BIT
+       case 4: case 5: case 6: case 7:
+#ifdef CONFIG_MIPS32_O32
+               if (test_thread_flag(TIF_32BIT_REGS))
+                       return get_user(*arg, (int *)usp + 4 * n);
+               else
+#endif
+                       *arg = regs->regs[4 + n];
+
+               return 0;
+#endif
+
+       default:
+               BUG();
+       }
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->regs[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       if (error) {
+               regs->regs[2] = -error;
+               regs->regs[7] = -1;
+       } else {
+               regs->regs[2] = val;
+               regs->regs[7] = 0;
+       }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        unsigned long *args)
+{
+       unsigned long arg;
+       int ret;
+
+       while (n--)
+               ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+
+       /*
+        * No way to communicate an error because this is a void function.
+        */
+#if 0
+       return ret;
+#endif
+}
+
+extern const unsigned long sys_call_table[];
+extern const unsigned long sys32_call_table[];
+extern const unsigned long sysn32_call_table[];
+
+static inline int __syscall_get_arch(void)
+{
+       int arch = EM_MIPS;
+#ifdef CONFIG_64BIT
+       arch |=  __AUDIT_ARCH_64BIT;
+#endif
+#if defined(__LITTLE_ENDIAN)
+       arch |=  __AUDIT_ARCH_LE;
+#endif
+       return arch;
+}
+
+#endif /* __ASM_MIPS_SYSCALL_H */
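
The new asm/syscall.h fills in the asm-generic/syscall.h contract for MIPS: the syscall number and return value live in $v0 (regs->regs[2]), the first four arguments in $a0-$a3 (regs->regs[4]..regs->regs[7]), and o32 arguments 5-8 are read back from the user stack by mips_get_syscall_arg(). A minimal sketch of how a caller might combine these helpers; dump_current_syscall() is a hypothetical function, not part of the patch:

/* Hypothetical helper built on the accessors above: report the syscall a
 * stopped task is currently executing, with its first four arguments. */
static void dump_current_syscall(struct task_struct *task)
{
        struct pt_regs *regs = task_pt_regs(task);
        unsigned long args[4];

        /* Indices 0-3 come straight from $a0-$a3; indices 4-7 would be
         * fetched from the user stack by mips_get_syscall_arg(). */
        syscall_get_arguments(task, regs, 0, 4, args);

        pr_info("syscall %ld(%#lx, %#lx, %#lx, %#lx)\n",
                syscall_get_nr(task, regs),
                args[0], args[1], args[2], args[3]);
}
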
index 61215a34acc68ff87da07665c65c8dc0f6dbfcc1..f9b24bfbdbae96f8cc31d58d7cb0318130ceab9f 100644 (file)
@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_32BIT_ADDR         23      /* 32-bit address space (o32/n32) */
 #define TIF_FPUBOUND           24      /* thread bound to FPU-full CPU set */
 #define TIF_LOAD_WATCH         25      /* If set, load watch registers */
+#define TIF_SYSCALL_TRACEPOINT 26      /* syscall tracepoint instrumentation */
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -132,21 +133,54 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_32BIT_ADDR                (1<<TIF_32BIT_ADDR)
 #define _TIF_FPUBOUND          (1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
+#define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 
 #define _TIF_WORK_SYSCALL_ENTRY        (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
-                                _TIF_SYSCALL_AUDIT)
+                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
-                                _TIF_SYSCALL_AUDIT)
+                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK         \
        (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (_TIF_NOHZ | _TIF_WORK_MASK |           \
-                                _TIF_WORK_SYSCALL_EXIT)
+                                _TIF_WORK_SYSCALL_EXIT |               \
+                                _TIF_SYSCALL_TRACEPOINT)
 
-#endif /* __KERNEL__ */
+/*
+ * We stash processor id into a COP0 register to retrieve it fast
+ * at kernel exception entry.
+ */
+#if defined(CONFIG_MIPS_MT_SMTC)
+#define SMP_CPUID_REG          2, 2    /* TCBIND */
+#define ASM_SMP_CPUID_REG      $2, 2
+#define SMP_CPUID_PTRSHIFT     19
+#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#define SMP_CPUID_REG          20, 0   /* XCONTEXT */
+#define ASM_SMP_CPUID_REG      $20
+#define SMP_CPUID_PTRSHIFT     48
+#else
+#define SMP_CPUID_REG          4, 0    /* CONTEXT */
+#define ASM_SMP_CPUID_REG      $4
+#define SMP_CPUID_PTRSHIFT     23
+#endif
 
+#ifdef CONFIG_64BIT
+#define SMP_CPUID_REGSHIFT     (SMP_CPUID_PTRSHIFT + 3)
+#else
+#define SMP_CPUID_REGSHIFT     (SMP_CPUID_PTRSHIFT + 2)
+#endif
+
+#ifdef CONFIG_MIPS_MT_SMTC
+#define ASM_CPUID_MFC0         mfc0
+#define UASM_i_CPUID_MFC0      uasm_i_mfc0
+#else
+#define ASM_CPUID_MFC0         MFC0
+#define UASM_i_CPUID_MFC0      UASM_i_MFC0
+#endif
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_THREAD_INFO_H */
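
The SMP_CPUID_* constants describe where the CPU (or TC) number sits in the chosen coprocessor-0 register: the CurTC field of TCBIND on SMTC, or the place the SMP setup code writes it in XContext/Context otherwise. Because SMP_CPUID_REGSHIFT is SMP_CPUID_PTRSHIFT plus log2(sizeof(long)), a single right shift by SMP_CPUID_PTRSHIFT turns the raw register value into a byte offset into the per-CPU kernelsp[] array, which is what the get_saved_sp/set_saved_sp macros rely on. A sketch of that arithmetic; saved_sp_for() is illustrative, and it assumes the register effectively holds cpu << SMP_CPUID_REGSHIFT:

/* Illustrative only: the index math behind get_saved_sp, assuming the
 * CP0 register value amounts to cpu << SMP_CPUID_REGSHIFT. */
extern unsigned long kernelsp[];

static unsigned long saved_sp_for(unsigned long cp0_value)
{
        /* SMP_CPUID_REGSHIFT == SMP_CPUID_PTRSHIFT + log2(sizeof(long)),
         * so this shift leaves cpu * sizeof(long): a ready-made byte
         * offset into an array of longs. */
        unsigned long offset = cp0_value >> SMP_CPUID_PTRSHIFT;

        return *(unsigned long *)((char *)kernelsp + offset);
}
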
index 2d7b9df4542dd478d53f53d8151d3381d1c58aee..24f534a7fbc3bd84c7bd3b3bda2238e3ce183943 100644 (file)
@@ -75,7 +75,7 @@ extern int init_r4k_clocksource(void);
 
 static inline int init_mips_clocksource(void)
 {
-#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
+#ifdef CONFIG_CSRC_R4K
        return init_r4k_clocksource();
 #else
        return 0;
index 63c9c886173a68c6a89857b7f7d28046cb712e56..4d3b92886665799d2ca2b746d8efc310cc0c1168 100644 (file)
 
 #include <uapi/asm/unistd.h>
 
+#ifdef CONFIG_MIPS32_N32
+#define NR_syscalls  (__NR_N32_Linux + __NR_N32_Linux_syscalls)
+#elif defined(CONFIG_64BIT)
+#define NR_syscalls  (__NR_64_Linux + __NR_64_Linux_syscalls)
+#else
+#define NR_syscalls  (__NR_O32_Linux + __NR_O32_Linux_syscalls)
+#endif
 
 #ifndef __ASSEMBLY__
 
index 88e292b7719e99963f74692f0ffbd36e60e79c83..e81174432bab0eb7c1e41093e6bdd5ed833fc851 100644 (file)
@@ -33,6 +33,8 @@ struct siginfo;
 #error _MIPS_SZLONG neither 32 nor 64
 #endif
 
+#define __ARCH_SIGSYS
+
 #include <asm-generic/siginfo.h>
 
 typedef struct siginfo {
@@ -97,6 +99,13 @@ typedef struct siginfo {
                        __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
+
+               /* SIGSYS */
+               struct {
+                       void __user *_call_addr; /* calling user insn */
+                       int _syscall;   /* triggering system call number */
+                       unsigned int _arch;     /* AUDIT_ARCH_* of syscall */
+               } _sigsys;
        } _sifields;
 } siginfo_t;
 
index 61c01f054d1b160f753582b60888e59592b95b21..0df9787cd84d9e4a93e04513f1654b0327b3df0f 100644 (file)
@@ -94,4 +94,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _UAPI_ASM_SOCKET_H */
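
SO_MAX_PACING_RATE is the MIPS value for the new socket option that caps a socket's transmit rate (enforced by the fq packet scheduler, in bytes per second). A userspace sketch; limit_pacing() and the 1 MB/s figure are illustrative:

#include <stdint.h>
#include <sys/socket.h>

/* Cap a connected socket's transmit rate to roughly 1 MB/s. */
static int limit_pacing(int fd)
{
        uint32_t rate = 1000000;        /* bytes per second */

        return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                          &rate, sizeof(rate));
}
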
index 423d871a946ba15ae5b5ea70338530949fa8d166..1c1b71752c84cbc26dc2c0f81cc778e0a749a495 100644 (file)
@@ -26,7 +26,6 @@ obj-$(CONFIG_CEVT_TXX9)               += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)     += csrc-bcm1480.o
 obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)      += csrc-ioasic.o
-obj-$(CONFIG_CSRC_POWERTV)     += csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
@@ -35,6 +34,7 @@ obj-$(CONFIG_STACKTRACE)      += stacktrace.o
 obj-$(CONFIG_MODULES)          += mips_ksyms.o module.o
 obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
 
+obj-$(CONFIG_FTRACE_SYSCALLS)  += ftrace.o
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 
 obj-$(CONFIG_CPU_R4K_FPU)      += r4k_fpu.o r4k_switch.o
@@ -84,6 +84,7 @@ obj-$(CONFIG_GPIO_TXX9)               += gpio_txx9.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250)        += early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
 
index 5465dc183e5ac4d26e36d300b1df36fb4cf4d2b5..c814287bdf5d10d8a44a9e6a843b3acd5ab30ff8 100644 (file)
@@ -376,13 +376,33 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                                __cpu_name[cpu] = "R4000PC";
                        }
                } else {
+                       int cca = read_c0_config() & CONF_CM_CMASK;
+                       int mc;
+
+                       /*
+                        * SC and MC versions can't be reliably told apart,
+                        * but only the latter support coherent caching
+                        * modes so assume the firmware has set the KSEG0
+                        * coherency attribute reasonably (if uncached, we
+                        * assume SC).
+                        */
+                       switch (cca) {
+                       case CONF_CM_CACHABLE_CE:
+                       case CONF_CM_CACHABLE_COW:
+                       case CONF_CM_CACHABLE_CUW:
+                               mc = 1;
+                               break;
+                       default:
+                               mc = 0;
+                               break;
+                       }
                        if ((c->processor_id & PRID_REV_MASK) >=
                            PRID_REV_R4400) {
-                               c->cputype = CPU_R4400SC;
-                               __cpu_name[cpu] = "R4400SC";
+                               c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+                               __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
                        } else {
-                               c->cputype = CPU_R4000SC;
-                               __cpu_name[cpu] = "R4000SC";
+                               c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+                               __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
                        }
                }
 
@@ -1079,8 +1099,8 @@ void cpu_report(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
 
-       printk(KERN_INFO "CPU revision is: %08x (%s)\n",
-              c->processor_id, cpu_name_string());
+       pr_info("CPU%d revision is: %08x (%s)\n",
+               smp_processor_id(), c->processor_id, cpu_name_string());
        if (c->options & MIPS_CPU_FPU)
                printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
 }
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
deleted file mode 100644 (file)
index abd99ea..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2008 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-/*
- * The file comes from kernel/csrc-r4k.c
- */
-#include <linux/clocksource.h>
-#include <linux/init.h>
-
-#include <asm/time.h>                  /* Not included in linux/time.h */
-
-#include <asm/mach-powertv/asic_regs.h>
-#include "powertv-clock.h"
-
-/* MIPS PLL Register Definitions */
-#define PLL_GET_M(x)           (((x) >> 8) & 0x000000FF)
-#define PLL_GET_N(x)           (((x) >> 16) & 0x000000FF)
-#define PLL_GET_P(x)           (((x) >> 24) & 0x00000007)
-
-/*
- * returns:  Clock frequency in kHz
- */
-unsigned int __init mips_get_pll_freq(void)
-{
-       unsigned int pll_reg, m, n, p;
-       unsigned int fin = 54000; /* Base frequency in kHz */
-       unsigned int fout;
-
-       /* Read PLL register setting */
-       pll_reg = asic_read(mips_pll_setup);
-       m = PLL_GET_M(pll_reg);
-       n = PLL_GET_N(pll_reg);
-       p = PLL_GET_P(pll_reg);
-       pr_info("MIPS PLL Register:0x%x  M=%d  N=%d  P=%d\n", pll_reg, m, n, p);
-
-       /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
-       fout = ((2 * n * fin) / (m * (0x01 << p)));
-
-       pr_info("MIPS Clock Freq=%d kHz\n", fout);
-
-       return fout;
-}
-
-static cycle_t c0_hpt_read(struct clocksource *cs)
-{
-       return read_c0_count();
-}
-
-static struct clocksource clocksource_mips = {
-       .name           = "powertv-counter",
-       .read           = c0_hpt_read,
-       .mask           = CLOCKSOURCE_MASK(32),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void __init powertv_c0_hpt_clocksource_init(void)
-{
-       unsigned int pll_freq = mips_get_pll_freq();
-
-       pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000,
-               (pll_freq % 1000) * 100 / 1000);
-
-       mips_hpt_frequency = pll_freq / 2 * 1000;
-
-       clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
-
-       clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
-}
-
-/**
- * struct tim_c - free running counter
- * @hi: High 16 bits of the counter
- * @lo: Low 32 bits of the counter
- *
- * Lays out the structure of the free running counter in memory. This counter
- * increments at a rate of 27 MHz/8 on all platforms.
- */
-struct tim_c {
-       unsigned int hi;
-       unsigned int lo;
-};
-
-static struct tim_c *tim_c;
-
-static cycle_t tim_c_read(struct clocksource *cs)
-{
-       unsigned int hi;
-       unsigned int next_hi;
-       unsigned int lo;
-
-       hi = readl(&tim_c->hi);
-
-       for (;;) {
-               lo = readl(&tim_c->lo);
-               next_hi = readl(&tim_c->hi);
-               if (next_hi == hi)
-                       break;
-               hi = next_hi;
-       }
-
-pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo);
-       return ((u64) hi << 32) | lo;
-}
-
-#define TIM_C_SIZE             48              /* # bits in the timer */
-
-static struct clocksource clocksource_tim_c = {
-       .name           = "powertv-tim_c",
-       .read           = tim_c_read,
-       .mask           = CLOCKSOURCE_MASK(TIM_C_SIZE),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/**
- * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
- *
- * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
- * 1 / (27,000,000/8) seconds.
- */
-static void __init powertv_tim_c_clocksource_init(void)
-{
-       const unsigned long     counts_per_second = 27000000 / 8;
-
-       clocksource_tim_c.rating = 200;
-
-       clocksource_register_hz(&clocksource_tim_c, counts_per_second);
-       tim_c = (struct tim_c *) asic_reg_addr(tim_ch);
-}
-
-/**
- powertv_clocksource_init - initialize all clocksources
- */
-void __init powertv_clocksource_init(void)
-{
-       powertv_c0_hpt_clocksource_init();
-       powertv_tim_c_clocksource_init();
-}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644 (file)
index 0000000..83cea37
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ *  8250/16550-type serial ports prom_putchar()
+ *
+ *  Copyright (C) 2010  Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+                                 unsigned int timeout)
+{
+       serial8250_base = (void __iomem *)base;
+       serial8250_reg_shift = reg_shift;
+       serial8250_tx_timeout = timeout;
+}
+
+static inline u8 serial_in(int offset)
+{
+       return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+       writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+       unsigned int timeout;
+       int status, bits;
+
+       if (!serial8250_base)
+               return;
+
+       timeout = serial8250_tx_timeout;
+       bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+       do {
+               status = serial_in(UART_LSR);
+
+               if (--timeout == 0)
+                       break;
+       } while ((status & bits) != bits);
+
+       if (timeout)
+               serial_out(UART_TX, c);
+}
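
Boards opt in by pointing the helper at a mapped 8250/16550 before the first prom_putchar() call. A hypothetical platform hook; the base address, register shift and timeout below are made-up example values, not taken from the patch:

#include <linux/init.h>
#include <asm/addrspace.h>

extern void setup_8250_early_printk_port(unsigned long base,
                                         unsigned int reg_shift,
                                         unsigned int timeout);

/* Hypothetical board hook: a 16550 at physical 0x1f000900, registers on
 * 32-bit boundaries (reg_shift = 2), and up to 10000 LSR polls per char. */
static void __init plat_setup_early_console(void)
{
        setup_8250_early_printk_port(CKSEG1ADDR(0x1f000900), 2, 10000);
}
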
index dba90ec0dc385ffcad5cc09eda51031d4e9a0fcc..185ba258361b979ee9531bd18d38b0242e1de979 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/init.h>
 #include <linux/ftrace.h>
+#include <linux/syscalls.h>
 
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
+#include <asm/syscall.h>
 #include <asm/uasm.h>
+#include <asm/unistd.h>
 
 #include <asm-generic/sections.h>
 
@@ -364,3 +367,33 @@ out:
        WARN_ON(1);
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+       return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+       if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+               return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+       if (nr >= __NR_64_Linux  && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+               return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+       if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+               return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+       return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
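
arch_syscall_addr() is the hook the generic CONFIG_FTRACE_SYSCALLS code uses to match syscall numbers against the metadata emitted by SYSCALL_DEFINE*: at boot it walks every number up to NR_syscalls (the new define in unistd.h) and asks the architecture for the corresponding handler address; per the 64-bit version above, numbers outside the configured ABI ranges resolve to sys_ni_syscall. A rough, self-contained sketch of that consumer loop; record_syscall_addrs() is illustrative, not the real tracing-core function:

#include <linux/init.h>
#include <asm/unistd.h>

extern unsigned long arch_syscall_addr(int nr);

static unsigned long syscall_addrs[NR_syscalls] __initdata;

/* Mirror of what the tracing core does at boot: ask the arch for the
 * handler address of every possible syscall number. */
static int __init record_syscall_addrs(void)
{
        int nr;

        for (nr = 0; nr < NR_syscalls; nr++)
                syscall_addrs[nr] = arch_syscall_addr(nr);

        return 0;
}
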
index 31fa856829cbf2620521317e5247d42b9e3fb087..72853aa26b77393c95e4c147f8631d63702782e4 100644 (file)
@@ -374,12 +374,20 @@ NESTED(except_vec_nmi, 0, sp)
 NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
+       /*
+        * Clear ERL - restore segment mapping
+        * Clear BEV - required for page fault exception handler to work
+        */
+       mfc0    k0, CP0_STATUS
+       ori     k0, k0, ST0_EXL
+       li      k1, ~(ST0_BEV | ST0_ERL)
+       and     k0, k0, k1
+       mtc0    k0, CP0_STATUS
+       ehb
        SAVE_ALL
        move    a0, sp
        jal     nmi_exception_handler
-       RESTORE_ALL
-       .set    mips3
-       eret
+       /* nmi_exception_handler never returns */
        .set    pop
        END(nmi_handler)
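
An NMI leaves the core with Status.BEV and Status.ERL set, much like reset, so the new prologue repairs Status before touching memory: clearing ERL restores normal segment mapping, clearing BEV lets the page-fault handler use the normal exception vectors, and EXL is set so the CPU stays in exception mode once ERL is dropped; nmi_exception_handler() then never returns, which is why the old RESTORE_ALL/eret tail disappears. A C-level sketch of the Status update (nmi_fixup_status() is illustrative; the real code is the mfc0/mtc0 sequence above):

#include <asm/mipsregs.h>

/* Illustrative C equivalent of the new Status fixup; the hazard barrier
 * (ehb) after the mtc0 is explicit in the real assembly sequence. */
static inline void nmi_fixup_status(void)
{
        write_c0_status((read_c0_status() | ST0_EXL) &
                        ~(ST0_BEV | ST0_ERL));
}
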
 
index 72ef2d25cbf21ab5bebb1a93b0c0bf6e4e4fdff7..e498f2b3646a167e73f36e0f74857a5e6da6308e 100644 (file)
@@ -150,7 +150,7 @@ int __init mips_cpu_intc_init(struct device_node *of_node,
        domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
                                       &mips_cpu_intc_irq_domain_ops, NULL);
        if (!domain)
-               panic("Failed to add irqdomain for MIPS CPU\n");
+               panic("Failed to add irqdomain for MIPS CPU");
 
        return 0;
 }
index 977a623d9253ca3580084652ee264b3c4f59a6c2..2a52568dbcd6786cf0ca3c7100f056abba8ed08b 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
 #include <linux/mm.h>
+#include <linux/numa.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -46,7 +47,7 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
-                               GFP_KERNEL, PAGE_KERNEL, -1,
+                               GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
 #endif
index 4204d76af854209659de87b35e143ec85fb802da..029e002a4ea0083d626140961d3cd465b188fabc 100644 (file)
@@ -73,7 +73,7 @@
 3:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       PTR_L   t8, __stack_chk_guard
+       PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
        LONG_S  t9, 0(t8)
 #endif
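
The PTR_L to PTR_LA change in the switch_to paths is a real fix, not a cleanup: PTR_L loaded the current canary value from __stack_chk_guard and then stored the next task's canary through that value as though it were a pointer, whereas PTR_LA loads the variable's address so the store updates the guard itself. The intended effect, in C (update_stack_guard() is illustrative):

#include <linux/sched.h>

extern unsigned long __stack_chk_guard;

/* What the fixed assembly amounts to: publish the incoming task's
 * per-task canary as the global stack-protector guard. */
static inline void update_stack_guard(struct task_struct *next)
{
        __stack_chk_guard = next->stack_canary;
}
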
index 8ae1ebef8b71e67fd5e205646bd10adcdb1b0299..b52e1d2b33e03836002b495328124225ebba29db 100644 (file)
  */
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
+#include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/regset.h>
 #include <linux/smp.h>
 #include <linux/user.h>
 #include <linux/security.h>
+#include <linux/tracehook.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
+#include <linux/ftrace.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/mipsmtregs.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/syscall.h>
 #include <asm/uaccess.h>
 #include <asm/bootinfo.h>
 #include <asm/reg.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -255,6 +263,133 @@ int ptrace_set_watch_regs(struct task_struct *child,
        return 0;
 }
 
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  void *kbuf, void __user *ubuf)
+{
+       struct pt_regs *regs = task_pt_regs(target);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  regs, 0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       struct pt_regs newregs;
+       int ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &newregs,
+                                0, sizeof(newregs));
+       if (ret)
+               return ret;
+
+       *task_pt_regs(target) = newregs;
+
+       return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  void *kbuf, void __user *ubuf)
+{
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  &target->thread.fpu,
+                                  0, sizeof(elf_fpregset_t));
+       /* XXX fcr31  */
+}
+
+static int fpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                 &target->thread.fpu,
+                                 0, sizeof(elf_fpregset_t));
+       /* XXX fcr31  */
+}
+
+enum mips_regset {
+       REGSET_GPR,
+       REGSET_FPR,
+};
+
+static const struct user_regset mips_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(unsigned int),
+               .align          = sizeof(unsigned int),
+               .get            = gpr_get,
+               .set            = gpr_set,
+       },
+       [REGSET_FPR] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = ELF_NFPREG,
+               .size           = sizeof(elf_fpreg_t),
+               .align          = sizeof(elf_fpreg_t),
+               .get            = fpr_get,
+               .set            = fpr_set,
+       },
+};
+
+static const struct user_regset_view user_mips_view = {
+       .name           = "mips",
+       .e_machine      = ELF_ARCH,
+       .ei_osabi       = ELF_OSABI,
+       .regsets        = mips_regsets,
+       .n              = ARRAY_SIZE(mips_regsets),
+};
+
+static const struct user_regset mips64_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(unsigned long),
+               .align          = sizeof(unsigned long),
+               .get            = gpr_get,
+               .set            = gpr_set,
+       },
+       [REGSET_FPR] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = ELF_NFPREG,
+               .size           = sizeof(elf_fpreg_t),
+               .align          = sizeof(elf_fpreg_t),
+               .get            = fpr_get,
+               .set            = fpr_set,
+       },
+};
+
+static const struct user_regset_view user_mips64_view = {
+       .name           = "mips",
+       .e_machine      = ELF_ARCH,
+       .ei_osabi       = ELF_OSABI,
+       .regsets        = mips64_regsets,
+       .n              = ARRAY_SIZE(mips_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+       return &user_mips_view;
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+               if (test_thread_flag(TIF_32BIT_REGS))
+                       return &user_mips_view;
+#endif
+
+       return &user_mips64_view;
+}
+
 long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
 {
@@ -517,52 +652,27 @@ long arch_ptrace(struct task_struct *child, long request,
        return ret;
 }
 
-static inline int audit_arch(void)
-{
-       int arch = EM_MIPS;
-#ifdef CONFIG_64BIT
-       arch |=  __AUDIT_ARCH_64BIT;
-#endif
-#if defined(__LITTLE_ENDIAN)
-       arch |=  __AUDIT_ARCH_LE;
-#endif
-       return arch;
-}
-
 /*
  * Notification of system call entry/exit
  * - triggered by current->work.syscall_trace
  */
 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
 {
+       long ret = 0;
        user_exit();
 
        /* do the secure computing check first */
        secure_computing_strict(regs->regs[2]);
 
-       if (!(current->ptrace & PT_PTRACED))
-               goto out;
-
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               goto out;
+       if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+           tracehook_report_syscall_entry(regs))
+               ret = -1;
 
-       /* The 0x80 provides a way for the tracing parent to distinguish
-          between a syscall stop and SIGTRAP delivery */
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-                                0x80 : 0));
-
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_enter(regs, regs->regs[2]);
 
-out:
-       audit_syscall_entry(audit_arch(), regs->regs[2],
+       audit_syscall_entry(__syscall_get_arch(),
+                           regs->regs[2],
                            regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
 }
@@ -582,26 +692,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 
        audit_syscall_exit(regs);
 
-       if (!(current->ptrace & PT_PTRACED))
-               return;
-
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return;
-
-       /* The 0x80 provides a way for the tracing parent to distinguish
-          between a syscall stop and SIGTRAP delivery */
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-                                0x80 : 0));
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_exit(regs, regs->regs[2]);
 
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(regs, 0);
 
        user_enter();
 }
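
Together the regset tables and the tracehook conversion give MIPS the generic ptrace machinery: PTRACE_GETREGSET/PTRACE_SETREGSET with NT_PRSTATUS and NT_PRFPREG, plus sys_enter/sys_exit tracepoints gated on TIF_SYSCALL_TRACEPOINT instead of the hand-rolled SIGTRAP notification. A tracer-side sketch of the new regset path; dump_gregs() and the buffer size are illustrative (the kernel trims iov_len to the real regset size):

#include <elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Read a stopped tracee's general-purpose register set. */
static void dump_gregs(pid_t pid)
{
        unsigned long gregs[64];        /* comfortably >= ELF_NGREG entries */
        struct iovec iov = { .iov_base = gregs, .iov_len = sizeof(gregs) };

        if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
                printf("fetched %zu bytes of registers\n", iov.iov_len);
}
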
index 38af83f84c4af846ce2a6fb55994804b4d781bdc..20b7b040e76f1c4e8d9a019edae6014f3ee3fea5 100644 (file)
@@ -67,7 +67,7 @@ LEAF(resume)
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       PTR_L   t8, __stack_chk_guard
+       PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
        LONG_S  t9, 0(t8)
 #endif
index 921238a6bd260d238cd2a01e177556ed105c770b..078de5eaca8fd96d8bcb720491fe3f56901dc03c 100644 (file)
@@ -69,7 +69,7 @@
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       PTR_L   t8, __stack_chk_guard
+       PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
        LONG_S  t9, 0(t8)
 #endif
index e774bb1088b5f16970feaea1bddf6eaead240a01..e8e541b40d86be37a2e1fe9c693bbfe912581bfb 100644 (file)
@@ -40,17 +40,58 @@ NESTED(handle_sys, PT_SIZE, sp)
        sw      t1, PT_EPC(sp)
        beqz    t0, illegal_syscall
 
-       sll     t0, v0, 3
+       sll     t0, v0, 2
        la      t1, sys_call_table
        addu    t1, t0
        lw      t2, (t1)                # syscall routine
-       lw      t3, 4(t1)               # >= 0 if we need stack arguments
        beqz    t2, illegal_syscall
 
        sw      a3, PT_R26(sp)          # save a3 for syscall restarting
-       bgez    t3, stackargs
 
-stack_done:
+       /*
+        * More than four arguments.  Try to deal with it by copying the
+        * stack arguments from the user stack to the kernel stack.
+        * This Sucks (TM).
+        */
+       lw      t0, PT_R29(sp)          # get old user stack pointer
+
+       /*
+        * We intentionally keep the kernel stack a little below the top of
+        * userspace so we don't have to do a slower byte accurate check here.
+        */
+       lw      t5, TI_ADDR_LIMIT($28)
+       addu    t4, t0, 32
+       and     t5, t4
+       bltz    t5, bad_stack           # -> sp is bad
+
+       /*
+        * Ok, copy the args from the luser stack to the kernel stack.
+        * t3 is the precomputed number of instruction bytes needed to
+        * load or store arguments 6-8.
+        */
+
+       .set    push
+       .set    noreorder
+       .set    nomacro
+
+1:     lw      t5, 16(t0)              # argument #5 from usp
+4:     lw      t6, 20(t0)              # argument #6 from usp
+3:     lw      t7, 24(t0)              # argument #7 from usp
+2:     lw      t8, 28(t0)              # argument #8 from usp
+
+       sw      t5, 16(sp)              # argument #5 to ksp
+       sw      t6, 20(sp)              # argument #6 to ksp
+       sw      t7, 24(sp)              # argument #7 to ksp
+       sw      t8, 28(sp)              # argument #8 to ksp
+       .set    pop
+
+       .section __ex_table,"a"
+       PTR     1b,bad_stack
+       PTR     2b,bad_stack
+       PTR     3b,bad_stack
+       PTR     4b,bad_stack
+       .previous
+
        lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
@@ -101,66 +142,6 @@ syscall_trace_entry:
 
 /* ------------------------------------------------------------------------ */
 
-       /*
-        * More than four arguments.  Try to deal with it by copying the
-        * stack arguments from the user stack to the kernel stack.
-        * This Sucks (TM).
-        */
-stackargs:
-       lw      t0, PT_R29(sp)          # get old user stack pointer
-
-       /*
-        * We intentionally keep the kernel stack a little below the top of
-        * userspace so we don't have to do a slower byte accurate check here.
-        */
-       lw      t5, TI_ADDR_LIMIT($28)
-       addu    t4, t0, 32
-       and     t5, t4
-       bltz    t5, bad_stack           # -> sp is bad
-
-       /* Ok, copy the args from the luser stack to the kernel stack.
-        * t3 is the precomputed number of instruction bytes needed to
-        * load or store arguments 6-8.
-        */
-
-       la      t1, 5f                  # load up to 3 arguments
-       subu    t1, t3
-1:     lw      t5, 16(t0)              # argument #5 from usp
-       .set    push
-       .set    noreorder
-       .set    nomacro
-       jr      t1
-        addiu  t1, 6f - 5f
-
-2:     lw      t8, 28(t0)              # argument #8 from usp
-3:     lw      t7, 24(t0)              # argument #7 from usp
-4:     lw      t6, 20(t0)              # argument #6 from usp
-5:     jr      t1
-        sw     t5, 16(sp)              # argument #5 to ksp
-
-#ifdef CONFIG_CPU_MICROMIPS
-       sw      t8, 28(sp)              # argument #8 to ksp
-       nop
-       sw      t7, 24(sp)              # argument #7 to ksp
-       nop
-       sw      t6, 20(sp)              # argument #6 to ksp
-       nop
-#else
-       sw      t8, 28(sp)              # argument #8 to ksp
-       sw      t7, 24(sp)              # argument #7 to ksp
-       sw      t6, 20(sp)              # argument #6 to ksp
-#endif
-6:     j       stack_done              # go back
-        nop
-       .set    pop
-
-       .section __ex_table,"a"
-       PTR     1b,bad_stack
-       PTR     2b,bad_stack
-       PTR     3b,bad_stack
-       PTR     4b,bad_stack
-       .previous
-
        /*
         * The stackpointer for a call with more than 4 arguments is bad.
         * We probably should handle this case a bit more drastic.
@@ -187,7 +168,7 @@ illegal_syscall:
        subu    t0, a0, __NR_O32_Linux  # check syscall number
        sltiu   v0, t0, __NR_O32_Linux_syscalls + 1
        beqz    t0, einval              # do not recurse
-       sll     t1, t0, 3
+       sll     t1, t0, 2
        beqz    v0, einval
        lw      t2, sys_call_table(t1)          # syscall routine
 
@@ -218,260 +199,248 @@ einval: li      v0, -ENOSYS
        jr      ra
        END(sys_syscall)
 
-       .macro  fifty ptr, nargs, from=1, to=50
-       sys     \ptr            \nargs
-       .if     \to-\from
-       fifty   \ptr,\nargs,"(\from+1)",\to
-       .endif
-       .endm
-
-       .macro  mille ptr, nargs, from=1, to=20
-       fifty   \ptr,\nargs
-       .if     \to-\from
-       mille   \ptr,\nargs,"(\from+1)",\to
-       .endif
-       .endm
-
-       .macro  syscalltable
-       sys     sys_syscall             8       /* 4000 */
-       sys     sys_exit                1
-       sys     __sys_fork              0
-       sys     sys_read                3
-       sys     sys_write               3
-       sys     sys_open                3       /* 4005 */
-       sys     sys_close               1
-       sys     sys_waitpid             3
-       sys     sys_creat               2
-       sys     sys_link                2
-       sys     sys_unlink              1       /* 4010 */
-       sys     sys_execve              0
-       sys     sys_chdir               1
-       sys     sys_time                1
-       sys     sys_mknod               3
-       sys     sys_chmod               2       /* 4015 */
-       sys     sys_lchown              3
-       sys     sys_ni_syscall          0
-       sys     sys_ni_syscall          0       /* was sys_stat */
-       sys     sys_lseek               3
-       sys     sys_getpid              0       /* 4020 */
-       sys     sys_mount               5
-       sys     sys_oldumount           1
-       sys     sys_setuid              1
-       sys     sys_getuid              0
-       sys     sys_stime               1       /* 4025 */
-       sys     sys_ptrace              4
-       sys     sys_alarm               1
-       sys     sys_ni_syscall          0       /* was sys_fstat */
-       sys     sys_pause               0
-       sys     sys_utime               2       /* 4030 */
-       sys     sys_ni_syscall          0
-       sys     sys_ni_syscall          0
-       sys     sys_access              2
-       sys     sys_nice                1
-       sys     sys_ni_syscall          0       /* 4035 */
-       sys     sys_sync                0
-       sys     sys_kill                2
-       sys     sys_rename              2
-       sys     sys_mkdir               2
-       sys     sys_rmdir               1       /* 4040 */
-       sys     sys_dup                 1
-       sys     sysm_pipe               0
-       sys     sys_times               1
-       sys     sys_ni_syscall          0
-       sys     sys_brk                 1       /* 4045 */
-       sys     sys_setgid              1
-       sys     sys_getgid              0
-       sys     sys_ni_syscall          0       /* was signal(2) */
-       sys     sys_geteuid             0
-       sys     sys_getegid             0       /* 4050 */
-       sys     sys_acct                1
-       sys     sys_umount              2
-       sys     sys_ni_syscall          0
-       sys     sys_ioctl               3
-       sys     sys_fcntl               3       /* 4055 */
-       sys     sys_ni_syscall          2
-       sys     sys_setpgid             2
-       sys     sys_ni_syscall          0
-       sys     sys_olduname            1
-       sys     sys_umask               1       /* 4060 */
-       sys     sys_chroot              1
-       sys     sys_ustat               2
-       sys     sys_dup2                2
-       sys     sys_getppid             0
-       sys     sys_getpgrp             0       /* 4065 */
-       sys     sys_setsid              0
-       sys     sys_sigaction           3
-       sys     sys_sgetmask            0
-       sys     sys_ssetmask            1
-       sys     sys_setreuid            2       /* 4070 */
-       sys     sys_setregid            2
-       sys     sys_sigsuspend          0
-       sys     sys_sigpending          1
-       sys     sys_sethostname         2
-       sys     sys_setrlimit           2       /* 4075 */
-       sys     sys_getrlimit           2
-       sys     sys_getrusage           2
-       sys     sys_gettimeofday        2
-       sys     sys_settimeofday        2
-       sys     sys_getgroups           2       /* 4080 */
-       sys     sys_setgroups           2
-       sys     sys_ni_syscall          0       /* old_select */
-       sys     sys_symlink             2
-       sys     sys_ni_syscall          0       /* was sys_lstat */
-       sys     sys_readlink            3       /* 4085 */
-       sys     sys_uselib              1
-       sys     sys_swapon              2
-       sys     sys_reboot              3
-       sys     sys_old_readdir         3
-       sys     sys_mips_mmap           6       /* 4090 */
-       sys     sys_munmap              2
-       sys     sys_truncate            2
-       sys     sys_ftruncate           2
-       sys     sys_fchmod              2
-       sys     sys_fchown              3       /* 4095 */
-       sys     sys_getpriority         2
-       sys     sys_setpriority         3
-       sys     sys_ni_syscall          0
-       sys     sys_statfs              2
-       sys     sys_fstatfs             2       /* 4100 */
-       sys     sys_ni_syscall          0       /* was ioperm(2) */
-       sys     sys_socketcall          2
-       sys     sys_syslog              3
-       sys     sys_setitimer           3
-       sys     sys_getitimer           2       /* 4105 */
-       sys     sys_newstat             2
-       sys     sys_newlstat            2
-       sys     sys_newfstat            2
-       sys     sys_uname               1
-       sys     sys_ni_syscall          0       /* 4110 was iopl(2) */
-       sys     sys_vhangup             0
-       sys     sys_ni_syscall          0       /* was sys_idle() */
-       sys     sys_ni_syscall          0       /* was sys_vm86 */
-       sys     sys_wait4               4
-       sys     sys_swapoff             1       /* 4115 */
-       sys     sys_sysinfo             1
-       sys     sys_ipc                 6
-       sys     sys_fsync               1
-       sys     sys_sigreturn           0
-       sys     __sys_clone             6       /* 4120 */
-       sys     sys_setdomainname       2
-       sys     sys_newuname            1
-       sys     sys_ni_syscall          0       /* sys_modify_ldt */
-       sys     sys_adjtimex            1
-       sys     sys_mprotect            3       /* 4125 */
-       sys     sys_sigprocmask         3
-       sys     sys_ni_syscall          0       /* was create_module */
-       sys     sys_init_module         5
-       sys     sys_delete_module       1
-       sys     sys_ni_syscall          0       /* 4130 was get_kernel_syms */
-       sys     sys_quotactl            4
-       sys     sys_getpgid             1
-       sys     sys_fchdir              1
-       sys     sys_bdflush             2
-       sys     sys_sysfs               3       /* 4135 */
-       sys     sys_personality         1
-       sys     sys_ni_syscall          0       /* for afs_syscall */
-       sys     sys_setfsuid            1
-       sys     sys_setfsgid            1
-       sys     sys_llseek              5       /* 4140 */
-       sys     sys_getdents            3
-       sys     sys_select              5
-       sys     sys_flock               2
-       sys     sys_msync               3
-       sys     sys_readv               3       /* 4145 */
-       sys     sys_writev              3
-       sys     sys_cacheflush          3
-       sys     sys_cachectl            3
-       sys     sys_sysmips             4
-       sys     sys_ni_syscall          0       /* 4150 */
-       sys     sys_getsid              1
-       sys     sys_fdatasync           1
-       sys     sys_sysctl              1
-       sys     sys_mlock               2
-       sys     sys_munlock             2       /* 4155 */
-       sys     sys_mlockall            1
-       sys     sys_munlockall          0
-       sys     sys_sched_setparam      2
-       sys     sys_sched_getparam      2
-       sys     sys_sched_setscheduler  3       /* 4160 */
-       sys     sys_sched_getscheduler  1
-       sys     sys_sched_yield         0
-       sys     sys_sched_get_priority_max 1
-       sys     sys_sched_get_priority_min 1
-       sys     sys_sched_rr_get_interval 2     /* 4165 */
-       sys     sys_nanosleep,          2
-       sys     sys_mremap,             5
-       sys     sys_accept              3
-       sys     sys_bind                3
-       sys     sys_connect             3       /* 4170 */
-       sys     sys_getpeername         3
-       sys     sys_getsockname         3
-       sys     sys_getsockopt          5
-       sys     sys_listen              2
-       sys     sys_recv                4       /* 4175 */
-       sys     sys_recvfrom            6
-       sys     sys_recvmsg             3
-       sys     sys_send                4
-       sys     sys_sendmsg             3
-       sys     sys_sendto              6       /* 4180 */
-       sys     sys_setsockopt          5
-       sys     sys_shutdown            2
-       sys     sys_socket              3
-       sys     sys_socketpair          4
-       sys     sys_setresuid           3       /* 4185 */
-       sys     sys_getresuid           3
-       sys     sys_ni_syscall          0       /* was sys_query_module */
-       sys     sys_poll                3
-       sys     sys_ni_syscall          0       /* was nfsservctl */
-       sys     sys_setresgid           3       /* 4190 */
-       sys     sys_getresgid           3
-       sys     sys_prctl               5
-       sys     sys_rt_sigreturn        0
-       sys     sys_rt_sigaction        4
-       sys     sys_rt_sigprocmask      4       /* 4195 */
-       sys     sys_rt_sigpending       2
-       sys     sys_rt_sigtimedwait     4
-       sys     sys_rt_sigqueueinfo     3
-       sys     sys_rt_sigsuspend       0
-       sys     sys_pread64             6       /* 4200 */
-       sys     sys_pwrite64            6
-       sys     sys_chown               3
-       sys     sys_getcwd              2
-       sys     sys_capget              2
-       sys     sys_capset              2       /* 4205 */
-       sys     sys_sigaltstack         0
-       sys     sys_sendfile            4
-       sys     sys_ni_syscall          0
-       sys     sys_ni_syscall          0
-       sys     sys_mips_mmap2          6       /* 4210 */
-       sys     sys_truncate64          4
-       sys     sys_ftruncate64         4
-       sys     sys_stat64              2
-       sys     sys_lstat64             2
-       sys     sys_fstat64             2       /* 4215 */
-       sys     sys_pivot_root          2
-       sys     sys_mincore             3
-       sys     sys_madvise             3
-       sys     sys_getdents64          3
-       sys     sys_fcntl64             3       /* 4220 */
-       sys     sys_ni_syscall          0
-       sys     sys_gettid              0
-       sys     sys_readahead           5
-       sys     sys_setxattr            5
-       sys     sys_lsetxattr           5       /* 4225 */
-       sys     sys_fsetxattr           5
-       sys     sys_getxattr            4
-       sys     sys_lgetxattr           4
-       sys     sys_fgetxattr           4
-       sys     sys_listxattr           3       /* 4230 */
-       sys     sys_llistxattr          3
-       sys     sys_flistxattr          3
-       sys     sys_removexattr         2
-       sys     sys_lremovexattr        2
-       sys     sys_fremovexattr        2       /* 4235 */
-       sys     sys_tkill               2
-       sys     sys_sendfile64          5
-       sys     sys_futex               6
+       .align  2
+       .type   sys_call_table, @object
+EXPORT(sys_call_table)
+       PTR     sys_syscall                     /* 4000 */
+       PTR     sys_exit
+       PTR     __sys_fork
+       PTR     sys_read
+       PTR     sys_write
+       PTR     sys_open                        /* 4005 */
+       PTR     sys_close
+       PTR     sys_waitpid
+       PTR     sys_creat
+       PTR     sys_link
+       PTR     sys_unlink                      /* 4010 */
+       PTR     sys_execve
+       PTR     sys_chdir
+       PTR     sys_time
+       PTR     sys_mknod
+       PTR     sys_chmod                       /* 4015 */
+       PTR     sys_lchown
+       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall                  /* was sys_stat */
+       PTR     sys_lseek
+       PTR     sys_getpid                      /* 4020 */
+       PTR     sys_mount
+       PTR     sys_oldumount
+       PTR     sys_setuid
+       PTR     sys_getuid
+       PTR     sys_stime                       /* 4025 */
+       PTR     sys_ptrace
+       PTR     sys_alarm
+       PTR     sys_ni_syscall                  /* was sys_fstat */
+       PTR     sys_pause
+       PTR     sys_utime                       /* 4030 */
+       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall
+       PTR     sys_access
+       PTR     sys_nice
+       PTR     sys_ni_syscall                  /* 4035 */
+       PTR     sys_sync
+       PTR     sys_kill
+       PTR     sys_rename
+       PTR     sys_mkdir
+       PTR     sys_rmdir                       /* 4040 */
+       PTR     sys_dup
+       PTR     sysm_pipe
+       PTR     sys_times
+       PTR     sys_ni_syscall
+       PTR     sys_brk                         /* 4045 */
+       PTR     sys_setgid
+       PTR     sys_getgid
+       PTR     sys_ni_syscall                  /* was signal(2) */
+       PTR     sys_geteuid
+       PTR     sys_getegid                     /* 4050 */
+       PTR     sys_acct
+       PTR     sys_umount
+       PTR     sys_ni_syscall
+       PTR     sys_ioctl
+       PTR     sys_fcntl                       /* 4055 */
+       PTR     sys_ni_syscall
+       PTR     sys_setpgid
+       PTR     sys_ni_syscall
+       PTR     sys_olduname
+       PTR     sys_umask                       /* 4060 */
+       PTR     sys_chroot
+       PTR     sys_ustat
+       PTR     sys_dup2
+       PTR     sys_getppid
+       PTR     sys_getpgrp                     /* 4065 */
+       PTR     sys_setsid
+       PTR     sys_sigaction
+       PTR     sys_sgetmask
+       PTR     sys_ssetmask
+       PTR     sys_setreuid                    /* 4070 */
+       PTR     sys_setregid
+       PTR     sys_sigsuspend
+       PTR     sys_sigpending
+       PTR     sys_sethostname
+       PTR     sys_setrlimit                   /* 4075 */
+       PTR     sys_getrlimit
+       PTR     sys_getrusage
+       PTR     sys_gettimeofday
+       PTR     sys_settimeofday
+       PTR     sys_getgroups                   /* 4080 */
+       PTR     sys_setgroups
+       PTR     sys_ni_syscall                  /* old_select */
+       PTR     sys_symlink
+       PTR     sys_ni_syscall                  /* was sys_lstat */
+       PTR     sys_readlink                    /* 4085 */
+       PTR     sys_uselib
+       PTR     sys_swapon
+       PTR     sys_reboot
+       PTR     sys_old_readdir
+       PTR     sys_mips_mmap                   /* 4090 */
+       PTR     sys_munmap
+       PTR     sys_truncate
+       PTR     sys_ftruncate
+       PTR     sys_fchmod
+       PTR     sys_fchown                      /* 4095 */
+       PTR     sys_getpriority
+       PTR     sys_setpriority
+       PTR     sys_ni_syscall
+       PTR     sys_statfs
+       PTR     sys_fstatfs                     /* 4100 */
+       PTR     sys_ni_syscall                  /* was ioperm(2) */
+       PTR     sys_socketcall
+       PTR     sys_syslog
+       PTR     sys_setitimer
+       PTR     sys_getitimer                   /* 4105 */
+       PTR     sys_newstat
+       PTR     sys_newlstat
+       PTR     sys_newfstat
+       PTR     sys_uname
+       PTR     sys_ni_syscall                  /* 4110 was iopl(2) */
+       PTR     sys_vhangup
+       PTR     sys_ni_syscall                  /* was sys_idle() */
+       PTR     sys_ni_syscall                  /* was sys_vm86 */
+       PTR     sys_wait4
+       PTR     sys_swapoff                     /* 4115 */
+       PTR     sys_sysinfo
+       PTR     sys_ipc
+       PTR     sys_fsync
+       PTR     sys_sigreturn
+       PTR     __sys_clone                     /* 4120 */
+       PTR     sys_setdomainname
+       PTR     sys_newuname
+       PTR     sys_ni_syscall                  /* sys_modify_ldt */
+       PTR     sys_adjtimex
+       PTR     sys_mprotect                    /* 4125 */
+       PTR     sys_sigprocmask
+       PTR     sys_ni_syscall                  /* was create_module */
+       PTR     sys_init_module
+       PTR     sys_delete_module
+       PTR     sys_ni_syscall                  /* 4130 was get_kernel_syms */
+       PTR     sys_quotactl
+       PTR     sys_getpgid
+       PTR     sys_fchdir
+       PTR     sys_bdflush
+       PTR     sys_sysfs                       /* 4135 */
+       PTR     sys_personality
+       PTR     sys_ni_syscall                  /* for afs_syscall */
+       PTR     sys_setfsuid
+       PTR     sys_setfsgid
+       PTR     sys_llseek                      /* 4140 */
+       PTR     sys_getdents
+       PTR     sys_select
+       PTR     sys_flock
+       PTR     sys_msync
+       PTR     sys_readv                       /* 4145 */
+       PTR     sys_writev
+       PTR     sys_cacheflush
+       PTR     sys_cachectl
+       PTR     sys_sysmips
+       PTR     sys_ni_syscall                  /* 4150 */
+       PTR     sys_getsid
+       PTR     sys_fdatasync
+       PTR     sys_sysctl
+       PTR     sys_mlock
+       PTR     sys_munlock                     /* 4155 */
+       PTR     sys_mlockall
+       PTR     sys_munlockall
+       PTR     sys_sched_setparam
+       PTR     sys_sched_getparam
+       PTR     sys_sched_setscheduler          /* 4160 */
+       PTR     sys_sched_getscheduler
+       PTR     sys_sched_yield
+       PTR     sys_sched_get_priority_max
+       PTR     sys_sched_get_priority_min
+       PTR     sys_sched_rr_get_interval       /* 4165 */
+       PTR     sys_nanosleep
+       PTR     sys_mremap
+       PTR     sys_accept
+       PTR     sys_bind
+       PTR     sys_connect                     /* 4170 */
+       PTR     sys_getpeername
+       PTR     sys_getsockname
+       PTR     sys_getsockopt
+       PTR     sys_listen
+       PTR     sys_recv                        /* 4175 */
+       PTR     sys_recvfrom
+       PTR     sys_recvmsg
+       PTR     sys_send
+       PTR     sys_sendmsg
+       PTR     sys_sendto                      /* 4180 */
+       PTR     sys_setsockopt
+       PTR     sys_shutdown
+       PTR     sys_socket
+       PTR     sys_socketpair
+       PTR     sys_setresuid                   /* 4185 */
+       PTR     sys_getresuid
+       PTR     sys_ni_syscall                  /* was sys_query_module */
+       PTR     sys_poll
+       PTR     sys_ni_syscall                  /* was nfsservctl */
+       PTR     sys_setresgid                   /* 4190 */
+       PTR     sys_getresgid
+       PTR     sys_prctl
+       PTR     sys_rt_sigreturn
+       PTR     sys_rt_sigaction
+       PTR     sys_rt_sigprocmask              /* 4195 */
+       PTR     sys_rt_sigpending
+       PTR     sys_rt_sigtimedwait
+       PTR     sys_rt_sigqueueinfo
+       PTR     sys_rt_sigsuspend
+       PTR     sys_pread64                     /* 4200 */
+       PTR     sys_pwrite64
+       PTR     sys_chown
+       PTR     sys_getcwd
+       PTR     sys_capget
+       PTR     sys_capset                      /* 4205 */
+       PTR     sys_sigaltstack
+       PTR     sys_sendfile
+       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall
+       PTR     sys_mips_mmap2                  /* 4210 */
+       PTR     sys_truncate64
+       PTR     sys_ftruncate64
+       PTR     sys_stat64
+       PTR     sys_lstat64
+       PTR     sys_fstat64                     /* 4215 */
+       PTR     sys_pivot_root
+       PTR     sys_mincore
+       PTR     sys_madvise
+       PTR     sys_getdents64
+       PTR     sys_fcntl64                     /* 4220 */
+       PTR     sys_ni_syscall
+       PTR     sys_gettid
+       PTR     sys_readahead
+       PTR     sys_setxattr
+       PTR     sys_lsetxattr                   /* 4225 */
+       PTR     sys_fsetxattr
+       PTR     sys_getxattr
+       PTR     sys_lgetxattr
+       PTR     sys_fgetxattr
+       PTR     sys_listxattr                   /* 4230 */
+       PTR     sys_llistxattr
+       PTR     sys_flistxattr
+       PTR     sys_removexattr
+       PTR     sys_lremovexattr
+       PTR     sys_fremovexattr                /* 4235 */
+       PTR     sys_tkill
+       PTR     sys_sendfile64
+       PTR     sys_futex
 #ifdef CONFIG_MIPS_MT_FPAFF
        /*
         * For FPU affinity scheduling on MIPS MT processors, we need to
@@ -480,132 +449,117 @@ einval: li      v0, -ENOSYS
         * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
         * atm.
         */
-       sys     mipsmt_sys_sched_setaffinity    3
-       sys     mipsmt_sys_sched_getaffinity    3
+       PTR     mipsmt_sys_sched_setaffinity
+       PTR     mipsmt_sys_sched_getaffinity
 #else
-       sys     sys_sched_setaffinity   3
-       sys     sys_sched_getaffinity   3       /* 4240 */
+       PTR     sys_sched_setaffinity
+       PTR     sys_sched_getaffinity           /* 4240 */
 #endif /* CONFIG_MIPS_MT_FPAFF */
-       sys     sys_io_setup            2
-       sys     sys_io_destroy          1
-       sys     sys_io_getevents        5
-       sys     sys_io_submit           3
-       sys     sys_io_cancel           3       /* 4245 */
-       sys     sys_exit_group          1
-       sys     sys_lookup_dcookie      4
-       sys     sys_epoll_create        1
-       sys     sys_epoll_ctl           4
-       sys     sys_epoll_wait          4       /* 4250 */
-       sys     sys_remap_file_pages    5
-       sys     sys_set_tid_address     1
-       sys     sys_restart_syscall     0
-       sys     sys_fadvise64_64        7
-       sys     sys_statfs64            3       /* 4255 */
-       sys     sys_fstatfs64           2
-       sys     sys_timer_create        3
-       sys     sys_timer_settime       4
-       sys     sys_timer_gettime       2
-       sys     sys_timer_getoverrun    1       /* 4260 */
-       sys     sys_timer_delete        1
-       sys     sys_clock_settime       2
-       sys     sys_clock_gettime       2
-       sys     sys_clock_getres        2
-       sys     sys_clock_nanosleep     4       /* 4265 */
-       sys     sys_tgkill              3
-       sys     sys_utimes              2
-       sys     sys_mbind               4
-       sys     sys_ni_syscall          0       /* sys_get_mempolicy */
-       sys     sys_ni_syscall          0       /* 4270 sys_set_mempolicy */
-       sys     sys_mq_open             4
-       sys     sys_mq_unlink           1
-       sys     sys_mq_timedsend        5
-       sys     sys_mq_timedreceive     5
-       sys     sys_mq_notify           2       /* 4275 */
-       sys     sys_mq_getsetattr       3
-       sys     sys_ni_syscall          0       /* sys_vserver */
-       sys     sys_waitid              5
-       sys     sys_ni_syscall          0       /* available, was setaltroot */
-       sys     sys_add_key             5       /* 4280 */
-       sys     sys_request_key         4
-       sys     sys_keyctl              5
-       sys     sys_set_thread_area     1
-       sys     sys_inotify_init        0
-       sys     sys_inotify_add_watch   3       /* 4285 */
-       sys     sys_inotify_rm_watch    2
-       sys     sys_migrate_pages       4
-       sys     sys_openat              4
-       sys     sys_mkdirat             3
-       sys     sys_mknodat             4       /* 4290 */
-       sys     sys_fchownat            5
-       sys     sys_futimesat           3
-       sys     sys_fstatat64           4
-       sys     sys_unlinkat            3
-       sys     sys_renameat            4       /* 4295 */
-       sys     sys_linkat              5
-       sys     sys_symlinkat           3
-       sys     sys_readlinkat          4
-       sys     sys_fchmodat            3
-       sys     sys_faccessat           3       /* 4300 */
-       sys     sys_pselect6            6
-       sys     sys_ppoll               5
-       sys     sys_unshare             1
-       sys     sys_splice              6
-       sys     sys_sync_file_range     7       /* 4305 */
-       sys     sys_tee                 4
-       sys     sys_vmsplice            4
-       sys     sys_move_pages          6
-       sys     sys_set_robust_list     2
-       sys     sys_get_robust_list     3       /* 4310 */
-       sys     sys_kexec_load          4
-       sys     sys_getcpu              3
-       sys     sys_epoll_pwait         6
-       sys     sys_ioprio_set          3
-       sys     sys_ioprio_get          2       /* 4315 */
-       sys     sys_utimensat           4
-       sys     sys_signalfd            3
-       sys     sys_ni_syscall          0       /* was timerfd */
-       sys     sys_eventfd             1
-       sys     sys_fallocate           6       /* 4320 */
-       sys     sys_timerfd_create      2
-       sys     sys_timerfd_gettime     2
-       sys     sys_timerfd_settime     4
-       sys     sys_signalfd4           4
-       sys     sys_eventfd2            2       /* 4325 */
-       sys     sys_epoll_create1       1
-       sys     sys_dup3                3
-       sys     sys_pipe2               2
-       sys     sys_inotify_init1       1
-       sys     sys_preadv              6       /* 4330 */
-       sys     sys_pwritev             6
-       sys     sys_rt_tgsigqueueinfo   4
-       sys     sys_perf_event_open     5
-       sys     sys_accept4             4
-       sys     sys_recvmmsg            5       /* 4335 */
-       sys     sys_fanotify_init       2
-       sys     sys_fanotify_mark       6
-       sys     sys_prlimit64           4
-       sys     sys_name_to_handle_at   5
-       sys     sys_open_by_handle_at   3       /* 4340 */
-       sys     sys_clock_adjtime       2
-       sys     sys_syncfs              1
-       sys     sys_sendmmsg            4
-       sys     sys_setns               2
-       sys     sys_process_vm_readv    6       /* 4345 */
-       sys     sys_process_vm_writev   6
-       sys     sys_kcmp                5
-       sys     sys_finit_module        3
-       .endm
-
-       /* We pre-compute the number of _instruction_ bytes needed to
-          load or store the arguments 6-8. Negative values are ignored. */
-
-       .macro  sys function, nargs
-       PTR     \function
-       LONG    (\nargs << 2) - (5 << 2)
-       .endm
-
-       .align  3
-       .type   sys_call_table,@object
-EXPORT(sys_call_table)
-       syscalltable
-       .size   sys_call_table, . - sys_call_table
+       PTR     sys_io_setup
+       PTR     sys_io_destroy
+       PTR     sys_io_getevents
+       PTR     sys_io_submit
+       PTR     sys_io_cancel                   /* 4245 */
+       PTR     sys_exit_group
+       PTR     sys_lookup_dcookie
+       PTR     sys_epoll_create
+       PTR     sys_epoll_ctl
+       PTR     sys_epoll_wait                  /* 4250 */
+       PTR     sys_remap_file_pages
+       PTR     sys_set_tid_address
+       PTR     sys_restart_syscall
+       PTR     sys_fadvise64_64
+       PTR     sys_statfs64                    /* 4255 */
+       PTR     sys_fstatfs64
+       PTR     sys_timer_create
+       PTR     sys_timer_settime
+       PTR     sys_timer_gettime
+       PTR     sys_timer_getoverrun            /* 4260 */
+       PTR     sys_timer_delete
+       PTR     sys_clock_settime
+       PTR     sys_clock_gettime
+       PTR     sys_clock_getres
+       PTR     sys_clock_nanosleep             /* 4265 */
+       PTR     sys_tgkill
+       PTR     sys_utimes
+       PTR     sys_mbind
+       PTR     sys_ni_syscall                  /* sys_get_mempolicy */
+       PTR     sys_ni_syscall                  /* 4270 sys_set_mempolicy */
+       PTR     sys_mq_open
+       PTR     sys_mq_unlink
+       PTR     sys_mq_timedsend
+       PTR     sys_mq_timedreceive
+       PTR     sys_mq_notify                   /* 4275 */
+       PTR     sys_mq_getsetattr
+       PTR     sys_ni_syscall                  /* sys_vserver */
+       PTR     sys_waitid
+       PTR     sys_ni_syscall                  /* available, was setaltroot */
+       PTR     sys_add_key                     /* 4280 */
+       PTR     sys_request_key
+       PTR     sys_keyctl
+       PTR     sys_set_thread_area
+       PTR     sys_inotify_init
+       PTR     sys_inotify_add_watch           /* 4285 */
+       PTR     sys_inotify_rm_watch
+       PTR     sys_migrate_pages
+       PTR     sys_openat
+       PTR     sys_mkdirat
+       PTR     sys_mknodat                     /* 4290 */
+       PTR     sys_fchownat
+       PTR     sys_futimesat
+       PTR     sys_fstatat64
+       PTR     sys_unlinkat
+       PTR     sys_renameat                    /* 4295 */
+       PTR     sys_linkat
+       PTR     sys_symlinkat
+       PTR     sys_readlinkat
+       PTR     sys_fchmodat
+       PTR     sys_faccessat                   /* 4300 */
+       PTR     sys_pselect6
+       PTR     sys_ppoll
+       PTR     sys_unshare
+       PTR     sys_splice
+       PTR     sys_sync_file_range             /* 4305 */
+       PTR     sys_tee
+       PTR     sys_vmsplice
+       PTR     sys_move_pages
+       PTR     sys_set_robust_list
+       PTR     sys_get_robust_list             /* 4310 */
+       PTR     sys_kexec_load
+       PTR     sys_getcpu
+       PTR     sys_epoll_pwait
+       PTR     sys_ioprio_set
+       PTR     sys_ioprio_get                  /* 4315 */
+       PTR     sys_utimensat
+       PTR     sys_signalfd
+       PTR     sys_ni_syscall                  /* was timerfd */
+       PTR     sys_eventfd
+       PTR     sys_fallocate                   /* 4320 */
+       PTR     sys_timerfd_create
+       PTR     sys_timerfd_gettime
+       PTR     sys_timerfd_settime
+       PTR     sys_signalfd4
+       PTR     sys_eventfd2                    /* 4325 */
+       PTR     sys_epoll_create1
+       PTR     sys_dup3
+       PTR     sys_pipe2
+       PTR     sys_inotify_init1
+       PTR     sys_preadv                      /* 4330 */
+       PTR     sys_pwritev
+       PTR     sys_rt_tgsigqueueinfo
+       PTR     sys_perf_event_open
+       PTR     sys_accept4
+       PTR     sys_recvmmsg                    /* 4335 */
+       PTR     sys_fanotify_init
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
+       PTR     sys_name_to_handle_at
+       PTR     sys_open_by_handle_at           /* 4340 */
+       PTR     sys_clock_adjtime
+       PTR     sys_syncfs
+       PTR     sys_sendmmsg
+       PTR     sys_setns
+       PTR     sys_process_vm_readv            /* 4345 */
+       PTR     sys_process_vm_writev
+       PTR     sys_kcmp
+       PTR     sys_finit_module
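The conversion above drops the old two-word table entries (pointer plus argument-byte count) in favour of plain PTR slots, so the dispatcher only has to index an array of handler pointers. A minimal userspace sketch of that dispatch shape, with made-up handler names and a made-up base number, not the kernel's actual entry path:

#include <stdio.h>

/* Hypothetical handlers standing in for sys_* entry points. */
static long do_read(long a, long b, long c)  { return a + b + c; }
static long do_write(long a, long b, long c) { return a * b * c; }
static long do_ni(long a, long b, long c)    { (void)a; (void)b; (void)c; return -38; /* -ENOSYS */ }

typedef long (*syscall_fn)(long, long, long);

#define NR_BASE 4000                  /* o32 syscall numbers start at 4000 */

static const syscall_fn call_table[] = { do_read, do_write, do_ni };

static long dispatch(long nr, long a, long b, long c)
{
        long idx = nr - NR_BASE;

        if (idx < 0 || idx >= (long)(sizeof(call_table) / sizeof(call_table[0])))
                return -38;           /* unknown syscall */
        return call_table[idx](a, b, c);
}

int main(void)
{
        printf("%ld\n", dispatch(4000, 1, 2, 3));   /* do_read  -> 6  */
        printf("%ld\n", dispatch(4001, 2, 3, 4));   /* do_write -> 24 */
        printf("%ld\n", dispatch(4999, 0, 0, 0));   /* out of range -> -38 */
        return 0;
}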
index be6627ead619e72b35bea9f8d031af3b21a2b06a..57e3742fec59a19083eb3cb5f802b9e43a90b66b 100644 (file)
@@ -114,7 +114,8 @@ illegal_syscall:
        END(handle_sys64)
 
        .align  3
-sys_call_table:
+       .type   sys_call_table, @object
+EXPORT(sys_call_table)
        PTR     sys_read                        /* 5000 */
        PTR     sys_write
        PTR     sys_open
index cab150789c8d8412409506c99143a45f04717e80..2f48f5934399e3b48a14cd88f2bf84fba0d46894 100644 (file)
@@ -103,6 +103,7 @@ not_n32_scall:
 
        END(handle_sysn32)
 
+       .type   sysn32_call_table, @object
 EXPORT(sysn32_call_table)
        PTR     sys_read                        /* 6000 */
        PTR     sys_write
index 37605dc8eef7a9c72d5743ad3ad5d1f50088fdfb..f1acdb429f4fa1d89ee8db664f5fd25005c978a1 100644 (file)
@@ -53,7 +53,7 @@ NESTED(handle_sys, PT_SIZE, sp)
        sll     a3, a3, 0
 
        dsll    t0, v0, 3               # offset into table
-       ld      t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
+       ld      t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -168,7 +168,7 @@ LEAF(sys32_syscall)
        beqz    t0, einval              # do not recurse
        dsll    t1, t0, 3
        beqz    v0, einval
-       ld      t2, sys_call_table(t1)          # syscall routine
+       ld      t2, sys32_call_table(t1)                # syscall routine
 
        move    a0, a1                  # shift argument registers
        move    a1, a2
@@ -190,8 +190,8 @@ einval: li  v0, -ENOSYS
        END(sys32_syscall)
 
        .align  3
-       .type   sys_call_table,@object
-sys_call_table:
+       .type   sys32_call_table,@object
+EXPORT(sys32_call_table)
        PTR     sys32_syscall                   /* 4000 */
        PTR     sys_exit
        PTR     __sys_fork
@@ -541,4 +541,4 @@ sys_call_table:
        PTR     compat_sys_process_vm_writev
        PTR     sys_kcmp
        PTR     sys_finit_module
-       .size   sys_call_table,.-sys_call_table
+       .size   sys32_call_table,.-sys32_call_table
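The o32 compat path above indexes the renamed sys32_call_table with the scaled syscall number; the "- (__NR_O32_Linux * 8)" bias is folded into the load displacement so no runtime subtraction is needed. A tiny illustrative C equivalent (hypothetical table contents, assuming a base of 4000):

#include <stdio.h>

#define NR_BASE 4000                      /* first o32 syscall number */

static const char *table[] = { "syscall", "exit", "fork" };

int main(void)
{
        long nr = 4002;                   /* incoming syscall number */

        /* The assembly folds "- NR_BASE * 8" into the load displacement so
         * the scaled syscall number can be used directly; in C the same
         * lookup is simply table[nr - NR_BASE]. */
        printf("%s\n", table[nr - NR_BASE]);   /* prints "fork" */
        return 0;
}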
index c538d6e01b7b744cb4af330a5de15b78963cfa7f..a842154d57dc466eaba000039b3fd5e436c6cb69 100644 (file)
@@ -300,12 +300,13 @@ static void __init bootmem_init(void)
        int i;
 
        /*
-        * Init any data related to initrd. It's a nop if INITRD is
-        * not selected. Once that done we can determine the low bound
-        * of usable memory.
+        * Sanity check any INITRD first. We don't take it into account
+        * for bootmem setup initially; we rely on the end of kernel code
+        * as our memory range starting point. Once bootmem is initialized we
+        * will reserve the area used for the initrd.
         */
-       reserved_end = max(init_initrd(),
-                          (unsigned long) PFN_UP(__pa_symbol(&_end)));
+       init_initrd();
+       reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
 
        /*
         * max_low_pfn is not a number of pages. The number of pages
@@ -362,6 +363,14 @@ static void __init bootmem_init(void)
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * mapstart should be after initrd_end
+        */
+       if (initrd_end)
+               mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
        /*
         * Initialize the boot-time allocator with low memory only.
         */
index 126da74d4c5559faf40962b80bddef9a39721757..2362665ba4965f2b3ff272a34bb5aca6360f8897 100644 (file)
@@ -136,10 +136,10 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
 {
        if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
                        "smp_ipi0", NULL))
-               panic("Can't request IPI0 interrupt\n");
+               panic("Can't request IPI0 interrupt");
        if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
                        "smp_ipi1", NULL))
-               panic("Can't request IPI1 interrupt\n");
+               panic("Can't request IPI1 interrupt");
 }
 
 /*
index 5c208ed8f8561b461e3b2a8b1f0e49a8a458f8d2..0a022ee33b2a15c5c9722e2e9aa5504358ba05f4 100644 (file)
@@ -150,7 +150,6 @@ asmlinkage void start_secondary(void)
 void __irq_entry smp_call_function_interrupt(void)
 {
        irq_enter();
-       generic_smp_call_function_single_interrupt();
        generic_smp_call_function_interrupt();
        irq_exit();
 }
index 524841f0280370600966d20f38b41d0eefdb4744..f9c8746be8d66d78b3ad1fc500d72674d486cdb9 100644 (file)
@@ -330,6 +330,7 @@ void show_regs(struct pt_regs *regs)
 void show_registers(struct pt_regs *regs)
 {
        const int field = 2 * sizeof(unsigned long);
+       mm_segment_t old_fs = get_fs();
 
        __show_regs(regs);
        print_modules();
@@ -344,9 +345,13 @@ void show_registers(struct pt_regs *regs)
                        printk("*HwTLS: %0*lx\n", field, tls);
        }
 
+       if (!user_mode(regs))
+               /* Necessary for getting the correct stack content */
+               set_fs(KERNEL_DS);
        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
        printk("\n");
+       set_fs(old_fs);
 }
 
 static int regs_to_trapnr(struct pt_regs *regs)
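The show_registers() change brackets the stack dump with a saved-and-restored address limit so kernel-mode traps can read the kernel stack. The sketch below models only the save/override/restore shape in userspace; get_seg()/set_seg() are stand-ins, not the real get_fs()/set_fs():

#include <stdio.h>

enum seg { USER_SEG, KERNEL_SEG };

static enum seg current_seg = USER_SEG;

static enum seg get_seg(void)       { return current_seg; }
static void     set_seg(enum seg s) { current_seg = s; }

static void dump_stack_model(void)
{
        printf("dumping stack with %s access\n",
               current_seg == KERNEL_SEG ? "kernel" : "user");
}

int main(void)
{
        int from_user_mode = 0;           /* pretend the trap came from kernel mode */
        enum seg old = get_seg();         /* save */

        if (!from_user_mode)
                set_seg(KERNEL_SEG);      /* widen access only for the dump */
        dump_stack_model();
        set_seg(old);                     /* always restore */

        printf("restored to %s segment\n",
               current_seg == KERNEL_SEG ? "kernel" : "user");
        return 0;
}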
@@ -366,7 +371,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 
        oops_enter();
 
-       if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+       if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+                      SIGSEGV) == NOTIFY_STOP)
                sig = 0;
 
        console_verbose();
@@ -457,8 +463,8 @@ asmlinkage void do_be(struct pt_regs *regs)
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
-       if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
-           == NOTIFY_STOP)
+       if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+                      SIGBUS) == NOTIFY_STOP)
                goto out;
 
        die_if_kernel("Oops", regs);
@@ -727,8 +733,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
        siginfo_t info = {0};
 
        prev_state = exception_enter();
-       if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
-           == NOTIFY_STOP)
+       if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+                      SIGFPE) == NOTIFY_STOP)
                goto out;
        die_if_kernel("FP exception in kernel code", regs);
 
@@ -798,7 +804,8 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
                return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-       if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+       if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+                      SIGTRAP) == NOTIFY_STOP)
                return;
 
        /*
@@ -892,12 +899,14 @@ asmlinkage void do_bp(struct pt_regs *regs)
         */
        switch (bcode) {
        case BRK_KPROBE_BP:
-               if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+               if (notify_die(DIE_BREAK, "debug", regs, bcode,
+                              regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        case BRK_KPROBE_SSTEPBP:
-               if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+               if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
+                              regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
@@ -961,8 +970,8 @@ asmlinkage void do_ri(struct pt_regs *regs)
        int status = -1;
 
        prev_state = exception_enter();
-       if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
-           == NOTIFY_STOP)
+       if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+                      SIGILL) == NOTIFY_STOP)
                goto out;
 
        die_if_kernel("Reserved instruction in kernel code", regs);
@@ -1488,10 +1497,14 @@ int register_nmi_notifier(struct notifier_block *nb)
 
 void __noreturn nmi_exception_handler(struct pt_regs *regs)
 {
+       char str[100];
+
        raw_notifier_call_chain(&nmi_chain, 0, regs);
        bust_spinlocks(1);
-       printk("NMI taken!!!!\n");
-       die("NMI", regs);
+       snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
+                smp_processor_id(), regs->cp0_epc);
+       regs->cp0_epc = read_c0_errorepc();
+       die(str, regs);
 }
 
 #define VECTORSPACING 0x100    /* for EI/VI mode */
@@ -1554,7 +1567,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        unsigned char *b;
 
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
-       BUG_ON((n < 0) && (n > 9));
 
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
index eb3e186596304f480987400ae731f30726923564..85685e1cdb89479ac57de4e3614e69bf6b7cbb2c 100644 (file)
@@ -390,7 +390,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
                ret = of_irq_to_resource_table(eiu_node,
                                                ltq_eiu_irq, exin_avail);
                if (ret != exin_avail)
-                       panic("failed to load external irq resources\n");
+                       panic("failed to load external irq resources");
 
                if (request_mem_region(res.start, resource_size(&res),
                                                        res.name) < 0)
index c24924fe087da2cf05f7f1828d183c79e8f810ad..51804b10a0360317392ebdbc902e59b8e7f4343d 100644 (file)
@@ -128,7 +128,7 @@ static int pmu_enable(struct clk *clk)
        do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
 
        if (!retry)
-               panic("activating PMU module failed!\n");
+               panic("activating PMU module failed!");
 
        return 0;
 }
index 627883bc6d5f29f4a4510373ddb8e552398cb5ed..62ffd20ea86909be81906691dd1531a35733c410 100644 (file)
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
 
 static inline void local_r4k___flush_cache_all(void * args)
 {
-#if defined(CONFIG_CPU_LOONGSON2)
-       r4k_blast_scache();
-       return;
-#endif
-       r4k_blast_dcache();
-       r4k_blast_icache();
-
        switch (current_cpu_type()) {
+       case CPU_LOONGSON2:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
+               /*
+                * These caches are inclusive caches, that is, if something
+                * is not cached in the S-cache, we know it also won't be
+                * in one of the primary caches.
+                */
                r4k_blast_scache();
+               break;
+
+       default:
+               r4k_blast_dcache();
+               r4k_blast_icache();
+               break;
        }
 }
 
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
 
        if (end - start > icache_size)
                r4k_blast_icache();
-       else
-               protected_blast_icache_range(start, end);
+       else {
+               switch (boot_cpu_type()) {
+               case CPU_LOONGSON2:
+                       protected_loongson23_blast_icache_range(start, end);
+                       break;
+
+               default:
+                       protected_blast_icache_range(start, end);
+                       break;
+               }
+       }
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -609,6 +623,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
+               preempt_enable();
                __sync();
                return;
        }
@@ -650,6 +665,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
+               preempt_enable();
                __sync();
                return;
        }
@@ -1107,15 +1123,14 @@ static void probe_pcache(void)
        case CPU_ALCHEMY:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
-       }
 
-#ifdef CONFIG_CPU_LOONGSON2
-       /*
-        * LOONGSON2 has 4 way icache, but when using indexed cache op,
-        * one op will act on all 4 ways
-        */
-       c->icache.ways = 1;
-#endif
+       case CPU_LOONGSON2:
+               /*
+                * LOONGSON2 has 4 way icache, but when using indexed cache op,
+                * one op will act on all 4 ways
+                */
+               c->icache.ways = 1;
+       }
 
        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
@@ -1191,7 +1206,6 @@ static int probe_scache(void)
        return 1;
 }
 
-#if defined(CONFIG_CPU_LOONGSON2)
 static void __init loongson2_sc_init(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -1207,7 +1221,6 @@ static void __init loongson2_sc_init(void)
 
        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
-#endif
 
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
@@ -1257,11 +1270,10 @@ static void setup_scache(void)
 #endif
                return;
 
-#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
-#endif
+
        case CPU_XLP:
                /* don't need to worry about L2, fully coherent */
                return;
index f25a7e9f8cbc56e320df0c6c51cc7bf8a45cb40e..2e9418562258754dacb511341b499965022760aa 100644 (file)
@@ -297,7 +297,6 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,
 static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-       plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
@@ -308,12 +307,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
        int i;
 
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++) {
-               if (cpu_needs_post_dma_flush(dev))
+       if (cpu_needs_post_dma_flush(dev))
+               for (i = 0; i < nelems; i++, sg++)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
-       }
 }
 
 static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,17 +318,15 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 {
        int i;
 
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++) {
-               if (!plat_device_is_coherent(dev))
+       if (!plat_device_is_coherent(dev))
+               for (i = 0; i < nelems; i++, sg++)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
-       }
 }
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return plat_dma_mapping_error(dev, dma_addr);
+       return 0;
 }
 
 int mips_dma_supported(struct device *dev, u64 mask)
@@ -344,7 +339,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
        BUG_ON(direction == DMA_NONE);
 
-       plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
 }
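In the scatterlist sync paths above, the coherence test does not change from one entry to the next, so it is checked once and the loop runs only when needed. An illustrative userspace model of that hoisting (sg_model and device_is_coherent are made up):

#include <stdio.h>

struct sg_model { int offset; int length; };    /* made-up scatterlist entry */

static int device_is_coherent(void)
{
        return 0;                     /* pretend the device is not coherent */
}

static void sync_entry(const struct sg_model *sg)
{
        printf("sync offset=%d len=%d\n", sg->offset, sg->length);
}

static void sync_sg_for_device(struct sg_model *sg, int nelems)
{
        int i;

        if (!device_is_coherent())    /* checked once, not per entry */
                for (i = 0; i < nelems; i++, sg++)
                        sync_entry(sg);
}

int main(void)
{
        struct sg_model sgl[2] = { { 0, 4096 }, { 128, 512 } };

        sync_sg_for_device(sgl, 2);
        return 0;
}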
index 79bca3130bd15f51bccae23012fa9a821cadb653..30a494db99c2a0eb4d51aa64ca410de956801837 100644 (file)
 
 #define FASTPATH_SIZE  128
 
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 LEAF(tlbmiss_handler_setup_pgd)
        .space          16 * 4
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
-#endif
 
 LEAF(handle_tlbm)
        .space          FASTPATH_SIZE * 4
index bb3a5f643e974a27a7d81246d4d7bb6b5f13a390..da3b0b9c9eae0a9800dfbbd20f6717709e72bfed 100644 (file)
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
 
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#if defined(CONFIG_CPU_LOONGSON2)
 /*
  * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
  * unfortunately, itlb is not totally transparent to software.
  */
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
+static inline void flush_itlb(void)
+{
+       switch (current_cpu_type()) {
+       case CPU_LOONGSON2:
+               write_c0_diag(4);
+               break;
+       default:
+               break;
+       }
+}
 
-#endif
+static inline void flush_itlb_vm(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_EXEC)
+               flush_itlb();
+}
 
 void local_flush_tlb_all(void)
 {
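The FLUSH_ITLB/FLUSH_ITLB_VM macros become static inline functions that test the CPU type at run time. A self-contained sketch of the same macro-to-inline conversion, using placeholder names and a printf in place of the cp0 write:

#include <stdio.h>

/* Placeholder types; the real code tests current_cpu_type() and writes cp0. */
enum cpu_type { CPU_GENERIC, CPU_LOONGSON2_MODEL };

struct vma_model { int vm_exec; };

static enum cpu_type cpu = CPU_LOONGSON2_MODEL;

static inline void flush_itlb_model(void)
{
        if (cpu == CPU_LOONGSON2_MODEL)
                printf("write_c0_diag(4)\n");   /* stand-in for the cp0 write */
}

static inline void flush_itlb_vm_model(const struct vma_model *vma)
{
        if (vma->vm_exec)
                flush_itlb_model();
}

int main(void)
{
        struct vma_model text = { .vm_exec = 1 };
        struct vma_model data = { .vm_exec = 0 };

        flush_itlb_vm_model(&text);   /* flushes */
        flush_itlb_vm_model(&data);   /* no-op */
        return 0;
}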
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
-       FLUSH_ITLB;
+       flush_itlb();
        EXIT_CRITICAL(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                } else {
                        drop_mmu_context(mm, cpu);
                }
-               FLUSH_ITLB;
+               flush_itlb();
                EXIT_CRITICAL(flags);
        }
 }
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        } else {
                local_flush_tlb_all();
        }
-       FLUSH_ITLB;
+       flush_itlb();
        EXIT_CRITICAL(flags);
 }
 
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
        finish:
                write_c0_entryhi(oldpid);
-               FLUSH_ITLB_VM(vma);
+               flush_itlb_vm(vma);
                EXIT_CRITICAL(flags);
        }
 }
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
-       FLUSH_ITLB;
+       flush_itlb();
        EXIT_CRITICAL(flags);
 }
 
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
-       FLUSH_ITLB_VM(vma);
+       flush_itlb_vm(vma);
        EXIT_CRITICAL(flags);
 }
 
index 9bb3a9363b0618df3e19a43fafc68eb91dff18ba..183f2b583e4dbc7798411c4fd8c6c3927bff607f 100644 (file)
@@ -340,10 +340,6 @@ static struct work_registers build_get_work_registers(u32 **p)
 {
        struct work_registers r;
 
-       int smp_processor_id_reg;
-       int smp_processor_id_sel;
-       int smp_processor_id_shift;
-
        if (scratch_reg >= 0) {
                /* Save in CPU local C0_KScratch? */
                UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
@@ -354,25 +350,9 @@ static struct work_registers build_get_work_registers(u32 **p)
        }
 
        if (num_possible_cpus() > 1) {
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-               smp_processor_id_shift = 51;
-               smp_processor_id_reg = 20; /* XContext */
-               smp_processor_id_sel = 0;
-#else
-# ifdef CONFIG_32BIT
-               smp_processor_id_shift = 25;
-               smp_processor_id_reg = 4; /* Context */
-               smp_processor_id_sel = 0;
-# endif
-# ifdef CONFIG_64BIT
-               smp_processor_id_shift = 26;
-               smp_processor_id_reg = 4; /* Context */
-               smp_processor_id_sel = 0;
-# endif
-#endif
                /* Get smp_processor_id */
-               UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
-               UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+               UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
+               UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
 
                /* handler_reg_save index in K0 */
                UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
@@ -819,11 +799,11 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        }
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        if (pgd_reg != -1) {
                /* pgd is in pgd_reg */
                UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
        } else {
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
                /*
                 * &pgd << 11 stored in CONTEXT [23..63].
                 */
@@ -835,30 +815,18 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                /* 1 0  1 0 1  << 6  xkphys cached */
                uasm_i_ori(p, ptr, ptr, 0x540);
                uasm_i_drotr(p, ptr, ptr, 11);
-       }
 #elif defined(CONFIG_SMP)
-# ifdef         CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC uses TCBind value as "CPU" index
-        */
-       uasm_i_mfc0(p, ptr, C0_TCBIND);
-       uasm_i_dsrl_safe(p, ptr, ptr, 19);
-# else
-       /*
-        * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
-        * stored in CONTEXT.
-        */
-       uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-       uasm_i_dsrl_safe(p, ptr, ptr, 23);
-# endif
-       UASM_i_LA_mostly(p, tmp, pgdc);
-       uasm_i_daddu(p, ptr, ptr, tmp);
-       uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-       uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+               UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
+               uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+               UASM_i_LA_mostly(p, tmp, pgdc);
+               uasm_i_daddu(p, ptr, ptr, tmp);
+               uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+               uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 #else
-       UASM_i_LA_mostly(p, ptr, pgdc);
-       uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+               UASM_i_LA_mostly(p, ptr, pgdc);
+               uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 #endif
+       }
 
        uasm_l_vmalloc_done(l, *p);
 
@@ -953,31 +921,25 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 static void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
-       long pgdc = (long)pgd_current;
+       if (pgd_reg != -1) {
+               /* pgd is in pgd_reg */
+               uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
+               uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+       } else {
+               long pgdc = (long)pgd_current;
 
-       /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
+               /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC uses TCBind value as "CPU" index
-        */
-       uasm_i_mfc0(p, ptr, C0_TCBIND);
-       UASM_i_LA_mostly(p, tmp, pgdc);
-       uasm_i_srl(p, ptr, ptr, 19);
-#else
-       /*
-        * smp_processor_id() << 2 is stored in CONTEXT.
-        */
-       uasm_i_mfc0(p, ptr, C0_CONTEXT);
-       UASM_i_LA_mostly(p, tmp, pgdc);
-       uasm_i_srl(p, ptr, ptr, 23);
-#endif
-       uasm_i_addu(p, ptr, tmp, ptr);
+               uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
+               UASM_i_LA_mostly(p, tmp, pgdc);
+               uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+               uasm_i_addu(p, ptr, tmp, ptr);
 #else
-       UASM_i_LA_mostly(p, ptr, pgdc);
+               UASM_i_LA_mostly(p, ptr, pgdc);
 #endif
-       uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
-       uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+               uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+               uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+       }
        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -1349,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
         * need three, with the second nop'ed and the third being
         * unused.
         */
-       /* Loongson2 ebase is different than r4k, we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-       if ((p - tlb_handler) > 64)
-               panic("TLB refill handler space exceeded");
-#else
-       if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
-           || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
-               && uasm_insn_has_bdelay(relocs,
-                                       tlb_handler + MIPS64_REFILL_INSNS - 3)))
-               panic("TLB refill handler space exceeded");
-#endif
-
-       /*
-        * Now fold the handler in the TLB refill handler space.
-        */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-       f = final_handler;
-       /* Simplest case, just copy the handler. */
-       uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-       final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
-       f = final_handler + MIPS64_REFILL_INSNS;
-       if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
-               /* Just copy the handler. */
-               uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-               final_len = p - tlb_handler;
-       } else {
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-               const enum label_id ls = label_tlb_huge_update;
-#else
-               const enum label_id ls = label_vmalloc;
-#endif
-               u32 *split;
-               int ov = 0;
-               int i;
-
-               for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
-                       ;
-               BUG_ON(i == ARRAY_SIZE(labels));
-               split = labels[i].addr;
-
-               /*
-                * See if we have overflown one way or the other.
-                */
-               if (split > tlb_handler + MIPS64_REFILL_INSNS ||
-                   split < p - MIPS64_REFILL_INSNS)
-                       ov = 1;
-
-               if (ov) {
+       switch (boot_cpu_type()) {
+       default:
+               if (sizeof(long) == 4) {
+       case CPU_LOONGSON2:
+               /* Loongson2 ebase is different than r4k, we have more space */
+                       if ((p - tlb_handler) > 64)
+                               panic("TLB refill handler space exceeded");
                        /*
-                        * Split two instructions before the end.  One
-                        * for the branch and one for the instruction
-                        * in the delay slot.
+                        * Now fold the handler in the TLB refill handler space.
                         */
-                       split = tlb_handler + MIPS64_REFILL_INSNS - 2;
-
+                       f = final_handler;
+                       /* Simplest case, just copy the handler. */
+                       uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+                       final_len = p - tlb_handler;
+                       break;
+               } else {
+                       if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+                           || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+                               && uasm_insn_has_bdelay(relocs,
+                                                       tlb_handler + MIPS64_REFILL_INSNS - 3)))
+                               panic("TLB refill handler space exceeded");
                        /*
-                        * If the branch would fall in a delay slot,
-                        * we must back up an additional instruction
-                        * so that it is no longer in a delay slot.
+                        * Now fold the handler in the TLB refill handler space.
                         */
-                       if (uasm_insn_has_bdelay(relocs, split - 1))
-                               split--;
-               }
-               /* Copy first part of the handler. */
-               uasm_copy_handler(relocs, labels, tlb_handler, split, f);
-               f += split - tlb_handler;
-
-               if (ov) {
-                       /* Insert branch. */
-                       uasm_l_split(&l, final_handler);
-                       uasm_il_b(&f, &r, label_split);
-                       if (uasm_insn_has_bdelay(relocs, split))
-                               uasm_i_nop(&f);
-                       else {
-                               uasm_copy_handler(relocs, labels,
-                                                 split, split + 1, f);
-                               uasm_move_labels(labels, f, f + 1, -1);
-                               f++;
-                               split++;
+                       f = final_handler + MIPS64_REFILL_INSNS;
+                       if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
+                               /* Just copy the handler. */
+                               uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+                               final_len = p - tlb_handler;
+                       } else {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+                               const enum label_id ls = label_tlb_huge_update;
+#else
+                               const enum label_id ls = label_vmalloc;
+#endif
+                               u32 *split;
+                               int ov = 0;
+                               int i;
+
+                               for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+                                       ;
+                               BUG_ON(i == ARRAY_SIZE(labels));
+                               split = labels[i].addr;
+
+                               /*
+                                * See if we have overflown one way or the other.
+                                */
+                               if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+                                   split < p - MIPS64_REFILL_INSNS)
+                                       ov = 1;
+
+                               if (ov) {
+                                       /*
+                                        * Split two instructions before the end.  One
+                                        * for the branch and one for the instruction
+                                        * in the delay slot.
+                                        */
+                                       split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+                                       /*
+                                        * If the branch would fall in a delay slot,
+                                        * we must back up an additional instruction
+                                        * so that it is no longer in a delay slot.
+                                        */
+                                       if (uasm_insn_has_bdelay(relocs, split - 1))
+                                               split--;
+                               }
+                               /* Copy first part of the handler. */
+                               uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+                               f += split - tlb_handler;
+
+                               if (ov) {
+                                       /* Insert branch. */
+                                       uasm_l_split(&l, final_handler);
+                                       uasm_il_b(&f, &r, label_split);
+                                       if (uasm_insn_has_bdelay(relocs, split))
+                                               uasm_i_nop(&f);
+                                       else {
+                                               uasm_copy_handler(relocs, labels,
+                                                                 split, split + 1, f);
+                                               uasm_move_labels(labels, f, f + 1, -1);
+                                               f++;
+                                               split++;
+                                       }
+                               }
+
+                               /* Copy the rest of the handler. */
+                               uasm_copy_handler(relocs, labels, split, p, final_handler);
+                               final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+                                           (p - split);
                        }
                }
-
-               /* Copy the rest of the handler. */
-               uasm_copy_handler(relocs, labels, split, p, final_handler);
-               final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
-                           (p - split);
+               break;
        }
-#endif /* CONFIG_64BIT */
 
        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB refill handler (%u instructions).\n",
@@ -1451,28 +1418,30 @@ static void build_r4000_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
 
-static void build_r4000_setup_pgd(void)
+static void build_setup_pgd(void)
 {
        const int a0 = 4;
-       const int a1 = 5;
+       const int __maybe_unused a1 = 5;
+       const int __maybe_unused a2 = 6;
        u32 *p = tlbmiss_handler_setup_pgd;
        const int tlbmiss_handler_setup_pgd_size =
                tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
-       struct uasm_label *l = labels;
-       struct uasm_reloc *r = relocs;
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+       long pgdc = (long)pgd_current;
+#endif
 
        memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
                                        sizeof(tlbmiss_handler_setup_pgd[0]));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
-
        pgd_reg = allocate_kscratch();
-
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        if (pgd_reg == -1) {
+               struct uasm_label *l = labels;
+               struct uasm_reloc *r = relocs;
+
                /* PGD << 11 in c0_Context */
                /*
                 * If it is a ckseg0 address, convert to a physical
@@ -1494,6 +1463,26 @@ static void build_r4000_setup_pgd(void)
                uasm_i_jr(&p, 31);
                UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
        }
+#else
+#ifdef CONFIG_SMP
+       /* Save PGD to pgd_current[smp_processor_id()] */
+       UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
+       UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
+       UASM_i_LA_mostly(&p, a2, pgdc);
+       UASM_i_ADDU(&p, a2, a2, a1);
+       UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#else
+       UASM_i_LA_mostly(&p, a2, pgdc);
+       UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#endif /* SMP */
+       uasm_i_jr(&p, 31);
+
+       /* if pgd_reg is allocated, save PGD also to scratch register */
+       if (pgd_reg != -1)
+               UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+       else
+               uasm_i_nop(&p);
+#endif
        if (p >= tlbmiss_handler_setup_pgd_end)
                panic("tlbmiss_handler_setup_pgd space exceeded");
 
@@ -1504,7 +1493,6 @@ static void build_r4000_setup_pgd(void)
        dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
                                        tlbmiss_handler_setup_pgd_size);
 }
-#endif
 
 static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
@@ -2197,10 +2185,8 @@ static void flush_tlb_handlers(void)
                           (unsigned long)handle_tlbs_end);
        local_flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm_end);
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
                           (unsigned long)tlbmiss_handler_setup_pgd_end);
-#endif
 }
 
 void build_tlb_refill_handler(void)
@@ -2232,6 +2218,7 @@ void build_tlb_refill_handler(void)
                if (!run_once) {
                        if (!cpu_has_local_ebase)
                                build_r3000_tlb_refill_handler();
+                       build_setup_pgd();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
@@ -2255,9 +2242,7 @@ void build_tlb_refill_handler(void)
        default:
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-                       build_r4000_setup_pgd();
-#endif
+                       build_setup_pgd();
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
index c69da37346995e94a5e5d4a33dafce0585b84ed9..be4a1092fd534f23827d8f4c9503a0970141cb3e 100644 (file)
@@ -37,7 +37,6 @@
 #include <asm/irq_regs.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
-#include <asm/mips-boards/piix4.h>
 #include <asm/gt64120.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/msc01_pci.h>
index 6f8feb9efcff9e64174534233133b6d097d957fc..c0eded01fde96ef23676eabc28e72ceb286467ad 100644 (file)
@@ -245,7 +245,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
        return threadmode;
 
 unsupp:
-       panic("Unsupported CPU mask %lx\n",
+       panic("Unsupported CPU mask %lx",
                (unsigned long)cpumask_bits(wakeup_mask)[0]);
        return 0;
 }
index 07ada7f8441ead44606e732f179a9d2dc4d0e00e..df36e2327c54572cca76848547cbbbf030c554ac 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <asm/mips-boards/piix4.h>
 
 /* PCI interrupt pins */
 #define PCIA           1
@@ -53,7 +54,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 static void malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
-       static int piixirqmap[16] = {  /* PIIX PIRQC[A:D] irq mappings */
+       /* PIIX PIRQC[A:D] irq mappings */
+       static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
                0,  0,  0,  3,
                4,  5,  6,  7,
                0,  9, 10, 11,
@@ -63,11 +65,12 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
 
        /* Interrogate PIIX4 to get PCI IRQ mapping */
        for (i = 0; i <= 3; i++) {
-               pci_read_config_byte(pdev, 0x60+i, &reg_val);
-               if (reg_val & 0x80)
+               pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC+i, &reg_val);
+               if (reg_val & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE)
                        pci_irq[PCIA+i] = 0;    /* Disabled */
                else
-                       pci_irq[PCIA+i] = piixirqmap[reg_val & 15];
+                       pci_irq[PCIA+i] = piixirqmap[reg_val &
+                               PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK];
        }
 
        /* Done by YAMON 2.00 onwards */
@@ -76,8 +79,9 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
                 * Set top of main memory accessible by ISA or DMA
                 * devices to 16 Mb.
                 */
-               pci_read_config_byte(pdev, 0x69, &reg_val);
-               pci_write_config_byte(pdev, 0x69, reg_val | 0xf0);
+               pci_read_config_byte(pdev, PIIX4_FUNC0_TOM, &reg_val);
+               pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val |
+                               PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK);
        }
 }
 
@@ -93,10 +97,14 @@ static void malta_piix_func1_fixup(struct pci_dev *pdev)
                /*
                 * IDE Decode enable.
                 */
-               pci_read_config_byte(pdev, 0x41, &reg_val);
-               pci_write_config_byte(pdev, 0x41, reg_val|0x80);
-               pci_read_config_byte(pdev, 0x43, &reg_val);
-               pci_write_config_byte(pdev, 0x43, reg_val|0x80);
+               pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+                       &reg_val);
+               pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+                       reg_val|PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN);
+               pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+                       &reg_val);
+               pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+                       reg_val|PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN);
        }
 }
 
@@ -108,10 +116,12 @@ static void quirk_dlcsetup(struct pci_dev *dev)
 {
        u8 odlc, ndlc;
 
-       (void) pci_read_config_byte(dev, 0x82, &odlc);
+       (void) pci_read_config_byte(dev, PIIX4_FUNC0_DLC, &odlc);
        /* Enable passive releases and delayed transaction */
-       ndlc = odlc | 7;
-       (void) pci_write_config_byte(dev, 0x82, ndlc);
+       ndlc = odlc | PIIX4_FUNC0_DLC_USBPR_EN |
+                     PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN |
+                     PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN;
+       (void) pci_write_config_byte(dev, PIIX4_FUNC0_DLC, ndlc);
 }
 
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
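The Malta PIIX4 fixups now read like the register map because raw offsets such as 0x60 and masks such as 0x80 are replaced by named constants. A userspace sketch of the same idea; the constants and the fake config-space contents below are illustrative, not the real PIIX4 programming model:

#include <stdio.h>

/* Illustrative offsets and masks only; not the real PIIX4 register layout. */
#define FUNC0_PIRQRC               0x60
#define FUNC0_PIRQRC_ROUTING_MASK  0x0f
#define FUNC0_PIRQRC_ROUTING_OFF   0x80

static unsigned char read_config_byte_model(int reg)
{
        /* pretend PIRQA..D route to IRQ 10, IRQ 11, disabled, IRQ 9 */
        static const unsigned char regs[4] = { 0x0a, 0x0b, 0x80, 0x09 };

        return regs[reg - FUNC0_PIRQRC];
}

int main(void)
{
        int pin;

        for (pin = 0; pin < 4; pin++) {
                unsigned char val = read_config_byte_model(FUNC0_PIRQRC + pin);

                if (val & FUNC0_PIRQRC_ROUTING_OFF)
                        printf("PIRQ%c: disabled\n", 'A' + pin);
                else
                        printf("PIRQ%c: irq %d\n", 'A' + pin,
                               val & FUNC0_PIRQRC_ROUTING_MASK);
        }
        return 0;
}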
index 18517dd0f7090987fe182df3d2a4dbe4e62842cf..d471a26dd5f891664fbed0bcc6d330f73027cffd 100644 (file)
@@ -363,9 +363,6 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
        spin_lock_init(&apc->lock);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
-       if (!res)
-               return -EINVAL;
-
        apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->cfg_base))
                return PTR_ERR(apc->cfg_base);
index 65ec032fa0b442367c93b70cf8599fb5bd03fdd1..785b2659b519bce10085ee6bd42831937c616d4b 100644 (file)
@@ -362,25 +362,16 @@ static int ar724x_pci_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
-       if (!res)
-               return -EINVAL;
-
        apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->ctrl_base))
                return PTR_ERR(apc->ctrl_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
-       if (!res)
-               return -EINVAL;
-
        apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->devcfg_base))
                return PTR_ERR(apc->devcfg_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
-       if (!res)
-               return -EINVAL;
-
        apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->crp_base))
                return PTR_ERR(apc->crp_base);
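The removed "if (!res) return -EINVAL;" checks rely on devm_ioremap_resource() validating the resource itself and returning an error pointer. A minimal userspace model of pushing that validation into the helper (names and behaviour are stand-ins, not the devm API):

#include <stdio.h>

struct resource_model { unsigned long start, size; };   /* made-up resource */

/* The helper owns the validity check, so callers can pass the lookup result
 * straight through instead of checking for NULL themselves. */
static void *ioremap_resource_model(const struct resource_model *res)
{
        if (res == NULL)
                return NULL;
        return (void *)res->start;    /* stand-in for the real mapping */
}

int main(void)
{
        struct resource_model cfg = { 0x18000000UL, 0x1000UL };

        void *ok  = ioremap_resource_model(&cfg);
        void *bad = ioremap_resource_model(NULL);

        printf("cfg_base: %p\n", ok);
        printf("missing resource: %s\n", bad ? "mapped" : "rejected by helper");
        return 0;
}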
index 33e7aa52d9c4451ca352221921e27c134246a52d..1bf60b12737746d19cfec9c8b0512e6c419a2a5c 100644 (file)
@@ -120,51 +120,37 @@ static void pcibios_scanbus(struct pci_controller *hose)
 #ifdef CONFIG_OF
 void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
 {
-       const __be32 *ranges;
-       int rlen;
-       int pna = of_n_addr_cells(node);
-       int np = pna + 5;
+       struct of_pci_range range;
+       struct of_pci_range_parser parser;
 
        pr_info("PCI host bridge %s ranges:\n", node->full_name);
-       ranges = of_get_property(node, "ranges", &rlen);
-       if (ranges == NULL)
-               return;
        hose->of_node = node;
 
-       while ((rlen -= np * 4) >= 0) {
-               u32 pci_space;
+       if (of_pci_range_parser_init(&parser, node))
+               return;
+
+       for_each_of_pci_range(&parser, &range) {
                struct resource *res = NULL;
-               u64 addr, size;
-
-               pci_space = be32_to_cpup(&ranges[0]);
-               addr = of_translate_address(node, ranges + 3);
-               size = of_read_number(ranges + pna + 3, 2);
-               ranges += np;
-               switch ((pci_space >> 24) & 0x3) {
-               case 1:         /* PCI IO space */
+
+               switch (range.flags & IORESOURCE_TYPE_BITS) {
+               case IORESOURCE_IO:
                        pr_info("  IO 0x%016llx..0x%016llx\n",
-                                       addr, addr + size - 1);
+                               range.cpu_addr,
+                               range.cpu_addr + range.size - 1);
                        hose->io_map_base =
-                               (unsigned long)ioremap(addr, size);
+                               (unsigned long)ioremap(range.cpu_addr,
+                                                      range.size);
                        res = hose->io_resource;
-                       res->flags = IORESOURCE_IO;
                        break;
-               case 2:         /* PCI Memory space */
-               case 3:         /* PCI 64 bits Memory space */
+               case IORESOURCE_MEM:
                        pr_info(" MEM 0x%016llx..0x%016llx\n",
-                                       addr, addr + size - 1);
+                               range.cpu_addr,
+                               range.cpu_addr + range.size - 1);
                        res = hose->mem_resource;
-                       res->flags = IORESOURCE_MEM;
                        break;
                }
-               if (res != NULL) {
-                       res->start = addr;
-                       res->name = node->full_name;
-                       res->end = res->start + size - 1;
-                       res->parent = NULL;
-                       res->sibling = NULL;
-                       res->child = NULL;
-               }
+               if (res != NULL)
+                       of_pci_range_to_resource(&range, node, res);
        }
 }
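pci_load_of_ranges() now walks the ranges property through the of_pci_range parser instead of open-coding the cell arithmetic. The sketch below shows only the iterator shape, with an invented record layout, not the real OF binding:

#include <stdio.h>

/* Invented record layout and helper names; only the iterator shape matters. */
struct range_model { unsigned flags; unsigned long long cpu_addr, size; };

struct parser_model { const struct range_model *cur, *end; };

static void parser_init(struct parser_model *p,
                        const struct range_model *tbl, int n)
{
        p->cur = tbl;
        p->end = tbl + n;
}

static const struct range_model *parser_next(struct parser_model *p)
{
        return p->cur < p->end ? p->cur++ : NULL;
}

int main(void)
{
        const struct range_model ranges[] = {
                { 1, 0x18000000ULL, 0x00010000ULL },   /* flags 1: IO-like  */
                { 2, 0x10000000ULL, 0x08000000ULL },   /* flags 2: MEM-like */
        };
        struct parser_model p;
        const struct range_model *r;

        parser_init(&p, ranges, 2);
        while ((r = parser_next(&p)) != NULL)
                printf("%s 0x%llx..0x%llx\n", r->flags == 1 ? " IO" : "MEM",
                       r->cpu_addr, r->cpu_addr + r->size - 1);
        return 0;
}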
 
diff --git a/arch/mips/powertv/Kconfig b/arch/mips/powertv/Kconfig
deleted file mode 100644 (file)
index dd91fba..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-config BOOTLOADER_FAMILY
-       string "POWERTV Bootloader Family string"
-       default "85"
-       depends on POWERTV
-       help
-         This value should be specified when the bootloader driver is disabled
-         and must be exactly two characters long. Families supported are:
-           R1 - RNG-100  R2 - RNG-200
-           A1 - Class A  B1 - Class B
-           E1 - Class E  F1 - Class F
-           44 - 45xx     46 - 46xx
-           85 - 85xx     86 - 86xx
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
deleted file mode 100644 (file)
index 39ca9f8..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
-# Portions copyright (C)  2009 Cisco Systems, Inc.
-#
-# This program is free software; you can distribute it and/or modify it
-# under the terms of the GNU General Public License (Version 2) as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# Makefile for the Cisco PowerTV-specific kernel interface routines
-# under Linux.
-#
-
-obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
-       asic/ pci/
-
-obj-$(CONFIG_USB) += powertv-usb.o
diff --git a/arch/mips/powertv/Platform b/arch/mips/powertv/Platform
deleted file mode 100644 (file)
index 4eb5af1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Cisco PowerTV Platform
-#
-platform-$(CONFIG_POWERTV)     += powertv/
-cflags-$(CONFIG_POWERTV)       +=                                      \
-               -I$(srctree)/arch/mips/include/asm/mach-powertv
-load-$(CONFIG_POWERTV)         += 0xffffffff90800000
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
deleted file mode 100644 (file)
index 35dcc53..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Copyright (C) 2009  Scientific-Atlanta, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-
-obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
-       asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
-       prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
deleted file mode 100644 (file)
index 2f539b4..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Calliope ASIC.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * Description:         Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define CALLIOPE_ADDR(x)       (CALLIOPE_IO_BASE + (x))
-
-const struct register_map calliope_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
-       .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
-       .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
-
-       .chipver3 = {.phys = CALLIOPE_ADDR(0xA00800)},
-       .chipver2 = {.phys = CALLIOPE_ADDR(0xA00804)},
-       .chipver1 = {.phys = CALLIOPE_ADDR(0xA00808)},
-       .chipver0 = {.phys = CALLIOPE_ADDR(0xA0080c)},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = CALLIOPE_ADDR(0xA01800)},
-       .uart1_inten = {.phys = CALLIOPE_ADDR(0xA01804)},
-       .uart1_config1 = {.phys = CALLIOPE_ADDR(0xA01808)},
-       .uart1_config2 = {.phys = CALLIOPE_ADDR(0xA0180C)},
-       .uart1_divisorhi = {.phys = CALLIOPE_ADDR(0xA01810)},
-       .uart1_divisorlo = {.phys = CALLIOPE_ADDR(0xA01814)},
-       .uart1_data = {.phys = CALLIOPE_ADDR(0xA01818)},
-       .uart1_status = {.phys = CALLIOPE_ADDR(0xA0181C)},
-
-       .int_stat_3 = {.phys = CALLIOPE_ADDR(0xA02800)},
-       .int_stat_2 = {.phys = CALLIOPE_ADDR(0xA02804)},
-       .int_stat_1 = {.phys = CALLIOPE_ADDR(0xA02808)},
-       .int_stat_0 = {.phys = CALLIOPE_ADDR(0xA0280c)},
-       .int_config = {.phys = CALLIOPE_ADDR(0xA02810)},
-       .int_int_scan = {.phys = CALLIOPE_ADDR(0xA02818)},
-       .ien_int_3 = {.phys = CALLIOPE_ADDR(0xA02830)},
-       .ien_int_2 = {.phys = CALLIOPE_ADDR(0xA02834)},
-       .ien_int_1 = {.phys = CALLIOPE_ADDR(0xA02838)},
-       .ien_int_0 = {.phys = CALLIOPE_ADDR(0xA0283c)},
-       .int_level_3_3 = {.phys = CALLIOPE_ADDR(0xA02880)},
-       .int_level_3_2 = {.phys = CALLIOPE_ADDR(0xA02884)},
-       .int_level_3_1 = {.phys = CALLIOPE_ADDR(0xA02888)},
-       .int_level_3_0 = {.phys = CALLIOPE_ADDR(0xA0288c)},
-       .int_level_2_3 = {.phys = CALLIOPE_ADDR(0xA02890)},
-       .int_level_2_2 = {.phys = CALLIOPE_ADDR(0xA02894)},
-       .int_level_2_1 = {.phys = CALLIOPE_ADDR(0xA02898)},
-       .int_level_2_0 = {.phys = CALLIOPE_ADDR(0xA0289c)},
-       .int_level_1_3 = {.phys = CALLIOPE_ADDR(0xA028a0)},
-       .int_level_1_2 = {.phys = CALLIOPE_ADDR(0xA028a4)},
-       .int_level_1_1 = {.phys = CALLIOPE_ADDR(0xA028a8)},
-       .int_level_1_0 = {.phys = CALLIOPE_ADDR(0xA028ac)},
-       .int_level_0_3 = {.phys = CALLIOPE_ADDR(0xA028b0)},
-       .int_level_0_2 = {.phys = CALLIOPE_ADDR(0xA028b4)},
-       .int_level_0_1 = {.phys = CALLIOPE_ADDR(0xA028b8)},
-       .int_level_0_0 = {.phys = CALLIOPE_ADDR(0xA028bc)},
-       .int_docsis_en = {.phys = CALLIOPE_ADDR(0xA028F4)},
-
-       .mips_pll_setup = {.phys = CALLIOPE_ADDR(0x980000)},
-       .fs432x4b4_usb_ctl = {.phys = CALLIOPE_ADDR(0x980030)},
-       .test_bus = {.phys = CALLIOPE_ADDR(0x9800CC)},
-       .crt_spare = {.phys = CALLIOPE_ADDR(0x9800d4)},
-       .usb2_ohci_int_mask = {.phys = CALLIOPE_ADDR(0x9A000c)},
-       .usb2_strap = {.phys = CALLIOPE_ADDR(0x9A0014)},
-       .ehci_hcapbase = {.phys = CALLIOPE_ADDR(0x9BFE00)},
-       .ohci_hc_revision = {.phys = CALLIOPE_ADDR(0x9BFC00)},
-       .bcm1_bs_lmi_steer = {.phys = CALLIOPE_ADDR(0x9E0004)},
-       .usb2_control = {.phys = CALLIOPE_ADDR(0x9E0054)},
-       .usb2_stbus_obc = {.phys = CALLIOPE_ADDR(0x9BFF00)},
-       .usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)},
-       .usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)},
-
-       .pcie_regs = {.phys = 0x000000},        /* -doesn't exist- */
-       .tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)},
-       .tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)},
-       .gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)},
-       .gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)},
-       .gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)},
-       .watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)},
-       .front_panel = {.phys = 0x000000},      /* -not used- */
-};
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
deleted file mode 100644 (file)
index 7f8f342..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Cronus ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * Description:         Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
-
-const struct register_map cronus_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
-       .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
-       .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
-
-       .chipver3 = {.phys = CRONUS_ADDR(0x2A0800)},
-       .chipver2 = {.phys = CRONUS_ADDR(0x2A0804)},
-       .chipver1 = {.phys = CRONUS_ADDR(0x2A0808)},
-       .chipver0 = {.phys = CRONUS_ADDR(0x2A080C)},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = CRONUS_ADDR(0x2A1800)},
-       .uart1_inten = {.phys = CRONUS_ADDR(0x2A1804)},
-       .uart1_config1 = {.phys = CRONUS_ADDR(0x2A1808)},
-       .uart1_config2 = {.phys = CRONUS_ADDR(0x2A180C)},
-       .uart1_divisorhi = {.phys = CRONUS_ADDR(0x2A1810)},
-       .uart1_divisorlo = {.phys = CRONUS_ADDR(0x2A1814)},
-       .uart1_data = {.phys = CRONUS_ADDR(0x2A1818)},
-       .uart1_status = {.phys = CRONUS_ADDR(0x2A181C)},
-
-       .int_stat_3 = {.phys = CRONUS_ADDR(0x2A2800)},
-       .int_stat_2 = {.phys = CRONUS_ADDR(0x2A2804)},
-       .int_stat_1 = {.phys = CRONUS_ADDR(0x2A2808)},
-       .int_stat_0 = {.phys = CRONUS_ADDR(0x2A280C)},
-       .int_config = {.phys = CRONUS_ADDR(0x2A2810)},
-       .int_int_scan = {.phys = CRONUS_ADDR(0x2A2818)},
-       .ien_int_3 = {.phys = CRONUS_ADDR(0x2A2830)},
-       .ien_int_2 = {.phys = CRONUS_ADDR(0x2A2834)},
-       .ien_int_1 = {.phys = CRONUS_ADDR(0x2A2838)},
-       .ien_int_0 = {.phys = CRONUS_ADDR(0x2A283C)},
-       .int_level_3_3 = {.phys = CRONUS_ADDR(0x2A2880)},
-       .int_level_3_2 = {.phys = CRONUS_ADDR(0x2A2884)},
-       .int_level_3_1 = {.phys = CRONUS_ADDR(0x2A2888)},
-       .int_level_3_0 = {.phys = CRONUS_ADDR(0x2A288C)},
-       .int_level_2_3 = {.phys = CRONUS_ADDR(0x2A2890)},
-       .int_level_2_2 = {.phys = CRONUS_ADDR(0x2A2894)},
-       .int_level_2_1 = {.phys = CRONUS_ADDR(0x2A2898)},
-       .int_level_2_0 = {.phys = CRONUS_ADDR(0x2A289C)},
-       .int_level_1_3 = {.phys = CRONUS_ADDR(0x2A28A0)},
-       .int_level_1_2 = {.phys = CRONUS_ADDR(0x2A28A4)},
-       .int_level_1_1 = {.phys = CRONUS_ADDR(0x2A28A8)},
-       .int_level_1_0 = {.phys = CRONUS_ADDR(0x2A28AC)},
-       .int_level_0_3 = {.phys = CRONUS_ADDR(0x2A28B0)},
-       .int_level_0_2 = {.phys = CRONUS_ADDR(0x2A28B4)},
-       .int_level_0_1 = {.phys = CRONUS_ADDR(0x2A28B8)},
-       .int_level_0_0 = {.phys = CRONUS_ADDR(0x2A28BC)},
-       .int_docsis_en = {.phys = CRONUS_ADDR(0x2A28F4)},
-
-       .mips_pll_setup = {.phys = CRONUS_ADDR(0x1C0000)},
-       .fs432x4b4_usb_ctl = {.phys = CRONUS_ADDR(0x1C0028)},
-       .test_bus = {.phys = CRONUS_ADDR(0x1C00CC)},
-       .crt_spare = {.phys = CRONUS_ADDR(0x1c00d4)},
-       .usb2_ohci_int_mask = {.phys = CRONUS_ADDR(0x20000C)},
-       .usb2_strap = {.phys = CRONUS_ADDR(0x200014)},
-       .ehci_hcapbase = {.phys = CRONUS_ADDR(0x21FE00)},
-       .ohci_hc_revision = {.phys = CRONUS_ADDR(0x21fc00)},
-       .bcm1_bs_lmi_steer = {.phys = CRONUS_ADDR(0x2E0008)},
-       .usb2_control = {.phys = CRONUS_ADDR(0x2E004C)},
-       .usb2_stbus_obc = {.phys = CRONUS_ADDR(0x21FF00)},
-       .usb2_stbus_mess_size = {.phys = CRONUS_ADDR(0x21FF04)},
-       .usb2_stbus_chunk_size = {.phys = CRONUS_ADDR(0x21FF08)},
-
-       .pcie_regs = {.phys = CRONUS_ADDR(0x220000)},
-       .tim_ch = {.phys = CRONUS_ADDR(0x2A2C10)},
-       .tim_cl = {.phys = CRONUS_ADDR(0x2A2C14)},
-       .gpio_dout = {.phys = CRONUS_ADDR(0x2A2C20)},
-       .gpio_din = {.phys = CRONUS_ADDR(0x2A2C24)},
-       .gpio_dir = {.phys = CRONUS_ADDR(0x2A2C2C)},
-       .watchdog = {.phys = CRONUS_ADDR(0x2A2C30)},
-       .front_panel = {.phys = CRONUS_ADDR(0x2A3800)},
-};
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
deleted file mode 100644 (file)
index 1265b49..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Locations of devices in the Gaia ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      David VomLehn
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-const struct register_map gaia_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
-       .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
-       .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
-
-       .chipver3 = {.phys = GAIA_IO_BASE + 0x2A0800},
-       .chipver2 = {.phys = GAIA_IO_BASE + 0x2A0804},
-       .chipver1 = {.phys = GAIA_IO_BASE + 0x2A0808},
-       .chipver0 = {.phys = GAIA_IO_BASE + 0x2A080C},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = GAIA_IO_BASE + 0x2A1800},
-       .uart1_inten = {.phys = GAIA_IO_BASE + 0x2A1804},
-       .uart1_config1 = {.phys = GAIA_IO_BASE + 0x2A1808},
-       .uart1_config2 = {.phys = GAIA_IO_BASE + 0x2A180C},
-       .uart1_divisorhi = {.phys = GAIA_IO_BASE + 0x2A1810},
-       .uart1_divisorlo = {.phys = GAIA_IO_BASE + 0x2A1814},
-       .uart1_data = {.phys = GAIA_IO_BASE + 0x2A1818},
-       .uart1_status = {.phys = GAIA_IO_BASE + 0x2A181C},
-
-       .int_stat_3 = {.phys = GAIA_IO_BASE + 0x2A2800},
-       .int_stat_2 = {.phys = GAIA_IO_BASE + 0x2A2804},
-       .int_stat_1 = {.phys = GAIA_IO_BASE + 0x2A2808},
-       .int_stat_0 = {.phys = GAIA_IO_BASE + 0x2A280C},
-       .int_config = {.phys = GAIA_IO_BASE + 0x2A2810},
-       .int_int_scan = {.phys = GAIA_IO_BASE + 0x2A2818},
-       .ien_int_3 = {.phys = GAIA_IO_BASE + 0x2A2830},
-       .ien_int_2 = {.phys = GAIA_IO_BASE + 0x2A2834},
-       .ien_int_1 = {.phys = GAIA_IO_BASE + 0x2A2838},
-       .ien_int_0 = {.phys = GAIA_IO_BASE + 0x2A283C},
-       .int_level_3_3 = {.phys = GAIA_IO_BASE + 0x2A2880},
-       .int_level_3_2 = {.phys = GAIA_IO_BASE + 0x2A2884},
-       .int_level_3_1 = {.phys = GAIA_IO_BASE + 0x2A2888},
-       .int_level_3_0 = {.phys = GAIA_IO_BASE + 0x2A288C},
-       .int_level_2_3 = {.phys = GAIA_IO_BASE + 0x2A2890},
-       .int_level_2_2 = {.phys = GAIA_IO_BASE + 0x2A2894},
-       .int_level_2_1 = {.phys = GAIA_IO_BASE + 0x2A2898},
-       .int_level_2_0 = {.phys = GAIA_IO_BASE + 0x2A289C},
-       .int_level_1_3 = {.phys = GAIA_IO_BASE + 0x2A28A0},
-       .int_level_1_2 = {.phys = GAIA_IO_BASE + 0x2A28A4},
-       .int_level_1_1 = {.phys = GAIA_IO_BASE + 0x2A28A8},
-       .int_level_1_0 = {.phys = GAIA_IO_BASE + 0x2A28AC},
-       .int_level_0_3 = {.phys = GAIA_IO_BASE + 0x2A28B0},
-       .int_level_0_2 = {.phys = GAIA_IO_BASE + 0x2A28B4},
-       .int_level_0_1 = {.phys = GAIA_IO_BASE + 0x2A28B8},
-       .int_level_0_0 = {.phys = GAIA_IO_BASE + 0x2A28BC},
-       .int_docsis_en = {.phys = GAIA_IO_BASE + 0x2A28F4},
-
-       .mips_pll_setup = {.phys = GAIA_IO_BASE + 0x1C0000},
-       .fs432x4b4_usb_ctl = {.phys = GAIA_IO_BASE + 0x1C0024},
-       .test_bus = {.phys = GAIA_IO_BASE + 0x1C00CC},
-       .crt_spare = {.phys = GAIA_IO_BASE + 0x1c0108},
-       .usb2_ohci_int_mask = {.phys = GAIA_IO_BASE + 0x20000C},
-       .usb2_strap = {.phys = GAIA_IO_BASE + 0x200014},
-       .ehci_hcapbase = {.phys = GAIA_IO_BASE + 0x21FE00},
-       .ohci_hc_revision = {.phys = GAIA_IO_BASE + 0x21fc00},
-       .bcm1_bs_lmi_steer = {.phys = GAIA_IO_BASE + 0x2E0004},
-       .usb2_control = {.phys = GAIA_IO_BASE + 0x2E004C},
-       .usb2_stbus_obc = {.phys = GAIA_IO_BASE + 0x21FF00},
-       .usb2_stbus_mess_size = {.phys = GAIA_IO_BASE + 0x21FF04},
-       .usb2_stbus_chunk_size = {.phys = GAIA_IO_BASE + 0x21FF08},
-
-       .pcie_regs = {.phys = GAIA_IO_BASE + 0x220000},
-       .tim_ch = {.phys = GAIA_IO_BASE + 0x2A2C10},
-       .tim_cl = {.phys = GAIA_IO_BASE + 0x2A2C14},
-       .gpio_dout = {.phys = GAIA_IO_BASE + 0x2A2C20},
-       .gpio_din = {.phys = GAIA_IO_BASE + 0x2A2C24},
-       .gpio_dir = {.phys = GAIA_IO_BASE + 0x2A2C2C},
-       .watchdog = {.phys = GAIA_IO_BASE + 0x2A2C30},
-       .front_panel = {.phys = GAIA_IO_BASE + 0x2A3800},
-};
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
deleted file mode 100644 (file)
index 14e7de1..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Zeus ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * Description:         Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define ZEUS_ADDR(x)   (ZEUS_IO_BASE + (x))
-
-const struct register_map zeus_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
-       .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
-       .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
-
-       .chipver3 = {.phys = ZEUS_ADDR(0x280800)},
-       .chipver2 = {.phys = ZEUS_ADDR(0x280804)},
-       .chipver1 = {.phys = ZEUS_ADDR(0x280808)},
-       .chipver0 = {.phys = ZEUS_ADDR(0x28080c)},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = ZEUS_ADDR(0x281800)},
-       .uart1_inten = {.phys = ZEUS_ADDR(0x281804)},
-       .uart1_config1 = {.phys = ZEUS_ADDR(0x281808)},
-       .uart1_config2 = {.phys = ZEUS_ADDR(0x28180C)},
-       .uart1_divisorhi = {.phys = ZEUS_ADDR(0x281810)},
-       .uart1_divisorlo = {.phys = ZEUS_ADDR(0x281814)},
-       .uart1_data = {.phys = ZEUS_ADDR(0x281818)},
-       .uart1_status = {.phys = ZEUS_ADDR(0x28181C)},
-
-       .int_stat_3 = {.phys = ZEUS_ADDR(0x282800)},
-       .int_stat_2 = {.phys = ZEUS_ADDR(0x282804)},
-       .int_stat_1 = {.phys = ZEUS_ADDR(0x282808)},
-       .int_stat_0 = {.phys = ZEUS_ADDR(0x28280c)},
-       .int_config = {.phys = ZEUS_ADDR(0x282810)},
-       .int_int_scan = {.phys = ZEUS_ADDR(0x282818)},
-       .ien_int_3 = {.phys = ZEUS_ADDR(0x282830)},
-       .ien_int_2 = {.phys = ZEUS_ADDR(0x282834)},
-       .ien_int_1 = {.phys = ZEUS_ADDR(0x282838)},
-       .ien_int_0 = {.phys = ZEUS_ADDR(0x28283c)},
-       .int_level_3_3 = {.phys = ZEUS_ADDR(0x282880)},
-       .int_level_3_2 = {.phys = ZEUS_ADDR(0x282884)},
-       .int_level_3_1 = {.phys = ZEUS_ADDR(0x282888)},
-       .int_level_3_0 = {.phys = ZEUS_ADDR(0x28288c)},
-       .int_level_2_3 = {.phys = ZEUS_ADDR(0x282890)},
-       .int_level_2_2 = {.phys = ZEUS_ADDR(0x282894)},
-       .int_level_2_1 = {.phys = ZEUS_ADDR(0x282898)},
-       .int_level_2_0 = {.phys = ZEUS_ADDR(0x28289c)},
-       .int_level_1_3 = {.phys = ZEUS_ADDR(0x2828a0)},
-       .int_level_1_2 = {.phys = ZEUS_ADDR(0x2828a4)},
-       .int_level_1_1 = {.phys = ZEUS_ADDR(0x2828a8)},
-       .int_level_1_0 = {.phys = ZEUS_ADDR(0x2828ac)},
-       .int_level_0_3 = {.phys = ZEUS_ADDR(0x2828b0)},
-       .int_level_0_2 = {.phys = ZEUS_ADDR(0x2828b4)},
-       .int_level_0_1 = {.phys = ZEUS_ADDR(0x2828b8)},
-       .int_level_0_0 = {.phys = ZEUS_ADDR(0x2828bc)},
-       .int_docsis_en = {.phys = ZEUS_ADDR(0x2828F4)},
-
-       .mips_pll_setup = {.phys = ZEUS_ADDR(0x1a0000)},
-       .fs432x4b4_usb_ctl = {.phys = ZEUS_ADDR(0x1a0018)},
-       .test_bus = {.phys = ZEUS_ADDR(0x1a0238)},
-       .crt_spare = {.phys = ZEUS_ADDR(0x1a0090)},
-       .usb2_ohci_int_mask = {.phys = ZEUS_ADDR(0x1e000c)},
-       .usb2_strap = {.phys = ZEUS_ADDR(0x1e0014)},
-       .ehci_hcapbase = {.phys = ZEUS_ADDR(0x1FFE00)},
-       .ohci_hc_revision = {.phys = ZEUS_ADDR(0x1FFC00)},
-       .bcm1_bs_lmi_steer = {.phys = ZEUS_ADDR(0x2C0008)},
-       .usb2_control = {.phys = ZEUS_ADDR(0x2c01a0)},
-       .usb2_stbus_obc = {.phys = ZEUS_ADDR(0x1FFF00)},
-       .usb2_stbus_mess_size = {.phys = ZEUS_ADDR(0x1FFF04)},
-       .usb2_stbus_chunk_size = {.phys = ZEUS_ADDR(0x1FFF08)},
-
-       .pcie_regs = {.phys = ZEUS_ADDR(0x200000)},
-       .tim_ch = {.phys = ZEUS_ADDR(0x282C10)},
-       .tim_cl = {.phys = ZEUS_ADDR(0x282C14)},
-       .gpio_dout = {.phys = ZEUS_ADDR(0x282c20)},
-       .gpio_din = {.phys = ZEUS_ADDR(0x282c24)},
-       .gpio_dir = {.phys = ZEUS_ADDR(0x282c2C)},
-       .watchdog = {.phys = ZEUS_ADDR(0x282c30)},
-       .front_panel = {.phys = ZEUS_ADDR(0x283800)},
-};
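The four register-map files above (Calliope, Cronus, Gaia, Zeus) only tabulate physical register addresses; asic_devices.c, removed below, copies the map for the detected ASIC into _asic_register_map, calls register_map_virtualize() and ioremaps the register window. As an illustration only (the structure and accessor below are assumptions, not the actual asm/mach-powertv/asic_regs.h implementation), each entry boils down to a phys/virt pair read through a readl()-style helper:

	#include <linux/io.h>
	#include <linux/types.h>

	/* hypothetical shape of one register_map entry and its accessor */
	struct register_map_entry {
		u32 phys;		/* physical address tabulated above */
		void __iomem *virt;	/* filled in once the window is mapped */
	};

	static inline u32 map_entry_read(const struct register_map_entry *e)
	{
		return readl(e->virt);
	}
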
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
deleted file mode 100644 (file)
index 8380605..0000000
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- *
- * Description:         Defines the platform resources for Gaia-based settops.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/resource.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <asm/page.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/asic_regs.h>
-#include <asm/mach-powertv/interrupts.h>
-
-#ifdef CONFIG_BOOTLOADER_DRIVER
-#include <asm/mach-powertv/kbldr.h>
-#endif
-#include <asm/bootinfo.h>
-
-#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))
-
-/*
- * Forward Prototypes
- */
-static void pmem_setup_resource(void);
-
-/*
- * Global Variables
- */
-enum asic_type asic;
-
-unsigned int platform_features;
-unsigned int platform_family;
-struct register_map _asic_register_map;
-EXPORT_SYMBOL(_asic_register_map);             /* Exported for testing */
-unsigned long asic_phy_base;
-unsigned long asic_base;
-EXPORT_SYMBOL(asic_base);                      /* Exported for testing */
-struct resource *gp_resources;
-
-/*
- * Not recommended for direct use; it is normally used by the kernel internally.
- * Portable code should use interfaces such as ioremap, dma_map_single, etc.
- */
-unsigned long phys_to_dma_offset;
-EXPORT_SYMBOL(phys_to_dma_offset);
-
-/*
- *
- * IO Resource Definition
- *
- */
-
-struct resource asic_resource = {
-       .name  = "ASIC Resource",
-       .start = 0,
-       .end   = ASIC_IO_SIZE,
-       .flags = IORESOURCE_MEM,
-};
-
-/*
- * Allow the bootloader-specified model to be overridden from the command line.
- * Returns zero on success, a negative errno value on failure.
- */
-static char __initdata cmdline[COMMAND_LINE_SIZE];
-
-#define FORCEFAMILY_PARAM      "forcefamily"
-
-/*
- * check_forcefamily - check for, and parse, forcefamily command line parameter
- * @forced_family:     Pointer to two-character array in which to store the
- *                     value of the forcefamily parameter, if any.
- */
-static __init int check_forcefamily(unsigned char forced_family[2])
-{
-       const char *p;
-
-       forced_family[0] = '\0';
-       forced_family[1] = '\0';
-
-       /* Check the command line for a forcefamily directive */
-       strncpy(cmdline, arcs_cmdline, COMMAND_LINE_SIZE - 1);
-       p = strstr(cmdline, FORCEFAMILY_PARAM);
-       if (p && (p != cmdline) && (*(p - 1) != ' '))
-               p = strstr(p, " " FORCEFAMILY_PARAM "=");
-
-       if (p) {
-               p += strlen(FORCEFAMILY_PARAM "=");
-
-               if (*p == '\0' || *(p + 1) == '\0' ||
-                       (*(p + 2) != '\0' && *(p + 2) != ' '))
-                       pr_err(FORCEFAMILY_PARAM " must be exactly two "
-                               "characters long, ignoring value\n");
-
-               else {
-                       forced_family[0] = *p;
-                       forced_family[1] = *(p + 1);
-               }
-       }
-
-       return 0;
-}
-
-/*
- * platform_set_family - determine major platform family type.
- *
- * Sets platform_family to the detected family, or -1 if it is not recognized.
- */
-static __init noinline void platform_set_family(void)
-{
-       unsigned char forced_family[2];
-       unsigned short bootldr_family;
-
-       if (check_forcefamily(forced_family) == 0)
-               bootldr_family = BOOTLDRFAMILY(forced_family[0],
-                       forced_family[1]);
-       else
-               bootldr_family = (unsigned short) BOOTLDRFAMILY(
-                       CONFIG_BOOTLOADER_FAMILY[0],
-                       CONFIG_BOOTLOADER_FAMILY[1]);
-
-       pr_info("Bootloader Family = 0x%04X\n", bootldr_family);
-
-       switch (bootldr_family) {
-       case BOOTLDRFAMILY('R', '1'):
-               platform_family = FAMILY_1500;
-               break;
-       case BOOTLDRFAMILY('4', '4'):
-               platform_family = FAMILY_4500;
-               break;
-       case BOOTLDRFAMILY('4', '6'):
-               platform_family = FAMILY_4600;
-               break;
-       case BOOTLDRFAMILY('A', '1'):
-               platform_family = FAMILY_4600VZA;
-               break;
-       case BOOTLDRFAMILY('8', '5'):
-               platform_family = FAMILY_8500;
-               break;
-       case BOOTLDRFAMILY('R', '2'):
-               platform_family = FAMILY_8500RNG;
-               break;
-       case BOOTLDRFAMILY('8', '6'):
-               platform_family = FAMILY_8600;
-               break;
-       case BOOTLDRFAMILY('B', '1'):
-               platform_family = FAMILY_8600VZB;
-               break;
-       case BOOTLDRFAMILY('E', '1'):
-               platform_family = FAMILY_1500VZE;
-               break;
-       case BOOTLDRFAMILY('F', '1'):
-               platform_family = FAMILY_1500VZF;
-               break;
-       case BOOTLDRFAMILY('8', '7'):
-               platform_family = FAMILY_8700;
-               break;
-       default:
-               platform_family = -1;
-       }
-}
-
-unsigned int platform_get_family(void)
-{
-       return platform_family;
-}
-EXPORT_SYMBOL(platform_get_family);
-
-/*
- * platform_get_asic - determine the ASIC type.
- *
- * Returns the ASIC type, or ASIC_UNKNOWN if unknown
- *
- */
-enum asic_type platform_get_asic(void)
-{
-       return asic;
-}
-EXPORT_SYMBOL(platform_get_asic);
-
-/*
- * set_register_map - set ASIC register configuration
- * @phys_base: Physical address of the base of the ASIC registers
- * @map:       Description of key ASIC registers
- */
-static void __init set_register_map(unsigned long phys_base,
-       const struct register_map *map)
-{
-       asic_phy_base = phys_base;
-       _asic_register_map = *map;
-       register_map_virtualize(&_asic_register_map);
-       asic_base = (unsigned long)ioremap_nocache(phys_base, ASIC_IO_SIZE);
-}
-
-/**
- * configure_platform - configuration based on platform type.
- */
-void __init configure_platform(void)
-{
-       platform_set_family();
-
-       switch (platform_family) {
-       case FAMILY_1500:
-       case FAMILY_1500VZE:
-       case FAMILY_1500VZF:
-               platform_features = FFS_CAPABLE;
-               asic = ASIC_CALLIOPE;
-               set_register_map(CALLIOPE_IO_BASE, &calliope_register_map);
-
-               if (platform_family == FAMILY_1500VZE) {
-                       gp_resources = non_dvr_vze_calliope_resources;
-                       pr_info("Platform: 1500/Vz Class E - "
-                               "CALLIOPE, NON_DVR_CAPABLE\n");
-               } else if (platform_family == FAMILY_1500VZF) {
-                       gp_resources = non_dvr_vzf_calliope_resources;
-                       pr_info("Platform: 1500/Vz Class F - "
-                               "CALLIOPE, NON_DVR_CAPABLE\n");
-               } else {
-                       gp_resources = non_dvr_calliope_resources;
-                       pr_info("Platform: 1500/RNG100 - CALLIOPE, "
-                               "NON_DVR_CAPABLE\n");
-               }
-               break;
-
-       case FAMILY_4500:
-               platform_features = FFS_CAPABLE | PCIE_CAPABLE |
-                       DISPLAY_CAPABLE;
-               asic = ASIC_ZEUS;
-               set_register_map(ZEUS_IO_BASE, &zeus_register_map);
-               gp_resources = non_dvr_zeus_resources;
-
-               pr_info("Platform: 4500 - ZEUS, NON_DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_4600:
-       {
-               unsigned int chipversion = 0;
-
-               /* The settop has PCIE but it isn't used, so don't advertise
-                * it*/
-               platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
-
-               /* Cronus and Cronus Lite have the same register map */
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-
-               /* ASIC version will determine if this is a real CronusLite or
-                * Castrati(Cronus) */
-               chipversion  = asic_read(chipver3) << 24;
-               chipversion |= asic_read(chipver2) << 16;
-               chipversion |= asic_read(chipver1) << 8;
-               chipversion |= asic_read(chipver0);
-
-               if ((chipversion == CRONUS_10) || (chipversion == CRONUS_11))
-                       asic = ASIC_CRONUS;
-               else
-                       asic = ASIC_CRONUSLITE;
-
-               gp_resources = non_dvr_cronuslite_resources;
-               pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
-                       "chipversion=0x%08X\n",
-                       (asic == ASIC_CRONUS) ? "CRONUS" : "CRONUS LITE",
-                       chipversion);
-               break;
-       }
-       case FAMILY_4600VZA:
-               platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
-               asic = ASIC_CRONUS;
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-               gp_resources = non_dvr_cronus_resources;
-
-               pr_info("Platform: Vz Class A - CRONUS, NON_DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_8500:
-       case FAMILY_8500RNG:
-               platform_features = DVR_CAPABLE | PCIE_CAPABLE |
-                       DISPLAY_CAPABLE;
-               asic = ASIC_ZEUS;
-               set_register_map(ZEUS_IO_BASE, &zeus_register_map);
-               gp_resources = dvr_zeus_resources;
-
-               pr_info("Platform: 8500/RNG200 - ZEUS, DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_8600:
-       case FAMILY_8600VZB:
-               platform_features = DVR_CAPABLE | PCIE_CAPABLE |
-                       DISPLAY_CAPABLE;
-               asic = ASIC_CRONUS;
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-               gp_resources = dvr_cronus_resources;
-
-               pr_info("Platform: 8600/Vz Class B - CRONUS, "
-                       "DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_8700:
-               platform_features = FFS_CAPABLE | PCIE_CAPABLE;
-               asic = ASIC_GAIA;
-               set_register_map(GAIA_IO_BASE, &gaia_register_map);
-               gp_resources = dvr_gaia_resources;
-
-               pr_info("Platform: 8700 - GAIA, DVR_CAPABLE\n");
-               break;
-
-       default:
-               pr_crit("Platform:  UNKNOWN PLATFORM\n");
-               break;
-       }
-
-       switch (asic) {
-       case ASIC_ZEUS:
-               phys_to_dma_offset = 0x30000000;
-               break;
-       case ASIC_CALLIOPE:
-               phys_to_dma_offset = 0x10000000;
-               break;
-       case ASIC_CRONUSLITE:
-               /* Fall through */
-       case ASIC_CRONUS:
-               /*
-                * TODO: We suppose 0x10000000 aliases into 0x20000000-
-                * 0x2XXXXXXX. If 0x10000000 aliases into 0x60000000-
-                * 0x6XXXXXXX, the offset should be 0x50000000, not 0x10000000.
-                */
-               phys_to_dma_offset = 0x10000000;
-               break;
-       default:
-               phys_to_dma_offset = 0x00000000;
-               break;
-       }
-}
-
-/*
- * RESOURCE ALLOCATION
- *
- */
-/*
- * Allocates/reserves the Platform memory resources early in the boot process.
- * This ignores any resources that are designated IORESOURCE_IO
- */
-void __init platform_alloc_bootmem(void)
-{
-       int i;
-       int total = 0;
-
-       /* Get persistent memory data from command line before allocating
-        * resources. This needs to happen before normal command line parsing
-        * has been done */
-       pmem_setup_resource();
-
-       /* Loop through looking for resources that want a particular address */
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               int size = resource_size(&gp_resources[i]);
-               if ((gp_resources[i].start != 0) &&
-                       ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
-                       reserve_bootmem(dma_to_phys(gp_resources[i].start),
-                               size, 0);
-                       total += resource_size(&gp_resources[i]);
-                       pr_info("reserve resource %s at %08x (%u bytes)\n",
-                               gp_resources[i].name, gp_resources[i].start,
-                               resource_size(&gp_resources[i]));
-               }
-       }
-
-       /* Loop through assigning addresses for those that are left */
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               int size = resource_size(&gp_resources[i]);
-               if ((gp_resources[i].start == 0) &&
-                       ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
-                       void *mem = alloc_bootmem_pages(size);
-
-                       if (mem == NULL)
-                               pr_err("Unable to allocate bootmem pages "
-                                       "for %s\n", gp_resources[i].name);
-
-                       else {
-                               gp_resources[i].start =
-                                       phys_to_dma(virt_to_phys(mem));
-                               gp_resources[i].end =
-                                       gp_resources[i].start + size - 1;
-                               total += size;
-                               pr_info("allocate resource %s at %08x "
-                                               "(%u bytes)\n",
-                                       gp_resources[i].name,
-                                       gp_resources[i].start, size);
-                       }
-               }
-       }
-
-       pr_info("Total Platform driver memory allocation: 0x%08x\n", total);
-
-       /* indicate resources that are platform I/O related */
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               if ((gp_resources[i].start != 0) &&
-                       ((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
-                       pr_info("reserved platform resource %s at %08x\n",
-                               gp_resources[i].name, gp_resources[i].start);
-               }
-       }
-}
-
-/*
- *
- * PERSISTENT MEMORY (PMEM) CONFIGURATION
- *
- */
-static unsigned long pmemaddr __initdata;
-
-static int __init early_param_pmemaddr(char *p)
-{
-       pmemaddr = (unsigned long)simple_strtoul(p, NULL, 0);
-       return 0;
-}
-early_param("pmemaddr", early_param_pmemaddr);
-
-static long pmemlen __initdata;
-
-static int __init early_param_pmemlen(char *p)
-{
-/* TODO: we can use this code when and if the bootloader ever changes this */
-#if 0
-       pmemlen = (unsigned long)simple_strtoul(p, NULL, 0);
-#else
-       pmemlen = 0x20000;
-#endif
-       return 0;
-}
-early_param("pmemlen", early_param_pmemlen);
-
-/*
- * Set up persistent memory. If we were given values, we patch the array of
- * resources. Otherwise, persistent memory may be allocated anywhere at all.
- */
-static void __init pmem_setup_resource(void)
-{
-       struct resource *resource;
-       resource = asic_resource_get("DiagPersistentMemory");
-
-       if (resource && pmemaddr && pmemlen) {
-               /* The address provided by bootloader is in kseg0. Convert to
-                * a bus address. */
-               resource->start = phys_to_dma(pmemaddr - 0x80000000);
-               resource->end = resource->start + pmemlen - 1;
-
-               pr_info("persistent memory: start=0x%x  end=0x%x\n",
-                       resource->start, resource->end);
-       }
-}
-
-/*
- *
- * RESOURCE ACCESS FUNCTIONS
- *
- */
-
-/**
- * asic_resource_get - retrieves parameters for a platform resource.
- * @name:      string to match resource
- *
- * Returns a pointer to a struct resource corresponding to the given name.
- *
- * CANNOT BE NAMED platform_resource_get, which would be the obvious choice,
- * as this function name is already declared
- */
-struct resource *asic_resource_get(const char *name)
-{
-       int i;
-
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               if (strcmp(gp_resources[i].name, name) == 0)
-                       return &gp_resources[i];
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL(asic_resource_get);
-
-/**
- * platform_release_memory - release pre-allocated memory
- * @ptr:       pointer to memory to release
- * @size:      size of resource
- *
- * This must only be called for memory allocated or reserved via the boot
- * memory allocator.
- */
-void platform_release_memory(void *ptr, int size)
-{
-       free_reserved_area(ptr, ptr + size, -1, NULL);
-}
-EXPORT_SYMBOL(platform_release_memory);
-
-/*
- *
- * FEATURE AVAILABILITY FUNCTIONS
- *
- */
-int platform_supports_dvr(void)
-{
-       return (platform_features & DVR_CAPABLE) != 0;
-}
-
-int platform_supports_ffs(void)
-{
-       return (platform_features & FFS_CAPABLE) != 0;
-}
-
-int platform_supports_pcie(void)
-{
-       return (platform_features & PCIE_CAPABLE) != 0;
-}
-
-int platform_supports_display(void)
-{
-       return (platform_features & DISPLAY_CAPABLE) != 0;
-}
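A minimal sketch of how a platform driver might consume one of the regions set up above, using the asic_resource_get() helper from this file (the region name and mapping policy are illustrative, not mandated by the code):

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <asm/mach-powertv/asic.h>

	static void __iomem *map_named_region(const char *name, size_t *len)
	{
		struct resource *r = asic_resource_get(name);

		if (r == NULL)
			return NULL;

		*len = resource_size(r);
		/* the tables hold bus addresses, hence the dma_to_phys() step */
		return ioremap(dma_to_phys(r->start), *len);
	}

For example, map_named_region("GraphicsHeap", &len) would map the graphics heap reserved by platform_alloc_bootmem(); the caller unmaps it with iounmap() when done.
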
diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c
deleted file mode 100644 (file)
index f44cd92..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Routines for generic manipulation of the interrupts found on the PowerTV
- * platform.
- *
- * The interrupt controller is located in the South Bridge, a PIIX4 device
- * with two internal 82C59 interrupt controllers.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/kernel.h>
-#include <linux/random.h>
-
-#include <asm/irq_cpu.h>
-#include <linux/io.h>
-#include <asm/irq_regs.h>
-#include <asm/setup.h>
-#include <asm/mips-boards/generic.h>
-
-#include <asm/mach-powertv/asic_regs.h>
-
-static DEFINE_RAW_SPINLOCK(asic_irq_lock);
-
-static inline int get_int(void)
-{
-       unsigned long flags;
-       int irq;
-
-       raw_spin_lock_irqsave(&asic_irq_lock, flags);
-
-       irq = (asic_read(int_int_scan) >> 4) - 1;
-
-       if (irq == 0 || irq >= NR_IRQS)
-               irq = -1;
-
-       raw_spin_unlock_irqrestore(&asic_irq_lock, flags);
-
-       return irq;
-}
-
-static void asic_irqdispatch(void)
-{
-       int irq;
-
-       irq = get_int();
-       if (irq < 0)
-               return;  /* interrupt has already been cleared */
-
-       do_IRQ(irq);
-}
-
-static inline int clz(unsigned long x)
-{
-       __asm__(
-       "       .set    push                                    \n"
-       "       .set    mips32                                  \n"
-       "       clz     %0, %1                                  \n"
-       "       .set    pop                                     \n"
-       : "=r" (x)
-       : "r" (x));
-
-       return x;
-}
-
-/*
- * Version of ffs that only looks at bits 12..15.
- */
-static inline unsigned int irq_ffs(unsigned int pending)
-{
-       return fls(pending) - 1 + CAUSEB_IP;
-}
-
-/*
- * TODO: check how it works under EIC mode.
- */
-asmlinkage void plat_irq_dispatch(void)
-{
-       unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
-       int irq;
-
-       irq = irq_ffs(pending);
-
-       if (irq == CAUSEF_IP3)
-               asic_irqdispatch();
-       else if (irq >= 0)
-               do_IRQ(irq);
-       else
-               spurious_interrupt();
-}
-
-void __init arch_init_irq(void)
-{
-       int i;
-
-       asic_irq_init();
-
-       /*
-        * Initialize interrupt exception vectors.
-        */
-       if (cpu_has_veic || cpu_has_vint) {
-               int nvec = cpu_has_veic ? 64 : 8;
-               for (i = 0; i < nvec; i++)
-                       set_vi_handler(i, asic_irqdispatch);
-       }
-}
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
deleted file mode 100644 (file)
index 9344902..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Portions copyright (C) 2005-2009 Scientific Atlanta
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * Modified from arch/mips/kernel/irq-rm7000.c:
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under  the terms of the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-#include <asm/mach-powertv/asic_regs.h>
-
-static inline void unmask_asic_irq(struct irq_data *d)
-{
-       unsigned long enable_bit;
-       unsigned int irq = d->irq;
-
-       enable_bit = (1 << (irq & 0x1f));
-
-       switch (irq >> 5) {
-       case 0:
-               asic_write(asic_read(ien_int_0) | enable_bit, ien_int_0);
-               break;
-       case 1:
-               asic_write(asic_read(ien_int_1) | enable_bit, ien_int_1);
-               break;
-       case 2:
-               asic_write(asic_read(ien_int_2) | enable_bit, ien_int_2);
-               break;
-       case 3:
-               asic_write(asic_read(ien_int_3) | enable_bit, ien_int_3);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static inline void mask_asic_irq(struct irq_data *d)
-{
-       unsigned long disable_mask;
-       unsigned int irq = d->irq;
-
-       disable_mask = ~(1 << (irq & 0x1f));
-
-       switch (irq >> 5) {
-       case 0:
-               asic_write(asic_read(ien_int_0) & disable_mask, ien_int_0);
-               break;
-       case 1:
-               asic_write(asic_read(ien_int_1) & disable_mask, ien_int_1);
-               break;
-       case 2:
-               asic_write(asic_read(ien_int_2) & disable_mask, ien_int_2);
-               break;
-       case 3:
-               asic_write(asic_read(ien_int_3) & disable_mask, ien_int_3);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static struct irq_chip asic_irq_chip = {
-       .name = "ASIC Level",
-       .irq_mask = mask_asic_irq,
-       .irq_unmask = unmask_asic_irq,
-};
-
-void __init asic_irq_init(void)
-{
-       int i;
-
-       /* set priority to 0 */
-       write_c0_status(read_c0_status() & ~(0x0000fc00));
-
-       asic_write(0, ien_int_0);
-       asic_write(0, ien_int_1);
-       asic_write(0, ien_int_2);
-       asic_write(0, ien_int_3);
-
-       asic_write(0x0fffffff, int_level_3_3);
-       asic_write(0xffffffff, int_level_3_2);
-       asic_write(0xffffffff, int_level_3_1);
-       asic_write(0xffffffff, int_level_3_0);
-       asic_write(0xffffffff, int_level_2_3);
-       asic_write(0xffffffff, int_level_2_2);
-       asic_write(0xffffffff, int_level_2_1);
-       asic_write(0xffffffff, int_level_2_0);
-       asic_write(0xffffffff, int_level_1_3);
-       asic_write(0xffffffff, int_level_1_2);
-       asic_write(0xffffffff, int_level_1_1);
-       asic_write(0xffffffff, int_level_1_0);
-       asic_write(0xffffffff, int_level_0_3);
-       asic_write(0xffffffff, int_level_0_2);
-       asic_write(0xffffffff, int_level_0_1);
-       asic_write(0xffffffff, int_level_0_0);
-
-       asic_write(0xf, int_int_scan);
-
-       /*
-        * Initialize interrupt handlers.
-        */
-       for (i = 0; i < NR_IRQS; i++)
-               irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
-}
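The mask/unmask helpers above treat the IRQ number as a bank index (irq >> 5) into ien_int_0..ien_int_3 plus a bit position (irq & 0x1f) within that 32-bit enable register. A worked example:

	unsigned int irq = 37;
	unsigned int bank = irq >> 5;			/* 1, i.e. ien_int_1 */
	unsigned long enable_bit = 1UL << (irq & 0x1f);	/* bit 5 */
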
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
deleted file mode 100644 (file)
index 98dc516..0000000
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Memory pre-allocations for Calliope boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * NON_DVR_CAPABLE CALLIOPE RESOURCES
- */
-struct resource non_dvr_calliope_resources[] __initdata =
-{
-       /*
-        * VIDEO / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~36.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 6MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Synopsys GMAC Memory Region
-        */
-       /* 64KiB */
-       PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-
-struct resource non_dvr_vze_calliope_resources[] __initdata =
-{
-       /*
-        * VIDEO / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (10.12MiB) */
-       PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 3.125MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (2.59MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Synopsys GMAC Memory Region
-        */
-       /* 64KiB */
-       PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-struct resource non_dvr_vzf_calliope_resources[] __initdata =
-{
-       /*
-        * VIDEO / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~19.4 (21.5MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 4.5MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Synopsys GMAC Memory Region
-        */
-       /* 64KiB */
-       PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
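
A note on the size comments in these tables: every PREALLOC_* entry gives the
region end as the address of its last byte, i.e. start + size - 1, which is why
expressions such as 0x25600000+(14*1048576)-1 and 0x000AA000-1 appear above. A
small stand-alone C check of that convention against two of the quoted sizes
(region_size() is a hypothetical helper, not part of the deleted sources):

#include <assert.h>
#include <stdio.h>

#define KIBIBYTE(n) ((n) * 1024UL)
#define MEBIBYTE(n) ((n) * KIBIBYTE(1024))

/* Size of a [start, end] region where end is the last valid byte. */
static unsigned long region_size(unsigned long start, unsigned long end)
{
	return end - start + 1;
}

int main(void)
{
	/* GraphicsHeap: 0x25600000 .. 0x25600000 + 14 MiB - 1 ("14MiB") */
	assert(region_size(0x25600000UL, 0x25600000UL + MEBIBYTE(14) - 1) ==
	       MEBIBYTE(14));

	/* BMM_Buffer: 0x00000000 .. 0x000AA000 - 1 is the "680KiB" ring buffer */
	assert(region_size(0x00000000UL, 0x000AA000UL - 1) == KIBIBYTE(680));

	printf("resource size convention checks out\n");
	return 0;
}
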
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
deleted file mode 100644 (file)
index 7c6ce75..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Memory pre-allocations for Cronus boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * DVR_CAPABLE CRONUS RESOURCES
- */
-struct resource dvr_cronus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 2 image (2MiB) */
-       PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVMEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 12MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer
-        */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x002EA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * ITFS
-        */
-       /* 815,104 bytes each for 2 ITFS partitions. */
-       PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1, IORESOURCE_MEM)
-
-       /*
-        * AVFS
-        */
-       /* (945K * 8) + (128K * 3): 5 playback / 3 server streams */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
-               IORESOURCE_MEM)
-
-       /* 4KiB */
-       PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               IORESOURCE_MEM)
-
-       /*
-        * KAVNET
-        */
-       /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
-       PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
-               IORESOURCE_MEM)
-       /* NP Image - must be video bank 1 (320KiB) */
-       PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
-       /* NP IPC - must be video bank 2 (512KiB) */
-       PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-/*
- * NON_DVR_CAPABLE CRONUS RESOURCES
- */
-struct resource non_dvr_cronus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 2 image (2MiB) */
-       PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVMEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 12MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, IORESOURCE_MEM)
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
-
-       /*
-        * KAVNET
-        */
-       /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
-       PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
-               IORESOURCE_MEM)
-       /* NP Image - must be video bank 1 (320KiB) */
-       PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
-       /* NP IPC - must be video bank 2 (512KiB) */
-       PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
deleted file mode 100644 (file)
index a7937ba..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Memory pre-allocations for Cronus Lite boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * NON_DVR_CAPABLE CRONUSLITE RESOURCES
- */
-struct resource non_dvr_cronuslite_resources[] __initdata =
-{
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVMEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 6MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
-
-       /*
-        * KAVNET
-        */
-       /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
-       PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
-               IORESOURCE_MEM)
-       /* NP Image - must be video bank 1 (320KiB) */
-       PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
-       /* NP IPC - must be video bank 2 (512KiB) */
-       PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
diff --git a/arch/mips/powertv/asic/prealloc-gaia.c b/arch/mips/powertv/asic/prealloc-gaia.c
deleted file mode 100644 (file)
index 2303bbf..0000000
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * Memory pre-allocations for Gaia boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      David VomLehn
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-/*
- * DVR_CAPABLE GAIA RESOURCES
- */
-struct resource dvr_gaia_resources[] __initdata = {
-       /*
-        *
-        * VIDEO1 / LX1
-        *
-        */
-       {
-               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
-               .start  = 0x24000000,
-               .end    = 0x241FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
-               .start  = 0x24200000,
-               .end    = 0x24201FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "MediaMemory1",
-               .start  = 0x24202000,
-               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * VIDEO2 / LX2
-        *
-        */
-       {
-               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
-               .start  = 0x60000000,
-               .end    = 0x601FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
-               .start  = 0x60200000,
-               .end    = 0x60201FFF,
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "MediaMemory2",
-               .start  = 0x60202000,
-               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * Sysaudio Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  DSP_Image_Buff - DSP code and data images (1MB)
-        *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-        *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-        *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-        *
-        */
-       {
-               .name   = "DSP_Image_Buff",
-               .start  = 0x00000000,
-               .end    = 0x000FFFFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_CPU_PCM_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00009FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_AUX_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_Main_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * STAVMEM driver/STAPI
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        *
-        */
-       {
-               .name   = "AVMEMPartition0",
-               .start  = 0x63580000,
-               .end    = 0x64180000 - 1,  /* 12 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * DOCSIS Subsystem
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "Docsis",
-               .start  = 0x62000000,
-               .end    = 0x62700000 - 1,       /* 7 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * GHW HAL Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  GraphicsHeap - PowerTV Graphics Heap
-        *
-        */
-       {
-               .name   = "GraphicsHeap",
-               .start  = 0x62700000,
-               .end    = 0x63500000 - 1,       /* 14 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * multi com buffer area
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  MulticomSHM -
-        *
-        */
-       {
-               .name   = "MulticomSHM",
-               .start  = 0x26000000,
-               .end    = 0x26020000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * DMA Ring buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  BMM_Buffer -
-        *
-        */
-       {
-               .name   = "BMM_Buffer",
-               .start  = 0x00000000,
-               .end    = 0x00280000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer for unit0
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit0
-        *
-        */
-       {
-               .name   = "DisplayBins0",
-               .start  = 0x00000000,
-               .end    = 0x00000FFF,           /* 4 KB total */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit1
-        *
-        */
-       {
-               .name   = "DisplayBins1",
-               .start  = 0x64AD4000,
-               .end    = 0x64AD5000 - 1,  /* 4 KB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * ITFS
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  ITFS -
-        *
-        */
-       {
-               .name   = "ITFS",
-               .start  = 0x64180000,
-               /* 815,104 bytes each for 2 ITFS partitions. */
-               .end    = 0x6430DFFF,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * AVFS
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  AvfsDmaMem / AvfsFileSys -
-        *
-        */
-       {
-               .name   = "AvfsDmaMem",
-               .start  = 0x6430E000,
-               /* (945K * 8) + (128K * 3): 5 playback / 3 server streams */
-               .end    = 0x64AD0000 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "AvfsFileSys",
-               .start  = 0x64AD0000,
-               .end    = 0x64AD1000 - 1,  /* 4K */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * Smartcard
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Read and write buffers for Internal/External cards
-        *
-        */
-       {
-               .name   = "SmartCardInfo",
-               .start  = 0x64AD1000,
-               .end    = 0x64AD3800 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * KAVNET
-        *    NP Reset Vector - must be of the form xxCxxxxx
-        *         NP Image - must be video bank 1
-        *         NP IPC - must be video bank 2
-        */
-       {
-               .name   = "NP_Reset_Vector",
-               .start  = 0x27c00000,
-               .end    = 0x27c01000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_Image",
-               .start  = 0x27020000,
-               .end    = 0x27060000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_IPC",
-               .start  = 0x63500000,
-               .end    = 0x63580000 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        * Add other resources here
-        */
-       { },
-};
-
-/*
- * NON_DVR_CAPABLE GAIA RESOURCES
- */
-struct resource non_dvr_gaia_resources[] __initdata = {
-       /*
-        *
-        * VIDEO1 / LX1
-        *
-        */
-       {
-               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
-               .start  = 0x24000000,
-               .end    = 0x241FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
-               .start  = 0x24200000,
-               .end    = 0x24201FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "MediaMemory1",
-               .start  = 0x24202000,
-               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * VIDEO2 / LX2
-        *
-        */
-       {
-               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
-               .start  = 0x60000000,
-               .end    = 0x601FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
-               .start  = 0x60200000,
-               .end    = 0x60201FFF,
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "MediaMemory2",
-               .start  = 0x60202000,
-               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * Sysaudio Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  DSP_Image_Buff - DSP code and data images (1MB)
-        *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-        *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-        *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-        *
-        */
-       {
-               .name   = "DSP_Image_Buff",
-               .start  = 0x00000000,
-               .end    = 0x000FFFFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_CPU_PCM_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00009FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_AUX_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_Main_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * STAVMEM driver/STAPI
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        *
-        */
-       {
-               .name   = "AVMEMPartition0",
-               .start  = 0x63580000,
-               .end    = 0x64180000 - 1,  /* 12 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * DOCSIS Subsystem
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "Docsis",
-               .start  = 0x62000000,
-               .end    = 0x62700000 - 1,       /* 7 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * GHW HAL Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  GraphicsHeap - PowerTV Graphics Heap
-        *
-        */
-       {
-               .name   = "GraphicsHeap",
-               .start  = 0x62700000,
-               .end    = 0x63500000 - 1,       /* 14 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * multi com buffer area
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  MulticomSHM -
-        *
-        */
-       {
-               .name   = "MulticomSHM",
-               .start  = 0x26000000,
-               .end    = 0x26020000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * DMA Ring buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  BMM_Buffer -
-        *
-        */
-       {
-               .name   = "BMM_Buffer",
-               .start  = 0x00000000,
-               .end    = 0x000AA000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer for unit0
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit0
-        *
-        */
-       {
-               .name   = "DisplayBins0",
-               .start  = 0x00000000,
-               .end    = 0x00000FFF,           /* 4 KB total */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit1
-        *
-        */
-       {
-               .name   = "DisplayBins1",
-               .start  = 0x64AD4000,
-               .end    = 0x64AD5000 - 1,  /* 4 KB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * AVFS: player HAL memory
-        *
-        *
-        */
-       {
-               .name   = "AvfsDmaMem",
-               .start  = 0x6430E000,
-               .end    = 0x645D2C00 - 1,  /* 945K * 3 for playback */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * PMEM
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Persistent memory for diagnostics.
-        *
-        */
-       {
-               .name   = "DiagPersistentMemory",
-               .start  = 0x00000000,
-               .end    = 0x10000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Smartcard
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Read and write buffers for Internal/External cards
-        *
-        */
-       {
-               .name   = "SmartCardInfo",
-               .start  = 0x64AD1000,
-               .end    = 0x64AD3800 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * KAVNET
-        *    NP Reset Vector - must be of the form xxCxxxxx
-        *         NP Image - must be video bank 1
-        *         NP IPC - must be video bank 2
-        */
-       {
-               .name   = "NP_Reset_Vector",
-               .start  = 0x27c00000,
-               .end    = 0x27c01000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_Image",
-               .start  = 0x27020000,
-               .end    = 0x27060000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_IPC",
-               .start  = 0x63500000,
-               .end    = 0x63580000 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       { },
-};
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
deleted file mode 100644 (file)
index 6e76f09..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Memory pre-allocations for Zeus boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * DVR_CAPABLE RESOURCES
- */
-struct resource dvr_zeus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 2 image (2MiB) */
-       PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVMEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 12MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer
-        */
-       /* 2.5MiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * ITFS
-        */
-       /* 815,104 bytes each for 2 ITFS partitions. */
-       PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS
-        */
-       /* (945K * 8) + (128K * 3): 5 playback / 3 server streams */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* 4KiB */
-       PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-/*
- * NON_DVR_CAPABLE ZEUS RESOURCES
- */
-struct resource non_dvr_zeus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVMEM driver/STAPI
-        */
-       /* 6MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer
-        */
-       /* 2.5MiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
diff --git a/arch/mips/powertv/asic/prealloc.h b/arch/mips/powertv/asic/prealloc.h
deleted file mode 100644 (file)
index 8e682df..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Definitions for memory preallocations
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
-#define _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
-
-#define KIBIBYTE(n) ((n) * 1024)    /* Number of kibibytes */
-#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
-
-/* "struct resource" array element definition */
-#define PREALLOC(NAME, START, END, FLAGS) {    \
-               .name = (NAME),                 \
-               .start = (START),               \
-               .end = (END),                   \
-               .flags = (FLAGS)                \
-       },
-
-/* Individual resources in the preallocated resource arrays are defined using
- *  macros.  These macros are conditionally defined based on their
- *  corresponding kernel configuration flag:
- *    - CONFIG_PREALLOC_NORMAL: preallocate resources for a normal settop box
- *    - CONFIG_PREALLOC_TFTP: preallocate the TFTP download resource
- *    - CONFIG_PREALLOC_DOCSIS: preallocate the DOCSIS resource
- *    - CONFIG_PREALLOC_PMEM: reserve space for persistent memory
- */
-#ifdef CONFIG_PREALLOC_NORMAL
-#define PREALLOC_NORMAL(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_NORMAL(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_TFTP
-#define PREALLOC_TFTP(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_TFTP(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_DOCSIS
-#define PREALLOC_DOCSIS(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_DOCSIS(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_PMEM
-#define PREALLOC_PMEM(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_PMEM(name, start, end, flags)
-#endif
-#endif
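
The conditional definitions above let a board file list every region
unconditionally: entries whose CONFIG_PREALLOC_* option is off expand to
nothing and simply drop out of the array, leaving the zero-flags end marker as
the only guaranteed element. A minimal user-space sketch of that pattern (the
struct, the IORESOURCE_MEM value and the CONFIG_* defines below are stand-ins,
not the kernel's):

#include <stdio.h>

struct resource {
	const char *name;
	unsigned long start, end, flags;
};

#define IORESOURCE_MEM 0x00000200

#define PREALLOC(NAME, START, END, FLAGS) \
	{ .name = (NAME), .start = (START), .end = (END), .flags = (FLAGS) },

#define CONFIG_PREALLOC_NORMAL 1	/* pretend this came from Kconfig */

#ifdef CONFIG_PREALLOC_NORMAL
#define PREALLOC_NORMAL(name, start, end, flags) PREALLOC(name, start, end, flags)
#else
#define PREALLOC_NORMAL(name, start, end, flags)
#endif

#ifdef CONFIG_PREALLOC_TFTP		/* not defined here: entry vanishes */
#define PREALLOC_TFTP(name, start, end, flags) PREALLOC(name, start, end, flags)
#else
#define PREALLOC_TFTP(name, start, end, flags)
#endif

static struct resource demo_resources[] = {
	PREALLOC_NORMAL("DisplayBins0", 0x0, 0x1000 - 1, IORESOURCE_MEM)
	PREALLOC_TFTP("TFTPBuffer", 0x0, (80UL << 20) - 1, IORESOURCE_MEM)
	{ .flags = 0 },			/* end-of-table marker */
};

int main(void)
{
	for (const struct resource *r = demo_resources; r->flags; r++)
		printf("%s: %#lx..%#lx\n", r->name, r->start, r->end);
	return 0;
}

Compiled as-is, only DisplayBins0 is printed; defining CONFIG_PREALLOC_TFTP
would add the TFTPBuffer row without touching the table itself.
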
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
deleted file mode 100644 (file)
index 4989263..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
- *     All rights reserved.
- *     Authors: Carsten Langgaard <carstenl@mips.com>
- *              Maciej W. Rozycki <macro@mips.com>
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * PROM library initialisation code.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-
-#include <asm/mips-boards/generic.h>
-#include <asm/mach-powertv/asic.h>
-
-#include "init.h"
-
-static int *_prom_envp;
-unsigned long _prom_memsize;
-
-/*
- * YAMON (32-bit PROM) passes arguments and the environment as 32-bit
- * pointers. This macro takes care of sign extension when running in
- * 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
-{
-       char *result = NULL;
-
-       if (_prom_envp != NULL) {
-               /*
-                * Return a pointer to the given environment variable.
-                * In 64-bit mode: we're using 64-bit pointers, but all pointers
-                * in the PROM structures are only 32-bit, so we need some
-                * workarounds, if we are running in 64-bit mode.
-                */
-               int i, index = 0;
-
-               i = strlen(envname);
-
-               while (prom_envp(index)) {
-                       if (strncmp(envname, prom_envp(index), i) == 0) {
-                               result = prom_envp(index + 1);
-                               break;
-                       }
-                       index += 2;
-               }
-       }
-
-       return result;
-}
-
-void __init prom_init(void)
-{
-       int prom_argc;
-       char *prom_argv;
-
-       prom_argc = fw_arg0;
-       prom_argv = (char *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-       _prom_memsize = (unsigned long) fw_arg3;
-
-       if (prom_argc == 1) {
-               strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
-               strlcat(arcs_cmdline, prom_argv, COMMAND_LINE_SIZE);
-       }
-
-       configure_platform();
-       prom_meminit();
-}
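
prom_getenv() above walks an environment block that YAMON lays out as
alternating name/value pointers terminated by a NULL name, and prom_envp()
exists because those entries are stored as 32-bit values that must be
sign-extended before a 64-bit kernel can use them as pointers. A hedged
user-space illustration of the lookup over that pair layout (the demo_* names
are invented; the 32-bit widening only matters on the real hardware and is
omitted):

#include <stdio.h>
#include <string.h>

/* YAMON-style environment: name, value, name, value, ..., NULL terminator */
static const char *demo_envp[] = {
	"memsize", "0x08000000",
	"ethaddr", "00:11:22:33:44:55",
	NULL,
};

static const char *demo_getenv(const char *envname)
{
	size_t n = strlen(envname);

	for (int index = 0; demo_envp[index] != NULL; index += 2) {
		/* strncmp() mirrors the prefix match used by the original */
		if (strncmp(envname, demo_envp[index], n) == 0)
			return demo_envp[index + 1];
	}
	return NULL;
}

int main(void)
{
	printf("memsize = %s\n", demo_getenv("memsize"));
	return 0;
}
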
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
deleted file mode 100644 (file)
index c1a8bd0..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Definitions from powertv init.c file
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_INIT_H
-#define _POWERTV_INIT_H
-extern unsigned long _prom_memsize;
-extern void prom_meminit(void);
-extern char *prom_getenv(char *name);
-#endif
diff --git a/arch/mips/powertv/ioremap.c b/arch/mips/powertv/ioremap.c
deleted file mode 100644 (file)
index d060478..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- *                     ioremap.c
- *
- * Support for mapping between dma_addr_t values and phys_addr_t values.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      David VomLehn <dvomlehn@cisco.com>
- *
- * Description:  Support for mapping between DMA and physical addresses on
- *               the SA settop.
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/mach-powertv/ioremap.h>
-
-/*
- * Define the sizes of and masks for grains in physical and DMA space. The
- * values are the same but the types are not.
- */
-#define IOR_PHYS_GRAIN         ((phys_addr_t) 1 << IOR_LSBITS)
-#define IOR_PHYS_GRAIN_MASK    (IOR_PHYS_GRAIN - 1)
-
-#define IOR_DMA_GRAIN          ((dma_addr_t) 1 << IOR_LSBITS)
-#define IOR_DMA_GRAIN_MASK     (IOR_DMA_GRAIN - 1)
-
-/*
- * Values that, when accessed by an index derived from a phys_addr_t and
- * added to phys_addr_t value, yield a DMA address
- */
-struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
-EXPORT_SYMBOL(_ior_phys_to_dma);
-
-/*
- * Values that, when accessed by an index derived from a dma_addr_t and
- * added to that dma_addr_t value, yield a physical address
- */
-struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
-EXPORT_SYMBOL(_ior_dma_to_phys);
-
-/**
- * setup_dma_to_phys - set up conversion from DMA to physical addresses
- * @dma:       DMA address of the start of the section of memory that has
- *             the given delta between DMA and physical addresses.
- * @delta:     Value that, when added to the DMA address, will yield the
- *             physical address
- * @s:         Number of bytes in the section of memory with the given delta
- *             between DMA and physical addresses.
- */
-static void setup_dma_to_phys(dma_addr_t dma, phys_addr_t delta, dma_addr_t s)
-{
-       int dma_idx, first_idx, last_idx;
-       phys_addr_t first, last;
-
-       /*
-        * Calculate the first and last indices, rounding the start of the
-        * range and its last byte down to grain boundaries.
-        */
-       first = dma & ~IOR_DMA_GRAIN_MASK;
-       last = (dma + s - 1) & ~IOR_DMA_GRAIN_MASK;
-       first_idx = first >> IOR_LSBITS;                /* Convert to indices */
-       last_idx = last >> IOR_LSBITS;
-
-       for (dma_idx = first_idx; dma_idx <= last_idx; dma_idx++)
-               _ior_dma_to_phys[dma_idx].offset = delta >> IOR_DMA_SHIFT;
-}
-
-/**
- * setup_phys_to_dma - set up conversion from physical to DMA addresses
- * @phys:      Physical address of the start of the section of memory that
- *             has the given delta between physical and DMA addresses.
- * @delta:     Value that, when added to the physical address, will yield the
- *             DMA address
- * @s:         Number of bytes in the section of memory with the given delta
- *             between DMA and physical addresses.
- */
-static void setup_phys_to_dma(phys_addr_t phys, dma_addr_t delta, phys_addr_t s)
-{
-       int phys_idx, first_idx, last_idx;
-       phys_addr_t first, last;
-
-       /*
-        * Calculate the first and last indices by rounding the start and the
-        * last byte of the region down to grain boundaries.
-        */
-       first = phys & ~IOR_PHYS_GRAIN_MASK;
-       last = (phys + s - 1) & ~IOR_PHYS_GRAIN_MASK;
-       first_idx = first >> IOR_LSBITS;                /* Convert to indices */
-       last_idx = last >> IOR_LSBITS;
-
-       for (phys_idx = first_idx; phys_idx <= last_idx; phys_idx++)
-               _ior_phys_to_dma[phys_idx].offset = delta >> IOR_PHYS_SHIFT;
-}
-
-/**
- * ioremap_add_map - add to the physical and DMA address conversion arrays
- * @phys:      Process's view of the address of the start of the memory chunk
- * @dma:       DMA address of the start of the memory chunk
- * @size:      Size, in bytes, of the chunk of memory
- *
- * NOTE: It might be obvious, but the assumption is that all @size bytes have
- * the same offset between the physical address and the DMA address.
- */
-void ioremap_add_map(phys_addr_t phys, phys_addr_t dma, phys_addr_t size)
-{
-       if (size == 0)
-               return;
-
-       if ((dma & IOR_DMA_GRAIN_MASK) != 0 ||
-               (phys & IOR_PHYS_GRAIN_MASK) != 0 ||
-               (size & IOR_PHYS_GRAIN_MASK) != 0)
-               pr_crit("Memory allocation must be in chunks of 0x%x bytes\n",
-                       IOR_PHYS_GRAIN);
-
-       setup_dma_to_phys(dma, phys - dma, size);
-       setup_phys_to_dma(phys, dma - phys, size);
-}
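For readers following the conversion scheme above, here is a small, illustration-only user-space sketch of the grain-indexed translation that the removed ioremap.c implements. The 4 MiB grain (IOR_LSBITS = 22), the table size, the helper names, and the idea of storing each offset pre-shifted by the grain width are all assumptions made for brevity; the real code scales with separate IOR_PHYS_SHIFT/IOR_DMA_SHIFT values defined in asm/mach-powertv/ioremap.h.

#include <stdint.h>
#include <stdio.h>

#define IOR_LSBITS      22                              /* assumed 4 MiB grain */
#define IOR_NUM         (1u << (32 - IOR_LSBITS))       /* one entry per grain */

/* Per-grain offset, stored shifted right by the grain width (simplification) */
static int32_t phys_to_dma_offset[IOR_NUM];

/* Record that [phys, phys + size) appears to devices at phys + delta */
static void add_map(uint32_t phys, int32_t delta, uint32_t size)
{
        uint32_t idx;

        for (idx = phys >> IOR_LSBITS;
             idx <= (phys + size - 1) >> IOR_LSBITS; idx++)
                phys_to_dma_offset[idx] = delta >> IOR_LSBITS;
}

/* Translate by looking up the grain's offset and adding it back in */
static uint32_t phys_to_dma(uint32_t phys)
{
        return phys +
                ((uint32_t)phys_to_dma_offset[phys >> IOR_LSBITS] << IOR_LSBITS);
}

int main(void)
{
        /* 256 MiB of RAM aliased at CPU address 0x10000000, devices see 0x20000000 */
        add_map(0x10000000, 0x20000000 - 0x10000000, 256u << 20);
        printf("0x10000000 -> 0x%08x\n", (unsigned int)phys_to_dma(0x10000000));
        return 0;
}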
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
deleted file mode 100644 (file)
index bc2f3ca..0000000
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Apparently originally from arch/mips/malta-memory.c. Modified to work
- * with the PowerTV bootloader.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/pfn.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/sections.h>
-
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/ioremap.h>
-
-#include "init.h"
-
-/* Memory constants */
-#define KIBIBYTE(n)            ((n) * 1024)    /* Number of kibibytes */
-#define MEBIBYTE(n)            ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
-#define DEFAULT_MEMSIZE                MEBIBYTE(128)   /* If no memsize provided */
-
-#define BLDR_SIZE      KIBIBYTE(256)           /* Memory reserved for bldr */
-#define RV_SIZE                MEBIBYTE(4)             /* Size of reset vector */
-
-#define LOW_MEM_END    0x20000000              /* Highest low memory address */
-#define BLDR_ALIAS     0x10000000              /* Bootloader address */
-#define RV_PHYS                0x1fc00000              /* Reset vector address */
-#define LOW_RAM_END    RV_PHYS                 /* End of real RAM in low mem */
-
-/*
- * Very low-level conversion from processor physical address to device
- * DMA address for the first bank of memory.
- */
-#define PHYS_TO_DMA(paddr)     ((paddr) + (CONFIG_LOW_RAM_DMA - LOW_RAM_ALIAS))
-
-unsigned long ptv_memsize;
-
-/*
- * struct low_mem_reserved - Items in low memory that are reserved
- * @start:     Physical address of item
- * @size:      Size, in bytes, of this item
- * @is_aliased: True if this is RAM aliased from another location. If false,
- *             it is something other than aliased RAM and the RAM in the
- *             unaliased address is still visible outside of low memory.
- */
-struct low_mem_reserved {
-       phys_addr_t     start;
-       phys_addr_t     size;
-       bool            is_aliased;
-};
-
-/*
- * Must be in ascending address order
- */
-struct low_mem_reserved low_mem_reserved[] = {
-       {BLDR_ALIAS, BLDR_SIZE, true},  /* Bootloader RAM */
-       {RV_PHYS, RV_SIZE, false},      /* Reset vector */
-};
-
-/*
- * struct mem_layout - layout of a piece of the system RAM
- * @phys:      Physical address of the start of this piece of RAM. This is the
- *             address at which both the processor and I/O devices see the
- *             RAM.
- * @alias:     Alias of this piece of memory in order to make it appear in
- *             the low memory part of the processor's address space. I/O
- *             devices don't see anything here.
- * @size:      Size, in bytes, of this piece of RAM
- */
-struct mem_layout {
-       phys_addr_t     phys;
-       phys_addr_t     alias;
-       phys_addr_t     size;
-};
-
-/*
- * struct mem_layout_list - list descriptor for layouts of system RAM pieces
- * @family:    Specifies the family being described
- * @n:         Number of &struct mem_layout elements
- * @layout:    Pointer to the list of &mem_layout structures
- */
-struct mem_layout_list {
-       enum family_type        family;
-       size_t                  n;
-       struct mem_layout       *layout;
-};
-
-static struct mem_layout f1500_layout[] = {
-       {0x20000000, 0x10000000, MEBIBYTE(256)},
-};
-
-static struct mem_layout f4500_layout[] = {
-       {0x40000000, 0x10000000, MEBIBYTE(256)},
-       {0x20000000, 0x20000000, MEBIBYTE(32)},
-};
-
-static struct mem_layout f8500_layout[] = {
-       {0x40000000, 0x10000000, MEBIBYTE(256)},
-       {0x20000000, 0x20000000, MEBIBYTE(32)},
-       {0x30000000, 0x30000000, MEBIBYTE(32)},
-};
-
-static struct mem_layout fx600_layout[] = {
-       {0x20000000, 0x10000000, MEBIBYTE(256)},
-       {0x60000000, 0x60000000, MEBIBYTE(128)},
-};
-
-static struct mem_layout_list layout_list[] = {
-       {FAMILY_1500, ARRAY_SIZE(f1500_layout), f1500_layout},
-       {FAMILY_1500VZE, ARRAY_SIZE(f1500_layout), f1500_layout},
-       {FAMILY_1500VZF, ARRAY_SIZE(f1500_layout), f1500_layout},
-       {FAMILY_4500, ARRAY_SIZE(f4500_layout), f4500_layout},
-       {FAMILY_8500, ARRAY_SIZE(f8500_layout), f8500_layout},
-       {FAMILY_8500RNG, ARRAY_SIZE(f8500_layout), f8500_layout},
-       {FAMILY_4600, ARRAY_SIZE(fx600_layout), fx600_layout},
-       {FAMILY_4600VZA, ARRAY_SIZE(fx600_layout), fx600_layout},
-       {FAMILY_8600, ARRAY_SIZE(fx600_layout), fx600_layout},
-       {FAMILY_8600VZB, ARRAY_SIZE(fx600_layout), fx600_layout},
-};
-
-/* If we can't determine the layout, use this */
-static struct mem_layout default_layout[] = {
-       {0x20000000, 0x10000000, MEBIBYTE(128)},
-};
-
-/**
- * register_non_ram - register low memory not available for RAM usage
- */
-static __init void register_non_ram(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(low_mem_reserved); i++)
-               add_memory_region(low_mem_reserved[i].start,
-                       low_mem_reserved[i].size, BOOT_MEM_RESERVED);
-}
-
-/**
- * get_memsize - get the size of memory as a single bank
- */
-static phys_addr_t get_memsize(void)
-{
-       static char cmdline[COMMAND_LINE_SIZE] __initdata;
-       phys_addr_t memsize = 0;
-       char *memsize_str;
-       char *ptr;
-
-       /* Check the command line first for a memsize directive */
-       strcpy(cmdline, arcs_cmdline);
-       ptr = strstr(cmdline, "memsize=");
-       if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
-               ptr = strstr(ptr, " memsize=");
-
-       if (ptr) {
-               memsize = memparse(ptr + 8, &ptr);
-       } else {
-               /* otherwise look in the environment */
-               memsize_str = prom_getenv("memsize");
-
-               if (memsize_str != NULL) {
-                       pr_info("prom memsize = %s\n", memsize_str);
-                       memsize = simple_strtol(memsize_str, NULL, 0);
-               }
-
-               if (memsize == 0) {
-                       if (_prom_memsize != 0) {
-                               memsize = _prom_memsize;
-                               pr_info("_prom_memsize = 0x%x\n", memsize);
-                               /* add in memory that the bootloader doesn't
-                                * report */
-                               memsize += BLDR_SIZE;
-                       } else {
-                               memsize = DEFAULT_MEMSIZE;
-                               pr_info("Memsize not passed by bootloader, "
-                                       "defaulting to 0x%x\n", memsize);
-                       }
-               }
-       }
-
-       return memsize;
-}
-
-/**
- * register_low_ram - register an aliased section of RAM
- * @p:         Alias address of memory
- * @n:         Number of bytes in this section of memory
- *
- * Returns the number of bytes registered
- *
- */
-static __init phys_addr_t register_low_ram(phys_addr_t p, phys_addr_t n)
-{
-       phys_addr_t s;
-       int i;
-       phys_addr_t orig_n;
-
-       orig_n = n;
-
-       BUG_ON(p + n > RV_PHYS);
-
-       for (i = 0; n != 0 && i < ARRAY_SIZE(low_mem_reserved); i++) {
-               phys_addr_t start;
-               phys_addr_t size;
-
-               start = low_mem_reserved[i].start;
-               size = low_mem_reserved[i].size;
-
-               /* Handle memory before this low memory section */
-               if (p < start) {
-                       phys_addr_t s;
-                       s = min(n, start - p);
-                       add_memory_region(p, s, BOOT_MEM_RAM);
-                       p += s;
-                       n -= s;
-               }
-
-               /* Handle the low memory section itself. If it's aliased,
-                * we reduce the number of bytes left, but if not, the RAM
-                * is available elsewhere and we don't reduce the number of
-                * bytes remaining. */
-               if (p == start) {
-                       if (low_mem_reserved[i].is_aliased) {
-                               s = min(n, size);
-                               n -= s;
-                               p += s;
-                       } else
-                               p += n;
-               }
-       }
-
-       return orig_n - n;
-}
-
-/*
- * register_ram - register real RAM
- * @p: Address of memory as seen by devices
- * @alias:     If the memory is seen at an additional address by the processor,
- *             this will be the address, otherwise it is the same as @p.
- * @n:         Number of bytes in this section of memory
- */
-static __init void register_ram(phys_addr_t p, phys_addr_t alias,
-       phys_addr_t n)
-{
-       /*
-        * If some or all of this memory has an alias, break it into the
-        * aliased and non-aliased portion.
-        */
-       if (p != alias) {
-               phys_addr_t alias_size;
-               phys_addr_t registered;
-
-               alias_size = min(n, LOW_RAM_END - alias);
-               registered = register_low_ram(alias, alias_size);
-               ioremap_add_map(alias, p, n);
-               n -= registered;
-               p += registered;
-       }
-
-#ifdef CONFIG_HIGHMEM
-       if (n != 0) {
-               add_memory_region(p, n, BOOT_MEM_RAM);
-               ioremap_add_map(p, p, n);
-       }
-#endif
-}
-
-/**
- * register_address_space - register things in the address space
- * @memsize:   Number of bytes of RAM installed
- *
- * Takes the given number of bytes of RAM and registers as many of the regions,
- * or partial regions, as it can. So, the default configuration might have
- * two regions with 256 MiB each. If the memsize passed in on the command line
- * is 384 MiB, it will register the first region with 256 MiB and the second
- * with 128 MiB.
- */
-static __init void register_address_space(phys_addr_t memsize)
-{
-       int i;
-       phys_addr_t size;
-       size_t n;
-       struct mem_layout *layout;
-       enum family_type family;
-
-       /*
-        * Register all of the things that aren't available to the kernel as
-        * memory.
-        */
-       register_non_ram();
-
-       /* Find the appropriate memory description */
-       family = platform_get_family();
-
-       for (i = 0; i < ARRAY_SIZE(layout_list); i++) {
-               if (layout_list[i].family == family)
-                       break;
-       }
-
-       if (i == ARRAY_SIZE(layout_list)) {
-               n = ARRAY_SIZE(default_layout);
-               layout = default_layout;
-       } else {
-               n = layout_list[i].n;
-               layout = layout_list[i].layout;
-       }
-
-       for (i = 0; memsize != 0 && i < n; i++) {
-               size = min(memsize, layout[i].size);
-               register_ram(layout[i].phys, layout[i].alias, size);
-               memsize -= size;
-       }
-}
-
-void __init prom_meminit(void)
-{
-       ptv_memsize = get_memsize();
-       register_address_space(ptv_memsize);
-}
-
-void __init prom_free_prom_memory(void)
-{
-       unsigned long addr;
-       int i;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
-                       continue;
-
-               addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
-                               addr, addr + boot_mem_map.map[i].size);
-       }
-}
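To make the partial-region behaviour described in the register_address_space() comment above concrete, the following stand-alone sketch walks a hypothetical two-bank layout with a 384 MiB memsize. The bank addresses and sizes merely echo the "two regions with 256 MiB each" example in that comment and are not taken from any real family table.

#include <stddef.h>
#include <stdio.h>

struct bank { unsigned long phys, alias, size; };

#define MIB(n)  ((unsigned long)(n) << 20)

int main(void)
{
        /* Hypothetical layout: two 256 MiB banks, as in the comment's example */
        struct bank layout[] = {
                { 0x20000000UL, 0x10000000UL, MIB(256) },
                { 0x60000000UL, 0x60000000UL, MIB(256) },
        };
        unsigned long memsize = MIB(384);       /* e.g. memsize=384M on the cmdline */
        size_t i;

        for (i = 0; memsize != 0 && i < sizeof(layout) / sizeof(layout[0]); i++) {
                unsigned long size = memsize < layout[i].size ?
                        memsize : layout[i].size;

                /* First pass registers 256 MiB, second pass only the 128 MiB left */
                printf("bank %zu: 0x%08lx, %lu MiB\n", i, layout[i].phys, size >> 20);
                memsize -= size;
        }
        return 0;
}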
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
deleted file mode 100644 (file)
index 2610a6a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Copyright (C) 2009  Scientific-Atlanta, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-
-obj-$(CONFIG_PCI)      += fixup-powertv.o
diff --git a/arch/mips/powertv/pci/fixup-powertv.c b/arch/mips/powertv/pci/fixup-powertv.c
deleted file mode 100644 (file)
index d7ecbae..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <asm/mach-powertv/interrupts.h>
-#include "powertv-pci.h"
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       return asic_pcie_map_irq(dev, slot, pin);
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-       return 0;
-}
-
-/*
- * asic_pcie_map_irq
- *
- * Parameters:
- * *dev - pointer to a pci_dev structure  (not used)
- * slot - slot number  (not used)
- * pin - pin number  (not used)
- *
- * Return Value:
- * Returns: IRQ number (always the PCI Express IRQ number)
- *
- * Description:
- * asic_pcie_map_irq will return the IRQ number of the PCI Express interrupt.
- *
- */
-int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       return irq_pciexp;
-}
-EXPORT_SYMBOL(asic_pcie_map_irq);
diff --git a/arch/mips/powertv/pci/powertv-pci.h b/arch/mips/powertv/pci/powertv-pci.h
deleted file mode 100644 (file)
index 1b5886b..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *                             powertv-pci.c
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-/*
- * Local definitions for the powertv PCI code
- */
-
-#ifndef _POWERTV_PCI_POWERTV_PCI_H_
-#define _POWERTV_PCI_POWERTV_PCI_H_
-extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int asic_pcie_init(void);
-extern int asic_pcie_init(void);
-
-extern int log_level;
-#endif
diff --git a/arch/mips/powertv/powertv-clock.h b/arch/mips/powertv/powertv-clock.h
deleted file mode 100644 (file)
index d94c543..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_POWERTV_CLOCK_H
-#define _POWERTV_POWERTV_CLOCK_H
-extern int powertv_clockevent_init(void);
-extern void powertv_clocksource_init(void);
-extern unsigned int mips_get_pll_freq(void);
-#endif
diff --git a/arch/mips/powertv/powertv-usb.c b/arch/mips/powertv/powertv-usb.c
deleted file mode 100644 (file)
index d845eac..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- *                             powertv-usb.c
- *
- * Description:         ASIC-specific USB device setup and shutdown
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/interrupts.h>
-
-/* misc_clk_ctl1 values */
-#define MCC1_30MHZ_POWERUP_SELECT      (1 << 14)
-#define MCC1_DIV9                      (1 << 13)
-#define MCC1_ETHMIPS_POWERUP_SELECT    (1 << 11)
-#define MCC1_USB_POWERUP_SELECT                (1 << 1)
-#define MCC1_CLOCK108_POWERUP_SELECT   (1 << 0)
-
-/* Possible values for clock select */
-#define MCC1_USB_CLOCK_HIGH_Z          (0 << 4)
-#define MCC1_USB_CLOCK_48MHZ           (1 << 4)
-#define MCC1_USB_CLOCK_24MHZ           (2 << 4)
-#define MCC1_USB_CLOCK_6MHZ            (3 << 4)
-
-#define MCC1_CONFIG    (MCC1_30MHZ_POWERUP_SELECT |            \
-                        MCC1_DIV9 |                            \
-                        MCC1_ETHMIPS_POWERUP_SELECT |          \
-                        MCC1_USB_POWERUP_SELECT |              \
-                        MCC1_CLOCK108_POWERUP_SELECT)
-
-/* misc_clk_ctl2 values */
-#define MCC2_GMII_GCLK_TO_PAD          (1 << 31)
-#define MCC2_ETHER125_0_CLOCK_SELECT   (1 << 29)
-#define MCC2_RMII_0_CLOCK_SELECT       (1 << 28)
-#define MCC2_GMII_TX0_CLOCK_SELECT     (1 << 27)
-#define MCC2_GMII_RX0_CLOCK_SELECT     (1 << 26)
-#define MCC2_ETHER125_1_CLOCK_SELECT   (1 << 24)
-#define MCC2_RMII_1_CLOCK_SELECT       (1 << 23)
-#define MCC2_GMII_TX1_CLOCK_SELECT     (1 << 22)
-#define MCC2_GMII_RX1_CLOCK_SELECT     (1 << 21)
-#define MCC2_ETHER125_2_CLOCK_SELECT   (1 << 19)
-#define MCC2_RMII_2_CLOCK_SELECT       (1 << 18)
-#define MCC2_GMII_TX2_CLOCK_SELECT     (1 << 17)
-#define MCC2_GMII_RX2_CLOCK_SELECT     (1 << 16)
-
-#define ETHER_CLK_CONFIG       (MCC2_GMII_GCLK_TO_PAD |        \
-                                MCC2_ETHER125_0_CLOCK_SELECT | \
-                                MCC2_RMII_0_CLOCK_SELECT |     \
-                                MCC2_GMII_TX0_CLOCK_SELECT |   \
-                                MCC2_GMII_RX0_CLOCK_SELECT |   \
-                                MCC2_ETHER125_1_CLOCK_SELECT | \
-                                MCC2_RMII_1_CLOCK_SELECT |     \
-                                MCC2_GMII_TX1_CLOCK_SELECT |   \
-                                MCC2_GMII_RX1_CLOCK_SELECT |   \
-                                MCC2_ETHER125_2_CLOCK_SELECT | \
-                                MCC2_RMII_2_CLOCK_SELECT |     \
-                                MCC2_GMII_TX2_CLOCK_SELECT |   \
-                                MCC2_GMII_RX2_CLOCK_SELECT)
-
-/* misc_clk_ctl2 definitions for Gaia */
-#define FSX4A_REF_SELECT               (1 << 16)
-#define FSX4B_REF_SELECT               (1 << 17)
-#define FSX4C_REF_SELECT               (1 << 18)
-#define DDR_PLL_REF_SELECT             (1 << 19)
-#define MIPS_PLL_REF_SELECT            (1 << 20)
-
-/* Definitions for the QAM frequency select register FS432X4A4_QAM_CTL */
-#define QAM_FS_SDIV_SHIFT              29
-#define QAM_FS_MD_SHIFT                        24
-#define QAM_FS_MD_MASK                 0x1f    /* Cut down to 5 bits */
-#define QAM_FS_PE_SHIFT                        8
-
-#define QAM_FS_DISABLE_DIVIDE_BY_3             (1 << 5)
-#define QAM_FS_ENABLE_PROGRAM                  (1 << 4)
-#define QAM_FS_ENABLE_OUTPUT                   (1 << 3)
-#define QAM_FS_SELECT_TEST_BYPASS              (1 << 2)
-#define QAM_FS_DISABLE_DIGITAL_STANDBY         (1 << 1)
-#define QAM_FS_CHOOSE_FS                       (1 << 0)
-
-/* Definitions for fs432x4a_ctl register */
-#define QAM_FS_NSDIV_54MHZ                     (1 << 2)
-
-/* Definitions for bcm1_usb2_ctl register */
-#define BCM1_USB2_CTL_BISTOK                           (1 << 11)
-#define BCM1_USB2_CTL_PORT2_SHIFT_JK                   (1 << 7)
-#define BCM1_USB2_CTL_PORT1_SHIFT_JK                   (1 << 6)
-#define BCM1_USB2_CTL_PORT2_FAST_EDGE                  (1 << 5)
-#define BCM1_USB2_CTL_PORT1_FAST_EDGE                  (1 << 4)
-#define BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH         (1 << 1)
-#define BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH    (1 << 0)
-
-/* Definitions for crt_spare register */
-#define CRT_SPARE_PORT2_SHIFT_JK                       (1 << 21)
-#define CRT_SPARE_PORT1_SHIFT_JK                       (1 << 20)
-#define CRT_SPARE_PORT2_FAST_EDGE                      (1 << 19)
-#define CRT_SPARE_PORT1_FAST_EDGE                      (1 << 18)
-#define CRT_SPARE_DIVIDE_BY_9_FROM_432                 (1 << 17)
-#define CRT_SPARE_USB_DIVIDE_BY_9                      (1 << 16)
-
-/* Definitions for usb2_stbus_obc register */
-#define USB_STBUS_OBC_STORE32_LOAD32                   0x3
-
-/* Definitions for usb2_stbus_mess_size register */
-#define USB2_STBUS_MESS_SIZE_2                         0x1     /* 2 packets */
-
-/* Definitions for usb2_stbus_chunk_size register */
-#define USB2_STBUS_CHUNK_SIZE_2                                0x1     /* 2 packets */
-
-/* Definitions for usb2_strap register */
-#define USB2_STRAP_HFREQ_SELECT                                0x1
-
-/*
- * USB Host Resource Definition
- */
-
-static struct resource ehci_resources[] = {
-       {
-               .parent = &asic_resource,
-               .start  = 0,
-               .end    = 0xff,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = irq_usbehci,
-               .end    = irq_usbehci,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static u64 ehci_dmamask = 0xffffffffULL;
-
-static struct platform_device ehci_device = {
-       .name = "powertv-ehci",
-       .id = 0,
-       .num_resources = 2,
-       .resource = ehci_resources,
-       .dev = {
-               .dma_mask = &ehci_dmamask,
-               .coherent_dma_mask = 0xffffffff,
-       },
-};
-
-static struct resource ohci_resources[] = {
-       {
-               .parent = &asic_resource,
-               .start  = 0,
-               .end    = 0xff,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = irq_usbohci,
-               .end    = irq_usbohci,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static u64 ohci_dmamask = 0xffffffffULL;
-
-static struct platform_device ohci_device = {
-       .name = "powertv-ohci",
-       .id = 0,
-       .num_resources = 2,
-       .resource = ohci_resources,
-       .dev = {
-               .dma_mask = &ohci_dmamask,
-               .coherent_dma_mask = 0xffffffff,
-       },
-};
-
-static unsigned usb_users;
-static DEFINE_SPINLOCK(usb_regs_lock);
-
-/**
- * fs_update - set frequency synthesizer for USB
- * @pe_bits:           Phase tap setting
- * @md_bits:           Coarse selector bus for algorithm of phase tap
- * @sdiv_bits:         Output divider setting
- * @disable_div_by_3:  Either QAM_FS_DISABLE_DIVIDE_BY_3 or zero
- * @standby:           Either QAM_FS_DISABLE_DIGITAL_STANDBY or zero
- *
- * QAM frequency selection code, which affects the frequency at which USB
- * runs. The frequency is calculated as:
- *                            2^15 * ndiv * Fin
- * Fout = ------------------------------------------------------------
- *       (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
- * where:
- * Fin         54 MHz
- * ndiv                QAM_FS_NSDIV_54MHZ ? 8 : 16
- * sdiv                1 << (sdiv_bits + 1)
- * ipe         Same as pe_bits
- * md          A five-bit, two's-complement integer (range [-16, 15]), which
- *             is the lower 5 bits of md_bits.
- */
-static void fs_update(u32 pe_bits, int md_bits, u32 sdiv_bits,
-       u32 disable_div_by_3, u32 standby)
-{
-       u32 val;
-
-       val = ((sdiv_bits << QAM_FS_SDIV_SHIFT) |
-               ((md_bits & QAM_FS_MD_MASK) << QAM_FS_MD_SHIFT) |
-               (pe_bits << QAM_FS_PE_SHIFT) |
-               QAM_FS_ENABLE_OUTPUT |
-               standby |
-               disable_div_by_3);
-       asic_write(val, fs432x4b4_usb_ctl);
-       asic_write(val | QAM_FS_ENABLE_PROGRAM, fs432x4b4_usb_ctl);
-       asic_write(val | QAM_FS_ENABLE_PROGRAM | QAM_FS_CHOOSE_FS,
-               fs432x4b4_usb_ctl);
-}
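As a quick sanity check on the Fout formula quoted in the comment above, here is a hedged, illustration-only evaluation in plain C. Only the formula comes from the removed file; the choice of ndiv = 16, the helper name fs_fout(), and the use of the ASIC_ZEUS tap values (pe = 0x0000, md = -15, sdiv_bits = 0x02) as sample inputs are assumptions, and under those assumptions the result works out to 192 MHz.

#include <stdio.h>

/* Evaluate Fout for one register setting (floating point, illustration only) */
static double fs_fout(double fin_mhz, int ndiv, unsigned int ipe, int md,
                      unsigned int sdiv_bits)
{
        double sdiv = (double)(1u << (sdiv_bits + 1));
        double denom = sdiv * (ipe * (1.0 + md / 32.0) -
                               (ipe - 32768.0) * (1.0 + (md + 1) / 32.0));

        return 32768.0 * ndiv * fin_mhz / denom;
}

int main(void)
{
        /* Sample inputs borrowed from the ASIC_ZEUS call: fs_update(0x0000, -15, 0x02, 0, 0) */
        printf("Fout = %.3f MHz\n", fs_fout(54.0, 16, 0x0000, -15, 0x02));
        return 0;
}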
-
-/*
- * usb_eye_configure - optimize the shape of the USB eye waveform
- * @set:       Bits to set in the register
- * @clear:     Bits to clear in the register; each bit with a one will
- *             be cleared in the register; zero bits will not be modified
- */
-static void usb_eye_configure(u32 set, u32 clear)
-{
-       u32 old;
-
-       old = asic_read(crt_spare);
-       old |= set;
-       old &= ~clear;
-       asic_write(old, crt_spare);
-}
-
-/*
- * platform_configure_usb - usb configuration based on platform type.
- */
-static void platform_configure_usb(void)
-{
-       u32 bcm1_usb2_ctl_value;
-       enum asic_type asic_type;
-       unsigned long flags;
-
-       spin_lock_irqsave(&usb_regs_lock, flags);
-       usb_users++;
-
-       if (usb_users != 1) {
-               spin_unlock_irqrestore(&usb_regs_lock, flags);
-               return;
-       }
-
-       asic_type = platform_get_asic();
-
-       switch (asic_type) {
-       case ASIC_ZEUS:
-               fs_update(0x0000, -15, 0x02, 0, 0);
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       case ASIC_CRONUS:
-       case ASIC_CRONUSLITE:
-               usb_eye_configure(0, CRT_SPARE_USB_DIVIDE_BY_9);
-               fs_update(0x8000, -14, 0x03, QAM_FS_DISABLE_DIVIDE_BY_3,
-                       QAM_FS_DISABLE_DIGITAL_STANDBY);
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       case ASIC_CALLIOPE:
-               fs_update(0x0000, -15, 0x02, QAM_FS_DISABLE_DIVIDE_BY_3,
-                       QAM_FS_DISABLE_DIGITAL_STANDBY);
-
-               switch (platform_get_family()) {
-               case FAMILY_1500VZE:
-                       break;
-
-               case FAMILY_1500VZF:
-                       usb_eye_configure(CRT_SPARE_PORT2_SHIFT_JK |
-                               CRT_SPARE_PORT1_SHIFT_JK |
-                               CRT_SPARE_PORT2_FAST_EDGE |
-                               CRT_SPARE_PORT1_FAST_EDGE, 0);
-                       break;
-
-               default:
-                       usb_eye_configure(CRT_SPARE_PORT2_SHIFT_JK |
-                               CRT_SPARE_PORT1_SHIFT_JK, 0);
-                       break;
-               }
-
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_BISTOK |
-                       BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       case ASIC_GAIA:
-               fs_update(0x8000, -14, 0x03, QAM_FS_DISABLE_DIVIDE_BY_3,
-                       QAM_FS_DISABLE_DIGITAL_STANDBY);
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_BISTOK |
-                       BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       default:
-               pr_err("Unknown ASIC type: %d\n", asic_type);
-               bcm1_usb2_ctl_value = 0;
-               break;
-       }
-
-       /* turn on USB power */
-       asic_write(0, usb2_strap);
-       /* Enable all OHCI interrupts */
-       asic_write(bcm1_usb2_ctl_value, usb2_control);
-       /* usb2_stbus_obc store32/load32 */
-       asic_write(USB_STBUS_OBC_STORE32_LOAD32, usb2_stbus_obc);
-       /* usb2_stbus_mess_size 2 packets */
-       asic_write(USB2_STBUS_MESS_SIZE_2, usb2_stbus_mess_size);
-       /* usb2_stbus_chunk_size 2 packets */
-       asic_write(USB2_STBUS_CHUNK_SIZE_2, usb2_stbus_chunk_size);
-       spin_unlock_irqrestore(&usb_regs_lock, flags);
-}
-
-static void platform_unconfigure_usb(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&usb_regs_lock, flags);
-       usb_users--;
-       if (usb_users == 0)
-               asic_write(USB2_STRAP_HFREQ_SELECT, usb2_strap);
-       spin_unlock_irqrestore(&usb_regs_lock, flags);
-}
-
-/*
- * Set up the USB EHCI interface
- */
-void platform_configure_usb_ehci()
-{
-       platform_configure_usb();
-}
-EXPORT_SYMBOL(platform_configure_usb_ehci);
-
-/*
- * Set up the USB OHCI interface
- */
-void platform_configure_usb_ohci()
-{
-       platform_configure_usb();
-}
-EXPORT_SYMBOL(platform_configure_usb_ohci);
-
-/*
- * Shut the USB EHCI interface down
- */
-void platform_unconfigure_usb_ehci()
-{
-       platform_unconfigure_usb();
-}
-EXPORT_SYMBOL(platform_unconfigure_usb_ehci);
-
-/*
- * Shut the USB OHCI interface down
- */
-void platform_unconfigure_usb_ohci()
-{
-       platform_unconfigure_usb();
-}
-EXPORT_SYMBOL(platform_unconfigure_usb_ohci);
-
-/**
- * platform_usb_devices_init - set up the USB device resources
- */
-int __init platform_usb_devices_init(struct platform_device **ehci_dev,
-       struct platform_device **ohci_dev)
-{
-       *ehci_dev = &ehci_device;
-       ehci_resources[0].start = asic_reg_phys_addr(ehci_hcapbase);
-       ehci_resources[0].end += ehci_resources[0].start;
-
-       *ohci_dev = &ohci_device;
-       ohci_resources[0].start = asic_reg_phys_addr(ohci_hc_revision);
-       ohci_resources[0].end += ohci_resources[0].start;
-
-       return 0;
-}
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
deleted file mode 100644 (file)
index 24689bf..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/screen_info.h>
-#include <linux/notifier.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/ctype.h>
-#include <linux/cpu.h>
-#include <linux/time.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mips-boards/generic.h>
-#include <asm/dma.h>
-#include <asm/asm.h>
-#include <asm/traps.h>
-#include <asm/asm-offsets.h>
-#include "reset.h"
-
-#define VAL(n)         STR(n)
-
-/*
- * Macros for loading addresses and storing registers:
- * LONG_L_     Stringified version of LONG_L for use in asm() statement
- * LONG_S_     Stringified version of LONG_S for use in asm() statement
- * PTR_LA_     Stringified version of PTR_LA for use in asm() statement
- * REG_SIZE    Number of 8-bit bytes in a full width register
- */
-#define LONG_L_                VAL(LONG_L) " "
-#define LONG_S_                VAL(LONG_S) " "
-#define PTR_LA_                VAL(PTR_LA) " "
-
-#ifdef CONFIG_64BIT
-#warning TODO: 64-bit code needs to be verified
-#define REG_SIZE       "8"             /* In bytes */
-#endif
-
-#ifdef CONFIG_32BIT
-#define REG_SIZE       "4"             /* In bytes */
-#endif
-
-static void register_panic_notifier(void);
-static int panic_handler(struct notifier_block *notifier_block,
-       unsigned long event, void *cause_string);
-
-const char *get_system_type(void)
-{
-       return "PowerTV";
-}
-
-void __init plat_mem_setup(void)
-{
-       panic_on_oops = 1;
-       register_panic_notifier();
-
-#if 0
-       mips_pcibios_init();
-#endif
-       mips_reboot_setup();
-}
-
-/*
- * Install a panic notifier for platform-specific diagnostics
- */
-static void register_panic_notifier()
-{
-       static struct notifier_block panic_notifier = {
-               .notifier_call = panic_handler,
-               .next = NULL,
-               .priority       = INT_MAX
-       };
-       atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
-}
-
-static int panic_handler(struct notifier_block *notifier_block,
-       unsigned long event, void *cause_string)
-{
-       struct pt_regs  my_regs;
-
-       /* Save all of the registers */
-       {
-               unsigned long   at, v0, v1; /* Must be on the stack */
-
-               /* Start by saving $at and v0 on the stack. We use $at
-                * ourselves, but it looks like the compiler may use v0 or v1
-                * to load the address of the pt_regs structure. We'll come
-                * back later to store the registers in the pt_regs
-                * structure. */
-               __asm__ __volatile__ (
-                       ".set   noat\n"
-                       LONG_S_         "$at, %[at]\n"
-                       LONG_S_         "$2, %[v0]\n"
-                       LONG_S_         "$3, %[v1]\n"
-               :
-                       [at] "=m" (at),
-                       [v0] "=m" (v0),
-                       [v1] "=m" (v1)
-               :
-               :       "at"
-               );
-
-               __asm__ __volatile__ (
-                       ".set   noat\n"
-                       "move           $at, %[pt_regs]\n"
-
-                       /* Argument registers */
-                       LONG_S_         "$4, " VAL(PT_R4) "($at)\n"
-                       LONG_S_         "$5, " VAL(PT_R5) "($at)\n"
-                       LONG_S_         "$6, " VAL(PT_R6) "($at)\n"
-                       LONG_S_         "$7, " VAL(PT_R7) "($at)\n"
-
-                       /* Temporary regs */
-                       LONG_S_         "$8, " VAL(PT_R8) "($at)\n"
-                       LONG_S_         "$9, " VAL(PT_R9) "($at)\n"
-                       LONG_S_         "$10, " VAL(PT_R10) "($at)\n"
-                       LONG_S_         "$11, " VAL(PT_R11) "($at)\n"
-                       LONG_S_         "$12, " VAL(PT_R12) "($at)\n"
-                       LONG_S_         "$13, " VAL(PT_R13) "($at)\n"
-                       LONG_S_         "$14, " VAL(PT_R14) "($at)\n"
-                       LONG_S_         "$15, " VAL(PT_R15) "($at)\n"
-
-                       /* "Saved" registers */
-                       LONG_S_         "$16, " VAL(PT_R16) "($at)\n"
-                       LONG_S_         "$17, " VAL(PT_R17) "($at)\n"
-                       LONG_S_         "$18, " VAL(PT_R18) "($at)\n"
-                       LONG_S_         "$19, " VAL(PT_R19) "($at)\n"
-                       LONG_S_         "$20, " VAL(PT_R20) "($at)\n"
-                       LONG_S_         "$21, " VAL(PT_R21) "($at)\n"
-                       LONG_S_         "$22, " VAL(PT_R22) "($at)\n"
-                       LONG_S_         "$23, " VAL(PT_R23) "($at)\n"
-
-                       /* Add'l temp regs */
-                       LONG_S_         "$24, " VAL(PT_R24) "($at)\n"
-                       LONG_S_         "$25, " VAL(PT_R25) "($at)\n"
-
-                       /* Kernel temp regs */
-                       LONG_S_         "$26, " VAL(PT_R26) "($at)\n"
-                       LONG_S_         "$27, " VAL(PT_R27) "($at)\n"
-
-                       /* Global pointer, stack pointer, frame pointer and
-                        * return address */
-                       LONG_S_         "$gp, " VAL(PT_R28) "($at)\n"
-                       LONG_S_         "$sp, " VAL(PT_R29) "($at)\n"
-                       LONG_S_         "$fp, " VAL(PT_R30) "($at)\n"
-                       LONG_S_         "$ra, " VAL(PT_R31) "($at)\n"
-
-                       /* Now we can get the $at and v0 registers back and
-                        * store them */
-                       LONG_L_         "$8, %[at]\n"
-                       LONG_S_         "$8, " VAL(PT_R1) "($at)\n"
-                       LONG_L_         "$8, %[v0]\n"
-                       LONG_S_         "$8, " VAL(PT_R2) "($at)\n"
-                       LONG_L_         "$8, %[v1]\n"
-                       LONG_S_         "$8, " VAL(PT_R3) "($at)\n"
-               :
-               :
-                       [at] "m" (at),
-                       [v0] "m" (v0),
-                       [v1] "m" (v1),
-                       [pt_regs] "r" (&my_regs)
-               :       "at", "t0"
-               );
-
-               /* Set the current EPC value to be the current location in this
-                * function */
-               __asm__ __volatile__ (
-                       ".set   noat\n"
-               "1:\n"
-                       PTR_LA_         "$at, 1b\n"
-                       LONG_S_         "$at, %[cp0_epc]\n"
-               :
-                       [cp0_epc] "=m" (my_regs.cp0_epc)
-               :
-               :       "at"
-               );
-
-               my_regs.cp0_cause = read_c0_cause();
-               my_regs.cp0_status = read_c0_status();
-       }
-
-       pr_crit("I'm feeling a bit sleepy. hmmmmm... perhaps a nap would... "
-               "zzzz... \n");
-
-       return NOTIFY_DONE;
-}
-
-/* Information about the RF MAC address, if one was supplied on the
- * command line. */
-static bool have_rfmac;
-static u8 rfmac[ETH_ALEN];
-
-static int rfmac_param(char *p)
-{
-       u8      *q;
-       bool    is_high_nibble;
-       int     c;
-
-       /* Skip a leading "0x", if present */
-       if (*p == '0' && *(p+1) == 'x')
-               p += 2;
-
-       q = rfmac;
-       is_high_nibble = true;
-
-       for (c = (unsigned char) *p++;
-               isxdigit(c) && q - rfmac < ETH_ALEN;
-               c = (unsigned char) *p++) {
-               int     nibble;
-
-               nibble = (isdigit(c) ? (c - '0') :
-                       (isupper(c) ? c - 'A' + 10 : c - 'a' + 10));
-
-               if (is_high_nibble)
-                       *q = nibble << 4;
-               else
-                       *q++ |= nibble;
-
-               is_high_nibble = !is_high_nibble;
-       }
-
-       /* If we parsed all the way to the end of the parameter value and
-        * parsed all ETH_ALEN bytes, we have a usable RF MAC address */
-       have_rfmac = (c == '\0' && q - rfmac == ETH_ALEN);
-
-       return 0;
-}
-
-early_param("rfmac", rfmac_param);
-
-/**
- * platform_random_ether_addr - generate a likely-unique Ethernet MAC address
- * @addr:      Pointer to a six-byte array that receives the generated address
- *
- * Generates an Ethernet MAC address that is highly likely to be unique for
- * this particular system on a network with other systems of the same type.
- *
- * The problem we are solving is that, when eth_random_addr() is used to
- * generate MAC addresses at startup, there isn't much entropy for the random
- * number generator to use and the addresses it produces are fairly likely to
- * be the same as those of other identical systems on the same local network.
- * This is true even for relatively small numbers of systems (for the reason
- * why, see the Wikipedia entry for "Birthday problem" at:
- *     http://en.wikipedia.org/wiki/Birthday_problem
- *     http://en.wikipedia.org/wiki/Birthday_problem).
- * The good news is that we already have a MAC address known to be unique, the
- * RF MAC address. The bad news is that this address is already in use on the
- * RF interface. Worse, the obvious trick, taking the RF MAC address and
- * turning on the locally managed bit, has already been used for other devices.
- * Still, this does give us something to work with.
- *
- * The approach we take is:
- * 1.  If we can't get the RF MAC Address, just call eth_random_addr.
- * 2.  Use the 24-bit NIC-specific bits of the RF MAC address as the last 24
- *     bits of the new address. This is very likely to be unique, except for
- *     the current box.
- * 3.  To avoid using addresses already on the current box, we set the top
- *     six bits of the address with a value different from any currently
- *     registered Scientific Atlanta organizationally unique identifier
- *     (OUI). This avoids duplication with any addresses on the system that
- *     were generated from valid Scientific Atlanta-registered address by
- *     simply flipping the locally managed bit.
- * 4.  We aren't generating a multicast address, so we leave the multicast
- *     bit off. Since we aren't using a registered address, we have to set
- *     the locally managed bit.
- * 5.  We then randomly generate the remaining 16-bits. This does two
- *     things:
- *     a.      It allows us to call this function for more than one device
- *             in this system
- *     b.      It ensures that things will probably still work even if
- *             some device on the device network has a locally managed
- *             address that matches the top six bits from step 2.
- */
-void platform_random_ether_addr(u8 addr[ETH_ALEN])
-{
-       const int num_random_bytes = 2;
-       const unsigned char non_sciatl_oui_bits = 0xc0u;
-       const unsigned char mac_addr_locally_managed = (1 << 1);
-
-       if (!have_rfmac) {
-               pr_warning("rfmac not available on command line; "
-                       "generating random MAC address\n");
-               eth_random_addr(addr);
-       }
-
-       else {
-               int     i;
-
-               /* Set the first byte to something that won't match a Scientific
-                * Atlanta OUI, is locally managed, and isn't a multicast
-                * address */
-               addr[0] = non_sciatl_oui_bits | mac_addr_locally_managed;
-
-               /* Get some bytes of random address information */
-               get_random_bytes(&addr[1], num_random_bytes);
-
-               /* Copy over the NIC-specific bits of the RF MAC address */
-               for (i = 1 + num_random_bytes; i < ETH_ALEN; i++)
-                       addr[i] = rfmac[i];
-       }
-}
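Purely to illustrate the five-step address construction described above, here is a stand-alone sketch of the byte layout. The RF MAC value is invented, rand() stands in for the kernel's get_random_bytes(), and the 0xc0 OUI-avoiding bits and the locally-administered bit simply mirror the constants used in the removed function.

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

int main(void)
{
        /* Hypothetical RF MAC, known to be unique to this box */
        const unsigned char rfmac[ETH_ALEN] = { 0x00, 0x1a, 0xc3, 0x12, 0x34, 0x56 };
        unsigned char addr[ETH_ALEN];
        int i;

        /* Steps 3-4: avoid SA OUIs, locally administered, not multicast */
        addr[0] = 0xc0u | (1u << 1);
        /* Step 5: two random bytes */
        addr[1] = (unsigned char)(rand() & 0xff);
        addr[2] = (unsigned char)(rand() & 0xff);
        /* Step 2: NIC-specific low 24 bits copied from the RF MAC */
        for (i = 3; i < ETH_ALEN; i++)
                addr[i] = rfmac[i];

        for (i = 0; i < ETH_ALEN; i++)
                printf("%02x%c", addr[i], i == ETH_ALEN - 1 ? '\n' : ':');
        return 0;
}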
diff --git a/arch/mips/powertv/reset.c b/arch/mips/powertv/reset.c
deleted file mode 100644 (file)
index 11c32fb..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/pm.h>
-
-#include <linux/io.h>
-#include <asm/reboot.h>                        /* Not included by linux/reboot.h */
-
-#include <asm/mach-powertv/asic_regs.h>
-#include "reset.h"
-
-static void mips_machine_restart(char *command)
-{
-       writel(0x1, asic_reg_addr(watchdog));
-}
-
-void mips_reboot_setup(void)
-{
-       _machine_restart = mips_machine_restart;
-}
diff --git a/arch/mips/powertv/reset.h b/arch/mips/powertv/reset.h
deleted file mode 100644 (file)
index 888fd09..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Definitions from powertv reset.c file
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_POWERTV_RESET_H
-#define _POWERTV_POWERTV_RESET_H
-extern void mips_reboot_setup(void);
-#endif
diff --git a/arch/mips/powertv/time.c b/arch/mips/powertv/time.c
deleted file mode 100644 (file)
index f38b0d4..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Setting up the clock on the MIPS boards.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/interrupts.h>
-#include <asm/time.h>
-
-#include "powertv-clock.h"
-
-unsigned int get_c0_compare_int(void)
-{
-       return irq_mips_timer;
-}
-
-void __init plat_time_init(void)
-{
-       powertv_clocksource_init();
-}
index bba0cdfd83bcd37b2278dce3fdec6f8980bfbb19..5d0983d47161c183033b9943eada1edb93b797a3 100644 (file)
@@ -26,7 +26,7 @@ void ralink_clk_add(const char *dev, unsigned long rate)
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
        if (!clk)
-               panic("failed to add clock\n");
+               panic("failed to add clock");
 
        clk->cl.dev_id = dev;
        clk->cl.clk = clk;
index d217509e530093f0154783c32f8b016ff419f8a4..a3ad56c2372d0fcd261a1ff9dfea333a528f601f 100644 (file)
@@ -350,7 +350,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name = "MT7620A";
                soc_info->compatible = "ralink,mt7620a-soc";
        } else {
-               panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+               panic("mt7620: unknown SoC, n0:%08x n1:%08x", n0, n1);
        }
 
        rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
index ce38d11f9da5bdb3baaec1821dec2a6a25ae4836..15f21ea96121023b60d005045a3115bac48c5622 100644 (file)
@@ -108,7 +108,7 @@ static int __init plat_of_setup(void)
        strncpy(of_ids[1].compatible, "palmbus", len);
 
        if (of_platform_populate(NULL, of_ids, NULL, NULL))
-               panic("failed to populate DT\n");
+               panic("failed to populate DT");
 
        /* make sure that the reset controller is setup early */
        ralink_rst_init();
index ca7ee3a33790fc074ed4788f8fb1e1412babe187..bb82a82da9e70736160bb911a432477b3a81fdfe 100644 (file)
@@ -276,7 +276,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name = "RT5350";
                soc_info->compatible = "ralink,rt5350-soc";
        } else {
-               panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+               panic("rt305x: unknown SoC, n0:%08x n1:%08x", n0, n1);
        }
 
        id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
index e2a2b203eb005b5403f2b68f92a841d87137b75c..71dedcae55a69c4ea93c5c71c7edfd93583864a9 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
index eb59bfe23e8510c7c812d43496860bcee37974b7..93c9980e1b6b61a93c81dc93a4c0902ee59a451c 100644 (file)
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-
-#include <linux/of.h>  /* linux/of.h gets to determine #include ordering */
-
 #ifndef _ASM_OPENRISC_PROM_H
 #define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
 
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-               unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:      the device whose interrupt is to be resolved
- * @out_irq:   structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _ASM_OPENRISC_PROM_H */
index ad2ce8dab9963c4dd6867ee83064460576084089..56c9cb7c8bcf9eb16188e2d60b9bfa1a386e1dcd 100644 (file)
@@ -1,6 +1,7 @@
 config PARISC
        def_bool y
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_FUNCTION_TRACER if 64BIT
@@ -287,6 +288,10 @@ config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
 
+config AUDIT_ARCH
+       def_bool y
+       depends on COMPAT
+
 config HPUX
        bool "Support for HP-UX binaries"
        depends on !64BIT
index e02f665f804a53ae73761fa3546880150eff766f..7187664034c3499b92c62210d0d347b22ae24adc 100644 (file)
@@ -94,7 +94,7 @@ PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
        else echo $(obj)/palo.conf; \
        fi)
 
-palo: vmlinuz
+palo lifimage: vmlinuz
        @if test ! -x "$(PALO)"; then \
                echo 'ERROR: Please install palo first (apt-get install palo)';\
                echo 'or build it from source and install it somewhere in your $$PATH';\
@@ -109,16 +109,23 @@ palo: vmlinuz
        fi
        $(PALO) -f $(PALOCONF)
 
-# Shorthands for known targets not supported by parisc, use vmlinux/vmlinuz as default
+BOOT_TARGETS    = zImage Image palo lifimage
+INSTALL_TARGETS = zinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+bzImage zImage: vmlinuz
 Image: vmlinux
-zImage bzImage: vmlinuz
 
 vmlinuz: vmlinux
        @gzip -cf -9 $< > $@
 
-install: vmlinuz
-       sh $(src)/arch/parisc/install.sh \
-                       $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
+install:
+       $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+                       $(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
+zinstall:
+       $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+                       $(KERNELRELEASE) vmlinuz System.map "$(INSTALL_PATH)"
 
 CLEAN_FILES    += lifimage
 MRPROPER_FILES += palo.conf
@@ -127,10 +134,11 @@ define archhelp
        @echo  '* vmlinux       - Uncompressed kernel image (./vmlinux)'
        @echo  '  vmlinuz       - Compressed kernel image (./vmlinuz)'
        @echo  '  palo          - Bootable image (./lifimage)'
-       @echo  '  install       - Install kernel using'
+       @echo  '  install       - Install uncompressed vmlinux kernel using'
        @echo  '                  (your) ~/bin/$(INSTALLKERNEL) or'
        @echo  '                  (distribution) /sbin/$(INSTALLKERNEL) or'
        @echo  '                  copy to $$(INSTALL_PATH)'
+       @echo  '  zinstall      - Install compressed vmlinuz kernel'
 endef
 
 # we require gcc 3.3 or above to compile the kernel
index 0f90569b9d8546f7fa710a5b15f7883266e1ba95..9387cc2693f6a33a70459fcb46da058db395e2c4 100644 (file)
@@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
 CONFIG_LLC2=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
index b647b182dacc17d6cf95fa79cee892fd8e0427bf..90025322b75ecd3b8a5672b76d18ce534b92ee2f 100644 (file)
@@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
 CONFIG_LLC2=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_BLK_DEV_UMEM=m
index e289f5bf31488f0bb9eff29bb51bcbf25abd3bd1..f1a0c25bef8dc3a6667c608a092f96a39d5994aa 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
@@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
index 311ca367b62237b87bb78023f7687ef1f7d98c85..ec1b014952b6601458f4c3b2901d8e86670b96fc 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
@@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
 CONFIG_IP_NF_QUEUE=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_BLK_DEV_UMEM=m
index f11006361297eb152cf5f78471f6c69de41e5cfc..e1c8d2015c8938ac0a3440d38af427b4ac8eec7a 100644 (file)
@@ -62,6 +62,8 @@ CONFIG_TIPC=m
 CONFIG_LLC2=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
index dfe88f6c95c4d7c924b9e03d8c7abd236f100609..ba61495e1fa4b8d9ee576aff9a7559e619f229b6 100644 (file)
@@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_LLC2=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
new file mode 100644 (file)
index 0000000..33b148f
--- /dev/null
@@ -0,0 +1,328 @@
+CONFIG_LOCALVERSION="-32bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_LLC2=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_KEYBOARD_HIL_OLD=m
+CONFIG_KEYBOARD_HIL=m
+CONFIG_MOUSE_SERIAL=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_VOODOO1=m
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_HARMONY=m
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_DMADEVICES=y
+CONFIG_AUXDISPLAY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_RT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
new file mode 100644 (file)
index 0000000..d7f5126
--- /dev/null
@@ -0,0 +1,345 @@
+CONFIG_LOCALVERSION="-64bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+# CONFIG_COMPACTION is not set
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=m
+CONFIG_INET_DIAG=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_DCB=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_IDE=y
+CONFIG_IDE_GD=m
+CONFIG_IDE_GD_ATAPI=y
+CONFIG_BLK_DEV_IDECD=m
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SIIMAGE=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_RAID=m
+CONFIG_DM_UEVENT=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+CONFIG_HP100=m
+CONFIG_E1000=y
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_QLA3XXX=m
+CONFIG_QLCNIC=m
+CONFIG_QLGE=m
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_HP_SDC_RTC=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_HP_SDC=m
+CONFIG_HIL_MLC=m
+CONFIG_SERIO_RAW=m
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_NOZOMI=m
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_JSM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_ATMEL=m
+CONFIG_PTP_1588_CLOCK=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_HTC_PASIC3=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_SM501=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_RADEON_UMS=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_HID_NTRIG=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_STAGING=y
+# CONFIG_NET_VENDOR_SILICOM is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_ISO9660_FS=y
+CONFIG_UDF_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_SYSV_FS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_UTF8=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=m
+CONFIG_LIBCRC32C=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
index 0da848232344fc41d9583cc10f8510a54a7d6a16..b3069fd83468c5972f98d19f8f17dfa9bfb5e0cf 100644 (file)
        nop     /* 7 */
        .endm
 
+       /*
+        * ASM_EXCEPTIONTABLE_ENTRY
+        *
+        * Creates an exception table entry.
+        * Do not convert to an assembler macro. This won't work.
+        */
+#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)      \
+       .section __ex_table,"aw"                        !       \
+       ASM_ULONG_INSN  fault_addr, except_addr         !       \
+       .previous
+
+
 #endif /* __ASSEMBLY__ */
 #endif
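
For context, each entry emitted by this macro pairs a faulting instruction address with a fixup address; the matching C-side struct and its consumer appear in the uaccess.h and fault.c hunks further down. Below is a minimal sketch of the fixup path (an editor's illustration, assuming the generic search_exception_tables() helper; the real parisc fixup_exception() additionally records fault details in exception_data):

#include <linux/module.h>	/* search_exception_tables() */
#include <asm/ptrace.h>
#include <asm/uaccess.h>	/* struct exception_table_entry */

static int sketch_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fix;

	/* look up the faulting instruction address in __ex_table */
	fix = search_exception_tables(regs->iaoq[0]);
	if (!fix)
		return 0;		/* no entry: a genuine kernel fault */

	/* resume at the fixup routine instead of oopsing */
	regs->iaoq[0] = fix->fixup;
	regs->iaoq[1] = regs->iaoq[0] + 4;
	return 1;
}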
index a2db278a5def69c4a6ac4745c3685e45600df156..3c3cb004b7e225344c9850805cce04192655e7d2 100644 (file)
@@ -19,5 +19,9 @@
 #define user_stack_pointer(regs)       ((regs)->gr[30])
 unsigned long profile_pc(struct pt_regs *);
 
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+       return regs->gr[20];
+}
 
 #endif
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644 (file)
index 0000000..8d806d8
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK  0x40000000
+#endif /* _ASM_SOCKET_H */
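
Why the value matters: userspace ORs SOCK_NONBLOCK into the type argument of socket(2), so the flag bit must overlap neither the SOCK_* type values nor, on parisc, the bits O_NONBLOCK occupies. A hedged userspace illustration (not part of the patch):

#define _GNU_SOURCE
#include <sys/socket.h>

int open_nonblocking_tcp_socket(void)
{
	/* type and flag share one argument, so the flag needs a distinct bit */
	return socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
}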
index 540c88fa8f863d44adcc254fe9a3917cf015aa4d..bc7cf120106b30e477264850e373f346eeb9bf82 100644 (file)
@@ -59,6 +59,7 @@ struct thread_info {
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE             5       /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
+#define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
 #define TIF_BLOCKSTEP          10      /* branch stepping? */
@@ -68,6 +69,7 @@ struct thread_info {
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT             (1 << TIF_32BIT)
+#define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
@@ -75,7 +77,7 @@ struct thread_info {
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |        \
-                                _TIF_BLOCKSTEP)
+                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
 
 #endif /* __KERNEL__ */
 
index 1945f995f2dfec1212ed8e1769168085076d9bb1..4736020ba5eabeb05841e5a9dc4834026f61a92d 100644 (file)
@@ -6,7 +6,7 @@ struct pt_regs;
 
 /* traps.c */
 void parisc_terminate(char *msg, struct pt_regs *regs,
-               int code, unsigned long offset);
+               int code, unsigned long offset) __noreturn __cold;
 
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
index e0a82358517e032677cd563ae7aad33e37fef11f..4006964d8e12646761d954b9f73ff0b503e736b6 100644 (file)
@@ -59,12 +59,13 @@ static inline long access_ok(int type, const void __user * addr,
 /*
  * The exception table contains two values: the first is an address
  * for an instruction that is allowed to fault, and the second is
- * the address to the fixup routine. 
+ * the address to the fixup routine. Even on a 64bit kernel we could
+ * use a 32bit (unsigned int) address here.
  */
 
 struct exception_table_entry {
-       unsigned long insn;  /* address of insn that is allowed to fault.   */
-       long fixup;          /* fixup routine */
+       unsigned long insn;     /* address of insn that is allowed to fault. */
+       unsigned long fixup;    /* fixup routine */
 };
 
 #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
index 71700e636a8e7b25172cda7f4815b1e46216b3d3..f33113a6141e7540da2195cc72469152edfbecf2 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
 
 #include <asm/sockios.h>
 
@@ -75,9 +75,6 @@
 
 #define SO_BUSY_POLL           0x4027
 
-/* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
- * have to define SOCK_NONBLOCK to a different value here.
- */
-#define SOCK_NONBLOCK   0x40000000
+#define SO_MAX_PACING_RATE     0x4048
 
-#endif /* _ASM_SOCKET_H */
+#endif /* _UAPI_ASM_SOCKET_H */
index 4da682b466d06fd0806b4b3fd515f02fbacd4009..6f68784fea25f9141b195db9ec7b532f3f01da9c 100644 (file)
 #   $4 - default install path (blank if root directory)
 #
 
+verify () {
+       if [ ! -f "$1" ]; then
+               echo ""                                                   1>&2
+               echo " *** Missing file: $1"                              1>&2
+               echo ' *** You need to run "make" before "make install".' 1>&2
+               echo ""                                                   1>&2
+               exit 1
+       fi
+}
+
+# Make sure the files actually exist
+
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 
-if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
-if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+if [ -n "${INSTALLKERNEL}" ]; then
+  if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+fi
 
 # Default install
 
-if [ -f $4/vmlinuz ]; then
-       mv $4/vmlinuz $4/vmlinuz.old
+if [ "$(basename $2)" = "zImage" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
 fi
+cat $2 > $4/$base-$1
 
-if [ -f $4/System.map ]; then
-       mv $4/System.map $4/System.old
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
 fi
+cp $3 $4/System.map-$1
 
-cat $2 > $4/vmlinuz
-cp $3 $4/System.map
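
In effect, make install now copies the uncompressed vmlinux to $(INSTALL_PATH)/vmlinux-<release> and make zinstall the compressed vmlinuz to $(INSTALL_PATH)/vmlinuz-<release>, renaming any existing copy to *.old, instead of overwriting a single unversioned vmlinuz and System.map.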
index 66ee3f12df5880db00852baad257c9ad895d11a4..ad1e3a68208cce20fed402b2388986e863b8d83e 100644 (file)
@@ -31,5 +31,6 @@ obj-$(CONFIG_64BIT)   += binfmt_elf32.o sys_parisc32.o signal32.o
 obj-$(CONFIG_STACKTRACE)+= stacktrace.o
 # only supported for PCX-W/U in 64-bit mode at the moment
 obj-$(CONFIG_64BIT)    += perf.o perf_asm.o
+obj-$(CONFIG_AUDIT_ARCH) += audit.o compat_audit.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c
new file mode 100644 (file)
index 0000000..eb64a61
--- /dev/null
@@ -0,0 +1,81 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+#ifdef CONFIG_COMPAT
+       if (arch == AUDIT_ARCH_PARISC)
+               return 1;
+#endif
+       return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+#ifdef CONFIG_COMPAT
+       extern int parisc32_classify_syscall(unsigned);
+       if (abi == AUDIT_ARCH_PARISC)
+               return parisc32_classify_syscall(syscall);
+#endif
+       switch (syscall) {
+       case __NR_open:
+               return 2;
+       case __NR_openat:
+               return 3;
+       case __NR_execve:
+               return 5;
+       default:
+               return 0;
+       }
+}
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_COMPAT
+       extern __u32 parisc32_dir_class[];
+       extern __u32 parisc32_write_class[];
+       extern __u32 parisc32_read_class[];
+       extern __u32 parisc32_chattr_class[];
+       extern __u32 parisc32_signal_class[];
+       audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
+       audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
+       audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
+       audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
+       audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
+#endif
+       audit_register_class(AUDIT_CLASS_WRITE, write_class);
+       audit_register_class(AUDIT_CLASS_READ, read_class);
+       audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+       audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+       audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+       return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c
new file mode 100644 (file)
index 0000000..c74478f
--- /dev/null
@@ -0,0 +1,40 @@
+#include <asm/unistd.h>
+
+unsigned int parisc32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned int parisc32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned int parisc32_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned int parisc32_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned int parisc32_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int parisc32_classify_syscall(unsigned syscall)
+{
+       switch (syscall) {
+       case __NR_open:
+               return 2;
+       case __NR_openat:
+               return 3;
+       case __NR_execve:
+               return 5;
+       default:
+               return 1;
+       }
+}
index 534abd4936e1ecf96a8f872d9ba90f930c1aff9e..e842ee233db44ef902899280fbadd456175a4de3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/security.h>
 #include <linux/compat.h>
 #include <linux/signal.h>
+#include <linux/audit.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -267,11 +268,28 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
+       long ret = 0;
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
-               return -1L;
-
-       return regs->gr[20];
+               ret = -1L;
+
+#ifdef CONFIG_64BIT
+       if (!is_compat_task())
+               audit_syscall_entry(AUDIT_ARCH_PARISC64,
+                       regs->gr[20],
+                       regs->gr[26], regs->gr[25],
+                       regs->gr[24], regs->gr[23]);
+       else
+#endif
+               audit_syscall_entry(AUDIT_ARCH_PARISC,
+                       regs->gr[20] & 0xffffffff,
+                       regs->gr[26] & 0xffffffff,
+                       regs->gr[25] & 0xffffffff,
+                       regs->gr[24] & 0xffffffff,
+                       regs->gr[23] & 0xffffffff);
+
+       return ret ? : regs->gr[20];
 }
 
 void do_syscall_trace_exit(struct pt_regs *regs)
@@ -279,6 +297,8 @@ void do_syscall_trace_exit(struct pt_regs *regs)
        int stepping = test_thread_flag(TIF_SINGLESTEP) ||
                test_thread_flag(TIF_BLOCKSTEP);
 
+       audit_syscall_exit(regs);
+
        if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, stepping);
 }
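
Note that the compat branch masks every register to 32 bits, so audit records for 32-bit tasks carry exactly the argument values the task passed, while the return-value convention of do_syscall_trace_enter() is preserved: -1 when the tracer aborts the syscall, otherwise the syscall number from gr[20].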
index 8a252f2d6c087aad6248e98178c79eef818c14da..2b96602e812ff9648f0ce9ec66b52e103ddb8a3e 100644 (file)
@@ -72,7 +72,6 @@ enum ipi_message_type {
        IPI_NOP=0,
        IPI_RESCHEDULE=1,
        IPI_CALL_FUNC,
-       IPI_CALL_FUNC_SINGLE,
        IPI_CPU_START,
        IPI_CPU_STOP,
        IPI_CPU_TEST
@@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
                                generic_smp_call_function_interrupt();
                                break;
 
-                       case IPI_CALL_FUNC_SINGLE:
-                               smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
-                               generic_smp_call_function_single_interrupt();
-                               break;
-
                        case IPI_CPU_START:
                                smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
                                break;
@@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+       send_IPI_single(cpu, IPI_CALL_FUNC);
 }
 
 /*
index e767ab733e321e5619b919a9b8684ac70c35ba9b..a63bb179f79a1fcd56a7bcf1adbe759f46587b71 100644 (file)
@@ -649,10 +649,8 @@ cas_action:
        /* Two exception table entries, one for the load,
           the other for the store. Either return -EFAULT.
           Each of the entries must be relocated. */
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
-       ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
-       .previous
+       ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
 
 
        /* Make sure nothing else is placed on this page */
index 04e47c6a45626347aa261d3725005cdafb9385ad..1cd1d0c83b6d7bd7a21d0a22c57e18f2ac27f65a 100644 (file)
@@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
        do_exit(SIGSEGV);
 }
 
-int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
-{
-       return syscall(regs);
-}
-
 /* gdb uses break 4,8 */
 #define GDB_BREAK_INSN 0x10004
 static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
        else {
 
            /*
-            * The kernel should never fault on its own address space.
+            * The kernel should never fault on its own address space,
+            * unless pagefault_disable() was called before.
             */
 
-           if (fault_space == 0
+           if (fault_space == 0 && !in_atomic())
            {
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                parisc_terminate("Kernel Fault", regs, code, fault_address);
-       
            }
        }
 
index 6f2d9355efe25af6ab90d4205a216c1c649c39a9..a512f07d4feba9bc2dd36f2f1d2f96ee26734348 100644 (file)
@@ -88,9 +88,7 @@ ENDPROC(lclear_user)
        ldo        1(%r25),%r25
        .previous
 
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN 1b,2b
-       .previous
+       ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
 
        .procend
 
@@ -129,10 +127,8 @@ ENDPROC(lstrnlen_user)
        copy        %r24,%r26    /* reset r26 so 0 is returned on fault */
        .previous
 
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN 1b,3b
-       ASM_ULONG_INSN 2b,3b
-       .previous
+       ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+       ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
 
        .procend
 
index ac4370b1ca4019f5eaf85f910097bf1cc69c0611..b5507ec06b846f09ed4d38c5841b4eecaffb156e 100644 (file)
@@ -56,7 +56,7 @@
 #ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #define s_space "%%sr1"
 #define d_space "%%sr2"
 #else
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+       unsigned long addr = (unsigned long)src;
+
+       if (size < 0 || addr < PAGE_SIZE)
+               return -EFAULT;
+
+       /* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+       return __probe_kernel_read(dst, src, size);
+}
+
 #endif
index d10d27a720c0d1f323c2248f01cc93193e73ca1e..df0d32971cdf0ec232d6ba5b5dd40394a4158529 100644 (file)
@@ -142,6 +142,12 @@ int fixup_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *fix;
 
+       /* If we only stored 32bit addresses in the exception table we can drop
+        * out if we faulted on a 64bit address. */
+       if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
+               && (regs->iaoq[0] >> 32))
+                       return 0;
+
        fix = search_exception_tables(regs->iaoq[0]);
        if (fix) {
                struct exception_data *d;
@@ -171,17 +177,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
                              unsigned long address)
 {
        struct vm_area_struct *vma, *prev_vma;
-       struct task_struct *tsk = current;
-       struct mm_struct *mm = tsk->mm;
+       struct task_struct *tsk;
+       struct mm_struct *mm;
        unsigned long acc_type;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       unsigned int flags;
+
+       if (in_atomic())
+               goto no_context;
 
-       if (in_atomic() || !mm)
+       tsk = current;
+       mm = tsk->mm;
+       if (!mm)
                goto no_context;
 
+       flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+
+       acc_type = parisc_acctyp(code, regs->iir);
        if (acc_type & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
 retry:
@@ -196,8 +210,6 @@ retry:
 
 good_area:
 
-       acc_type = parisc_acctyp(code,regs->iir);
-
        if ((vma->vm_flags & acc_type) != acc_type)
                goto bad_area;
 
index 38f3b7e47ec5efd190018de0d9bba1d2ef5b4011..e2e03a6d060f4543e76c8bf4c34b6a5886822fe3 100644 (file)
@@ -85,6 +85,7 @@ config GENERIC_HWEIGHT
 config PPC
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select BINFMT_ELF
        select OF
        select OF_EARLY_FLATTREE
@@ -97,7 +98,7 @@ config PPC
        select VIRT_TO_BUS if !PPC64
        select HAVE_IDE
        select HAVE_IOREMAP_PROT
-       select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_LITTLE_ENDIAN
        select HAVE_KPROBES
        select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
@@ -139,6 +140,9 @@ config PPC
        select OLD_SIGACTION if PPC32
        select HAVE_DEBUG_STACKOVERFLOW
 
+config GENERIC_CSUM
+       def_bool CPU_LITTLE_ENDIAN
+
 config EARLY_PRINTK
        bool
        default y
@@ -1009,6 +1013,9 @@ config PHYSICAL_START
        default "0x00000000"
 endif
 
+config ARCH_RANDOM
+       def_bool n
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
index 51cfb78d4061d30781b916fe7a21a3fd81daec8c..607acf54a425b2b50913ea6b4f48024a5d21aadc 100644 (file)
@@ -36,17 +36,26 @@ KBUILD_DEFCONFIG := ppc64_defconfig
 endif
 
 ifeq ($(CONFIG_PPC64),y)
-OLDARCH        := ppc64
-
 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
 
 ifeq ($(new_nm),y)
 NM             := $(NM) --synthetic
 endif
+endif
 
+ifeq ($(CONFIG_PPC64),y)
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+OLDARCH        := ppc64le
+else
+OLDARCH        := ppc64
+endif
+else
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+OLDARCH        := ppcle
 else
 OLDARCH        := ppc
 endif
+endif
 
 # It seems there are times we use this Makefile without
 # including the config file, but this replicates the old behaviour
@@ -56,11 +65,29 @@ endif
 
 UTS_MACHINE := $(OLDARCH)
 
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+override CC    += -mlittle-endian -mno-strict-align
+override AS    += -mlittle-endian
+override LD    += -EL
+override CROSS32CC += -mlittle-endian
+override CROSS32AS += -mlittle-endian
+LDEMULATION    := lppc
+GNUTARGET      := powerpcle
+MULTIPLEWORD   := -mno-multiple
+else
+override CC    += -mbig-endian
+override AS    += -mbig-endian
+override LD    += -EB
+LDEMULATION    := ppc
+GNUTARGET      := powerpc
+MULTIPLEWORD   := -mmultiple
+endif
+
 ifeq ($(HAS_BIARCH),y)
 override AS    += -a$(CONFIG_WORD_SIZE)
-override LD    += -m elf$(CONFIG_WORD_SIZE)ppc
+override LD    += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
 override CC    += -m$(CONFIG_WORD_SIZE)
-override AR    := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR)
+override AR    := GNUTARGET=elf$(CONFIG_WORD_SIZE)-$(GNUTARGET) $(AR)
 endif
 
 LDFLAGS_vmlinux-y := -Bstatic
@@ -86,7 +113,7 @@ endif
 CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
-CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
+CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
 
 ifeq ($(CONFIG_PPC_BOOK3S_64),y)
 CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
index 6a15c968d21453230ab469b5921fea28b790dcc5..ca7f08cc4afd770b528931bbab92edc55ad5e8db 100644 (file)
@@ -22,7 +22,8 @@ all: $(obj)/zImage
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -Os -msoft-float -pipe \
                 -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-                -isystem $(shell $(CROSS32CC) -print-file-name=include)
+                -isystem $(shell $(CROSS32CC) -print-file-name=include) \
+                -mbig-endian
 BOOTAFLAGS     := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
 
 ifdef CONFIG_DEBUG_INFO
@@ -74,7 +75,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
                                treeboot-walnut.c cuboot-acadia.c \
                                cuboot-kilauea.c simpleboot.c \
@@ -97,7 +98,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
                                        prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644 (file)
index 0000000..c101910
--- /dev/null
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+                               unsigned long r5, unsigned long r6,
+                               unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                  unsigned long r6, unsigned long r7)
+{
+       epapr_platform_init(r3, r4, r5, r6, r7);
+}
index 06c1961bd124a06966da1c5c6a85a6d4452e951a..02e91aa2194a57a66766852444835607143fa29a 100644 (file)
@@ -48,8 +48,8 @@ static void platform_fixups(void)
                       fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
-                  unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                        unsigned long r6, unsigned long r7)
 {
        epapr_magic = r6;
        ima_size = r7;
index 61d9899aa0d09d371c99ec70b7959693f8bfab36..62e2f43ec1df1144d3a790e1f3cf3e3263fdca47 100644 (file)
@@ -26,6 +26,9 @@
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                        unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
        unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
        }
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
        platform_ops.image_hdr = of_image_hdr;
        platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
                loader_info.initrd_size = a2;
        }
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                  unsigned long r6, unsigned long r7)
+{
+       /* Detect OF vs. ePAPR boot */
+       if (r5)
+               of_platform_init(r3, r4, (void *)r5);
+       else
+               epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
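
The dispatch keys off r5: Open Firmware enters the wrapper with its client-interface pointer in r5 (handed to of_platform_init() as promptr), whereas an ePAPR-compliant loader leaves r5 zero and passes the magic and IMA size in r6 and r7, as the epapr.c hunk above shows, so the zero/non-zero test is enough to pick the right path.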
index 6761c746048df389812888b8430aca4f442e191c..cd7af841ba051f8725e8ab33dffae1a23201fc60 100755 (executable)
@@ -148,18 +148,18 @@ make_space=y
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644 (file)
index 0000000..d853d16
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+       if (ppc_md.get_random_long)
+               return ppc_md.get_random_long(v);
+
+       return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+       unsigned long val;
+       int rc;
+
+       rc = arch_get_random_long(&val);
+       if (rc)
+               *v = val;
+
+       return rc;
+}
+
+int powernv_get_random_long(unsigned long *v);
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
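
A minimal sketch of how the hook is expected to be wired (an editor's illustration; the function name and the initcall registration are assumptions, but powernv_get_random_long() and the ppc_md field are the ones introduced in this series, and the assignment would be made from platform setup code):

#include <linux/init.h>
#include <asm/archrandom.h>
#include <asm/machdep.h>

static int __init sketch_rng_init(void)
{
	/* only reachable when CONFIG_ARCH_RANDOM selects the new hook */
	ppc_md.get_random_long = powernv_get_random_long;
	return 0;
}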
index ce0c28495f9a0416450a33a4ceee4b3a33be45c2..8251a3ba870f8cfd27a5b1c84c794ac719e34ace 100644 (file)
@@ -14,6 +14,9 @@
  * which always checksum on 4 octet boundaries.  ihl is the number
  * of 32-bit words and is always >= 5.
  */
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
@@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
        return sum;
 #endif
 }
+
+#endif
 #endif /* __KERNEL__ */
 #endif
index d3f64f361814cf4210ad9e968ccb747fe8c87048..d4a5315718ca454a6a4d01ceab9d4a0e767ac267 100644 (file)
@@ -25,7 +25,7 @@
 struct hvsi_header {
        uint8_t  type;
        uint8_t  len;
-       uint16_t seqno;
+       __be16 seqno;
 } __attribute__((packed));
 
 struct hvsi_data {
@@ -35,24 +35,24 @@ struct hvsi_data {
 
 struct hvsi_control {
        struct hvsi_header hdr;
-       uint16_t verb;
+       __be16 verb;
        /* optional depending on verb: */
-       uint32_t word;
-       uint32_t mask;
+       __be32 word;
+       __be32 mask;
 } __attribute__((packed));
 
 struct hvsi_query {
        struct hvsi_header hdr;
-       uint16_t verb;
+       __be16 verb;
 } __attribute__((packed));
 
 struct hvsi_query_response {
        struct hvsi_header hdr;
-       uint16_t verb;
-       uint16_t query_seqno;
+       __be16 verb;
+       __be16 query_seqno;
        union {
                uint8_t  version;
-               uint32_t mctrl_word;
+               __be32 mctrl_word;
        } u;
 } __attribute__((packed));
 
index 5a64757dc0d1eb0206f2499a7e26e39fec7cda21..575fbf81fad02dcd8e946cecfa9c5d5c35e11810 100644 (file)
@@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev;
 /*
  * has legacy ISA devices ?
  */
-#define arch_has_dev_port()    (isa_bridge_pcidev != NULL)
+#define arch_has_dev_port()    (isa_bridge_pcidev != NULL || isa_io_special)
 #endif
 
 #include <linux/device.h>
@@ -113,7 +113,7 @@ extern bool isa_io_special;
 
 /* gcc 4.0 and older doesn't have 'Z' constraint */
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn)                               \
+#define DEF_MMIO_IN_X(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
        u##size ret;                                                    \
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr)    \
        return ret;                                                     \
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)                              \
+#define DEF_MMIO_OUT_X(name, size, insn)                               \
 static inline void name(volatile u##size __iomem *addr, u##size val)   \
 {                                                                      \
        __asm__ __volatile__("sync;"#insn" %1,0,%2"                     \
@@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)        \
        IO_SET_SYNC_FLAG();                                             \
 }
 #else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn)                               \
+#define DEF_MMIO_IN_X(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
        u##size ret;                                                    \
@@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr)    \
        return ret;                                                     \
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)                              \
+#define DEF_MMIO_OUT_X(name, size, insn)                               \
 static inline void name(volatile u##size __iomem *addr, u##size val)   \
 {                                                                      \
        __asm__ __volatile__("sync;"#insn" %1,%y0"                      \
@@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)        \
 }
 #endif
 
-#define DEF_MMIO_IN_BE(name, size, insn)                               \
+#define DEF_MMIO_IN_D(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
        u##size ret;                                                    \
@@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr)    \
        return ret;                                                     \
 }
 
-#define DEF_MMIO_OUT_BE(name, size, insn)                              \
+#define DEF_MMIO_OUT_D(name, size, insn)                               \
 static inline void name(volatile u##size __iomem *addr, u##size val)   \
 {                                                                      \
        __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"                 \
@@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val)      \
        IO_SET_SYNC_FLAG();                                             \
 }
 
+DEF_MMIO_IN_D(in_8,     8, lbz);
+DEF_MMIO_OUT_D(out_8,   8, stb);
 
-DEF_MMIO_IN_BE(in_8,     8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
 
-DEF_MMIO_OUT_BE(out_8,     8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
 
 #ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
 
 /* There is no asm instructions for 64 bits reverse loads and stores */
 static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
 {
        out_be64(addr, swab64(val));
 }
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There is no asm instructions for 64 bits reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+       return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+       out_le64(addr, swab64(val));
+}
+
+#endif
 #endif /* __powerpc64__ */
 
 /*
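
Driver code is unaffected by the split: the same accessors are used on both kernel endiannesses, and the macros above choose the plain or byte-reversing load form at build time. A hedged sketch against a hypothetical little-endian register block:

#include <linux/types.h>
#include <asm/io.h>

static u32 sketch_read_status(void __iomem *regs)
{
	/* hypothetical register offsets; accessor names are the ones defined above */
	out_le32(regs + 0x04, 0x1);	/* byte-swapped store only on a big-endian kernel */
	return in_le32(regs + 0x00);	/* plain load on a little-endian kernel */
}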
index 0e40843a1c6ed58273c1a0e2eb22841e9007e977..41f13cec8a8fcd01bbd2e02f3cfc50083b3addba 100644 (file)
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-                          struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
index ae098c438f009eb0e312fc8b78a6ce07f4311cc6..f016bb699b5f6200268d3b8e5bf4e2bd003dbbf7 100644 (file)
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:\n\t"
+       asm_volatile_goto("1:\n\t"
                 "nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
index 8b480901165a5222634c60c81408651e3da76a38..ad3025d0880b1c709d073e9d42f55a71a9a8159a 100644 (file)
@@ -78,6 +78,18 @@ struct machdep_calls {
                                    long index);
        void            (*tce_flush)(struct iommu_table *tbl);
 
+       /* _rm versions are for real mode use only */
+       int             (*tce_build_rm)(struct iommu_table *tbl,
+                                    long index,
+                                    long npages,
+                                    unsigned long uaddr,
+                                    enum dma_data_direction direction,
+                                    struct dma_attrs *attrs);
+       void            (*tce_free_rm)(struct iommu_table *tbl,
+                                   long index,
+                                   long npages);
+       void            (*tce_flush_rm)(struct iommu_table *tbl);
+
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
                                   unsigned long flags, void *caller);
        void            (*iounmap)(volatile void __iomem *token);
@@ -263,6 +275,10 @@ struct machdep_calls {
        ssize_t (*cpu_probe)(const char *, size_t);
        ssize_t (*cpu_release)(const char *, size_t);
 #endif
+
+#ifdef CONFIG_ARCH_RANDOM
+       int (*get_random_long)(unsigned long *v);
+#endif
 };
 
 extern void e500_idle(void);
index c4cf01197273f8a88203d35c34b1516e356040a4..807014dde821058429b41a5d825759d50d4d9ed7 100644 (file)
@@ -135,8 +135,8 @@ extern char initial_stab[];
 #ifndef __ASSEMBLY__
 
 struct hash_pte {
-       unsigned long v;
-       unsigned long r;
+       __be64 v;
+       __be64 r;
 };
 
 extern struct hash_pte *htab_address;
index c5cd72833d6e7f29daba00655296750b0c560b16..4cc33ba1edd3ed292cacd1d70bd3030023bbc3e5 100644 (file)
@@ -460,10 +460,12 @@ enum {
 
 enum {
        OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+       OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
 };
 
 enum {
        OPAL_P7IOC_NUM_PEST_REGS = 128,
+       OPAL_PHB3_NUM_PEST_REGS = 256
 };
 
 struct OpalIoPhbErrorCommon {
@@ -531,28 +533,91 @@ struct OpalIoP7IOCPhbErrorData {
        uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
 };
 
+struct OpalIoPhb3ErrorData {
+       struct OpalIoPhbErrorCommon common;
+
+       uint32_t brdgCtl;
+
+       /* PHB3 UTL regs */
+       uint32_t portStatusReg;
+       uint32_t rootCmplxStatus;
+       uint32_t busAgentStatus;
+
+       /* PHB3 cfg regs */
+       uint32_t deviceStatus;
+       uint32_t slotStatus;
+       uint32_t linkStatus;
+       uint32_t devCmdStatus;
+       uint32_t devSecStatus;
+
+       /* cfg AER regs */
+       uint32_t rootErrorStatus;
+       uint32_t uncorrErrorStatus;
+       uint32_t corrErrorStatus;
+       uint32_t tlpHdr1;
+       uint32_t tlpHdr2;
+       uint32_t tlpHdr3;
+       uint32_t tlpHdr4;
+       uint32_t sourceId;
+
+       uint32_t rsv3;
+
+       /* Record data about the call to allocate a buffer */
+       uint64_t errorClass;
+       uint64_t correlator;
+
+       uint64_t nFir;                  /* 000 */
+       uint64_t nFirMask;              /* 003 */
+       uint64_t nFirWOF;               /* 008 */
+
+       /* PHB3 MMIO Error Regs */
+       uint64_t phbPlssr;              /* 120 */
+       uint64_t phbCsr;                /* 110 */
+       uint64_t lemFir;                /* C00 */
+       uint64_t lemErrorMask;          /* C18 */
+       uint64_t lemWOF;                /* C40 */
+       uint64_t phbErrorStatus;        /* C80 */
+       uint64_t phbFirstErrorStatus;   /* C88 */
+       uint64_t phbErrorLog0;          /* CC0 */
+       uint64_t phbErrorLog1;          /* CC8 */
+       uint64_t mmioErrorStatus;       /* D00 */
+       uint64_t mmioFirstErrorStatus;  /* D08 */
+       uint64_t mmioErrorLog0;         /* D40 */
+       uint64_t mmioErrorLog1;         /* D48 */
+       uint64_t dma0ErrorStatus;       /* D80 */
+       uint64_t dma0FirstErrorStatus;  /* D88 */
+       uint64_t dma0ErrorLog0;         /* DC0 */
+       uint64_t dma0ErrorLog1;         /* DC8 */
+       uint64_t dma1ErrorStatus;       /* E00 */
+       uint64_t dma1FirstErrorStatus;  /* E08 */
+       uint64_t dma1ErrorLog0;         /* E40 */
+       uint64_t dma1ErrorLog1;         /* E48 */
+       uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
+       uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
 typedef struct oppanel_line {
        const char *    line;
        uint64_t        line_len;
 } oppanel_line_t;
 
 /* API functions */
-int64_t opal_console_write(int64_t term_number, int64_t *length,
+int64_t opal_console_write(int64_t term_number, __be64 *length,
                           const uint8_t *buffer);
-int64_t opal_console_read(int64_t term_number, int64_t *length,
+int64_t opal_console_read(int64_t term_number, __be64 *length,
                          uint8_t *buffer);
 int64_t opal_console_write_buffer_space(int64_t term_number,
-                                       int64_t *length);
-int64_t opal_rtc_read(uint32_t *year_month_day,
-                     uint64_t *hour_minute_second_millisecond);
+                                       __be64 *length);
+int64_t opal_rtc_read(__be32 *year_month_day,
+                     __be64 *hour_minute_second_millisecond);
 int64_t opal_rtc_write(uint32_t year_month_day,
                       uint64_t hour_minute_second_millisecond);
 int64_t opal_cec_power_down(uint64_t request);
 int64_t opal_cec_reboot(void);
 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
-int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask);
-int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
 int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
                                    uint64_t tce_mem_size);
 int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
@@ -560,9 +625,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
 int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
                                  uint64_t offset, uint8_t *data);
 int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
-                                      uint64_t offset, uint16_t *data);
+                                      uint64_t offset, __be16 *data);
 int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
-                                 uint64_t offset, uint32_t *data);
+                                 uint64_t offset, __be32 *data);
 int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
                                   uint64_t offset, uint8_t data);
 int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
@@ -570,14 +635,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
 int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
                                   uint64_t offset, uint32_t data);
 int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
-int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
 int64_t opal_register_exception_handler(uint64_t opal_exception,
                                        uint64_t handler_address,
                                        uint64_t glue_cache_line);
 int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
                                   uint8_t *freeze_state,
-                                  uint16_t *pci_error_type,
-                                  uint64_t *phb_status);
+                                  __be16 *pci_error_type,
+                                  __be64 *phb_status);
 int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
                                  uint64_t eeh_action_token);
 int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -614,13 +679,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
 int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
                             uint32_t xive_num);
 int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
-                            int32_t *interrupt_source_number);
+                            __be32 *interrupt_source_number);
 int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
-                       uint8_t msi_range, uint32_t *msi_address,
-                       uint32_t *message_data);
+                       uint8_t msi_range, __be32 *msi_address,
+                       __be32 *message_data);
 int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
                        uint32_t xive_num, uint8_t msi_range,
-                       uint64_t *msi_address, uint32_t *message_data);
+                       __be64 *msi_address, __be32 *message_data);
 int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
 int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
 int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
@@ -642,7 +707,7 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);
 int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(uint64_t *status);
+int64_t opal_get_epow_status(__be64 *status);
 int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
                            uint16_t *pci_error_type, uint16_t *severity);
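
OPAL firmware runs big-endian, so out-parameters returned through these calls are now typed __be32/__be64 and callers convert them after the call. A hypothetical caller pattern implied by the new prototypes (OPAL_SUCCESS is the usual zero return code):

        __be64 events;
        uint64_t pending;

        if (opal_poll_events(&events) == OPAL_SUCCESS)
                pending = be64_to_cpu(events);  /* firmware value -> CPU order */
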
index 46db09414a1063415fccc9ef74b9f4b664de02f3..4a191c47286748c65cf2e7eb8753239af4c3b5d5 100644 (file)
@@ -394,6 +394,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
        hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
 }
 
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
 static inline char *get_hpte_slot_array(pmd_t *pmdp)
 {
        /*
index d7fe9f5b46d457cf0895c7dee542bc2e40fb3e49..ad5fcf51b25225c7827f45347ce94d8f7c2584b6 100644 (file)
 #define PPC_INST_TLBIVAX               0x7c000624
 #define PPC_INST_TLBSRX_DOT            0x7c0006a5
 #define PPC_INST_XXLOR                 0xf0000510
+#define PPC_INST_XXSWAPD               0xf0000250
 #define PPC_INST_XVCPSGNDP             0xf0000780
 #define PPC_INST_TRECHKPT              0x7c0007dd
 #define PPC_INST_TRECLAIM              0x7c00075d
                                               VSX_XX1((s), a, b))
 #define XXLOR(t, a, b)         stringify_in_c(.long PPC_INST_XXLOR | \
                                               VSX_XX3((t), a, b))
+#define XXSWAPD(t, a)          stringify_in_c(.long PPC_INST_XXSWAPD | \
+                                              VSX_XX3((t), a, a))
 #define XVCPSGNDP(t, a, b)     stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
                                               VSX_XX3((t), (a), (b))))
 
index 599545738af3e2354b137221cf1c07cf68e25440..8deaaad3b32fcfa0c2bf0bd14434dcdfd411fe7a 100644 (file)
@@ -98,123 +98,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_8GPRS(n, base)    REST_4GPRS(n, base); REST_4GPRS(n+4, base)
 #define REST_10GPRS(n, base)   REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
-#define SAVE_FPR(n, base)      stfd    n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base)      stfd    n,8*TS_FPRWIDTH*(n)(base)
 #define SAVE_2FPRS(n, base)    SAVE_FPR(n, base); SAVE_FPR(n+1, base)
 #define SAVE_4FPRS(n, base)    SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
 #define SAVE_8FPRS(n, base)    SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
 #define SAVE_16FPRS(n, base)   SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
 #define SAVE_32FPRS(n, base)   SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base)      lfd     n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base)      lfd     n,8*TS_FPRWIDTH*(n)(base)
 #define REST_2FPRS(n, base)    REST_FPR(n, base); REST_FPR(n+1, base)
 #define REST_4FPRS(n, base)    REST_2FPRS(n, base); REST_2FPRS(n+2, base)
 #define REST_8FPRS(n, base)    REST_4FPRS(n, base); REST_4FPRS(n+4, base)
 #define REST_16FPRS(n, base)   REST_8FPRS(n, base); REST_8FPRS(n+8, base)
 #define REST_32FPRS(n, base)   REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
-#define SAVE_VR(n,b,base)      li b,THREAD_VR0+(16*(n));  stvx n,base,b
+#define SAVE_VR(n,b,base)      li b,16*(n);  stvx n,base,b
 #define SAVE_2VRS(n,b,base)    SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
 #define SAVE_4VRS(n,b,base)    SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
 #define SAVE_8VRS(n,b,base)    SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
 #define SAVE_16VRS(n,b,base)   SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
 #define SAVE_32VRS(n,b,base)   SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)      li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base)      li b,16*(n); lvx n,base,b
 #define REST_2VRS(n,b,base)    REST_VR(n,b,base); REST_VR(n+1,b,base)
 #define REST_4VRS(n,b,base)    REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
 #define REST_8VRS(n,b,base)    REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
 #define REST_16VRS(n,b,base)   REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)   REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
-/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
- * thread_struct:
- */
-#define SAVE_FPR_TRANSACT(n, base)     stfd n,THREAD_TRANSACT_FPR0+    \
-                                       8*TS_FPRWIDTH*(n)(base)
-#define SAVE_2FPRS_TRANSACT(n, base)   SAVE_FPR_TRANSACT(n, base);     \
-                                       SAVE_FPR_TRANSACT(n+1, base)
-#define SAVE_4FPRS_TRANSACT(n, base)   SAVE_2FPRS_TRANSACT(n, base);   \
-                                       SAVE_2FPRS_TRANSACT(n+2, base)
-#define SAVE_8FPRS_TRANSACT(n, base)   SAVE_4FPRS_TRANSACT(n, base);   \
-                                       SAVE_4FPRS_TRANSACT(n+4, base)
-#define SAVE_16FPRS_TRANSACT(n, base)  SAVE_8FPRS_TRANSACT(n, base);   \
-                                       SAVE_8FPRS_TRANSACT(n+8, base)
-#define SAVE_32FPRS_TRANSACT(n, base)  SAVE_16FPRS_TRANSACT(n, base);  \
-                                       SAVE_16FPRS_TRANSACT(n+16, base)
-
-#define REST_FPR_TRANSACT(n, base)     lfd     n,THREAD_TRANSACT_FPR0+ \
-                                       8*TS_FPRWIDTH*(n)(base)
-#define REST_2FPRS_TRANSACT(n, base)   REST_FPR_TRANSACT(n, base);     \
-                                       REST_FPR_TRANSACT(n+1, base)
-#define REST_4FPRS_TRANSACT(n, base)   REST_2FPRS_TRANSACT(n, base);   \
-                                       REST_2FPRS_TRANSACT(n+2, base)
-#define REST_8FPRS_TRANSACT(n, base)   REST_4FPRS_TRANSACT(n, base);   \
-                                       REST_4FPRS_TRANSACT(n+4, base)
-#define REST_16FPRS_TRANSACT(n, base)  REST_8FPRS_TRANSACT(n, base);   \
-                                       REST_8FPRS_TRANSACT(n+8, base)
-#define REST_32FPRS_TRANSACT(n, base)  REST_16FPRS_TRANSACT(n, base);  \
-                                       REST_16FPRS_TRANSACT(n+16, base)
-
-
-#define SAVE_VR_TRANSACT(n,b,base)     li b,THREAD_TRANSACT_VR0+(16*(n)); \
-                                       stvx n,b,base
-#define SAVE_2VRS_TRANSACT(n,b,base)   SAVE_VR_TRANSACT(n,b,base);     \
-                                       SAVE_VR_TRANSACT(n+1,b,base)
-#define SAVE_4VRS_TRANSACT(n,b,base)   SAVE_2VRS_TRANSACT(n,b,base);   \
-                                       SAVE_2VRS_TRANSACT(n+2,b,base)
-#define SAVE_8VRS_TRANSACT(n,b,base)   SAVE_4VRS_TRANSACT(n,b,base);   \
-                                       SAVE_4VRS_TRANSACT(n+4,b,base)
-#define SAVE_16VRS_TRANSACT(n,b,base)  SAVE_8VRS_TRANSACT(n,b,base);   \
-                                       SAVE_8VRS_TRANSACT(n+8,b,base)
-#define SAVE_32VRS_TRANSACT(n,b,base)  SAVE_16VRS_TRANSACT(n,b,base);  \
-                                       SAVE_16VRS_TRANSACT(n+16,b,base)
-
-#define REST_VR_TRANSACT(n,b,base)     li b,THREAD_TRANSACT_VR0+(16*(n)); \
-                                       lvx n,b,base
-#define REST_2VRS_TRANSACT(n,b,base)   REST_VR_TRANSACT(n,b,base);     \
-                                       REST_VR_TRANSACT(n+1,b,base)
-#define REST_4VRS_TRANSACT(n,b,base)   REST_2VRS_TRANSACT(n,b,base);   \
-                                       REST_2VRS_TRANSACT(n+2,b,base)
-#define REST_8VRS_TRANSACT(n,b,base)   REST_4VRS_TRANSACT(n,b,base);   \
-                                       REST_4VRS_TRANSACT(n+4,b,base)
-#define REST_16VRS_TRANSACT(n,b,base)  REST_8VRS_TRANSACT(n,b,base);   \
-                                       REST_8VRS_TRANSACT(n+8,b,base)
-#define REST_32VRS_TRANSACT(n,b,base)  REST_16VRS_TRANSACT(n,b,base);  \
-                                       REST_16VRS_TRANSACT(n+16,b,base)
-
-
-#define SAVE_VSR_TRANSACT(n,b,base)    li b,THREAD_TRANSACT_VSR0+(16*(n)); \
-                                       STXVD2X(n,R##base,R##b)
-#define SAVE_2VSRS_TRANSACT(n,b,base)  SAVE_VSR_TRANSACT(n,b,base);    \
-                                       SAVE_VSR_TRANSACT(n+1,b,base)
-#define SAVE_4VSRS_TRANSACT(n,b,base)  SAVE_2VSRS_TRANSACT(n,b,base);  \
-                                       SAVE_2VSRS_TRANSACT(n+2,b,base)
-#define SAVE_8VSRS_TRANSACT(n,b,base)  SAVE_4VSRS_TRANSACT(n,b,base);  \
-                                       SAVE_4VSRS_TRANSACT(n+4,b,base)
-#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base);  \
-                                       SAVE_8VSRS_TRANSACT(n+8,b,base)
-#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
-                                       SAVE_16VSRS_TRANSACT(n+16,b,base)
-
-#define REST_VSR_TRANSACT(n,b,base)    li b,THREAD_TRANSACT_VSR0+(16*(n)); \
-                                       LXVD2X(n,R##base,R##b)
-#define REST_2VSRS_TRANSACT(n,b,base)  REST_VSR_TRANSACT(n,b,base);    \
-                                       REST_VSR_TRANSACT(n+1,b,base)
-#define REST_4VSRS_TRANSACT(n,b,base)  REST_2VSRS_TRANSACT(n,b,base);  \
-                                       REST_2VSRS_TRANSACT(n+2,b,base)
-#define REST_8VSRS_TRANSACT(n,b,base)  REST_4VSRS_TRANSACT(n,b,base);  \
-                                       REST_4VSRS_TRANSACT(n+4,b,base)
-#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base);  \
-                                       REST_8VSRS_TRANSACT(n+8,b,base)
-#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
-                                       REST_16VSRS_TRANSACT(n+16,b,base)
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base)          STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base)           LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base)          XXSWAPD(n,n);           \
+                                       STXVD2X(n,b,base);      \
+                                       XXSWAPD(n,n)
 
+#define LXVD2X_ROT(n,b,base)           LXVD2X(n,b,base);       \
+                                       XXSWAPD(n,n)
+#endif
 /* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base)     li b,THREAD_VSR0+(16*(n));  STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base)     li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
 #define SAVE_2VSRS(n,b,base)   SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
 #define SAVE_4VSRS(n,b,base)   SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
 #define SAVE_8VSRS(n,b,base)   SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
 #define SAVE_16VSRS(n,b,base)  SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
 #define SAVE_32VSRS(n,b,base)  SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base)     li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base)     li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
 #define REST_2VSRS(n,b,base)   REST_VSR(n,b,base); REST_VSR(n+1,b,base)
 #define REST_4VSRS(n,b,base)   REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
 #define REST_8VSRS(n,b,base)   REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
@@ -832,6 +760,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define N_SLINE        68
 #define N_SO   100
 
-#endif /*  __ASSEMBLY__ */
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ *
+ * The opcode for this instruction read with the wrong endian
+ * however results in a b . + 8
+ *
+ * So essentially we use that trick to execute the following
+ * trampoline in "reverse endian" if we are running with the
+ * MSR_LE bit set the "wrong" way for whatever endianness the
+ * kernel is built for.
+ */
 
+#ifdef CONFIG_PPC_BOOK3E
+#define FIXUP_ENDIAN
+#else
+#define FIXUP_ENDIAN                                              \
+       tdi   0,0,0x48;   /* Reverse endian of b . + 8          */ \
+       b     $+36;       /* Skip trampoline if endian is good  */ \
+       .long 0x05009f42; /* bcl 20,31,$+4                      */ \
+       .long 0xa602487d; /* mflr r10                           */ \
+       .long 0x1c004a39; /* addi r10,r10,28                    */ \
+       .long 0xa600607d; /* mfmsr r11                          */ \
+       .long 0x01006b69; /* xori r11,r11,1                     */ \
+       .long 0xa6035a7d; /* mtsrr0 r10                         */ \
+       .long 0xa6037b7d; /* mtsrr1 r11                         */ \
+       .long 0x2400004c  /* rfid                               */
+#endif /* !CONFIG_PPC_BOOK3E */
+#endif /*  __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PPC_ASM_H */
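
As a worked check on the FIXUP_ENDIAN trampoline above: each .long is the byte-swapped encoding of the instruction named in its comment, so it decodes as that instruction only when fetched in the opposite endianness. For example, swab32(0x05009f42) = 0x429f0005, which is bcl 20,31,$+4 ((16 << 26) | (20 << 21) | (31 << 16) | 4 | LK), and swab32(0xa602487d) = 0x7d4802a6, the usual encoding of mflr r10.
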
index e378cccfca55bb20db094f12a2009ad3ba7bf1a4..c1583070937de03a311784c72b5c16ab7e85bff4 100644 (file)
 
 #ifdef CONFIG_VSX
 #define TS_FPRWIDTH 2
+
+#ifdef __BIG_ENDIAN__
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#else
+#define TS_FPROFFSET 1
+#define TS_VSRLOWOFFSET 0
+#endif
+
 #else
 #define TS_FPRWIDTH 1
+#define TS_FPROFFSET 0
 #endif
 
 #ifdef CONFIG_PPC64
@@ -142,15 +152,23 @@ typedef struct {
        unsigned long seg;
 } mm_segment_t;
 
-#define TS_FPROFFSET 0
-#define TS_VSRLOWOFFSET 1
-#define TS_FPR(i) fpr[i][TS_FPROFFSET]
-#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
+
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+       u64     fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+       u64     fpscr;          /* Floating point status */
+};
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+       vector128       vr[32] __attribute__((aligned(16)));
+       vector128       vscr __attribute__((aligned(16)));
+};
 
 struct thread_struct {
        unsigned long   ksp;            /* Kernel stack pointer */
-       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
        unsigned long   ksp_vsid;
 #endif
@@ -162,6 +180,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
        void            *pgdir;         /* root of page-table tree */
+       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
        /*
@@ -199,13 +218,8 @@ struct thread_struct {
        unsigned long   dvc2;
 #endif
 #endif
-       /* FP and VSX 0-31 register set */
-       double          fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
-       struct {
-
-               unsigned int pad;
-               unsigned int val;       /* Floating point status */
-       } fpscr;
+       struct thread_fp_state  fp_state;
+       struct thread_fp_state  *fp_save_area;
        int             fpexc_mode;     /* floating-point exception mode */
        unsigned int    align_ctl;      /* alignment handling control */
 #ifdef CONFIG_PPC64
@@ -223,10 +237,8 @@ struct thread_struct {
        struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
        unsigned long   trap_nr;        /* last trap # on this thread */
 #ifdef CONFIG_ALTIVEC
-       /* Complete AltiVec register set */
-       vector128       vr[32] __attribute__((aligned(16)));
-       /* AltiVec status */
-       vector128       vscr __attribute__((aligned(16)));
+       struct thread_vr_state vr_state;
+       struct thread_vr_state *vr_save_area;
        unsigned long   vrsave;
        int             used_vr;        /* set if process has used altivec */
 #endif /* CONFIG_ALTIVEC */
@@ -263,13 +275,8 @@ struct thread_struct {
         * transact_fpr[] is the new set of transactional values.
         * VRs work the same way.
         */
-       double          transact_fpr[32][TS_FPRWIDTH];
-       struct {
-               unsigned int pad;
-               unsigned int val;       /* Floating point status */
-       } transact_fpscr;
-       vector128       transact_vr[32] __attribute__((aligned(16)));
-       vector128       transact_vscr __attribute__((aligned(16)));
+       struct thread_fp_state transact_fp;
+       struct thread_vr_state transact_vr;
        unsigned long   transact_vrsave;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -321,11 +328,8 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
        .ksp = INIT_SP, \
-       .ksp_limit = INIT_SP_LIMIT, \
        .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
        .fs = KERNEL_DS, \
-       .fpr = {{0}}, \
-       .fpscr = { .val = 0, }, \
        .fpexc_mode = 0, \
        .ppr = INIT_PPR, \
 }
@@ -363,6 +367,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
 static inline unsigned int __unpack_fe01(unsigned long msr_bits)
 {
        return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
index 10d1ef016bf1b73f199387b9640cf9ffb8429b6f..126f6e98f84de687d1d26d2ab83ff4b1d2a6fc86 100644 (file)
 #define MSR_64BIT      MSR_SF
 
 /* Server variant */
-#define MSR_           (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define __MSR          (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#ifdef __BIG_ENDIAN__
+#define MSR_           __MSR
+#else
+#define MSR_           (__MSR | MSR_LE)
+#endif

 #define MSR_KERNEL     (MSR_ | MSR_64BIT)
 #define MSR_USER32     (MSR_ | MSR_PR | MSR_EE)
 #define MSR_USER64     (MSR_USER32 | MSR_64BIT)
index 0cabfd7bc2d1e5603d1cfa64e49c3cbd4557224e..07dcdcfdaefc4b4b4ed5fd01de00ffc8ebefa712 100644 (file)
@@ -54,8 +54,8 @@ struct scom_controller {
        scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
        void (*unmap)(scom_map_t map);
 
-       u64 (*read)(scom_map_t map, u32 reg);
-       void (*write)(scom_map_t map, u32 reg, u64 value);
+       int (*read)(scom_map_t map, u32 reg, u64 *value);
+       int (*write)(scom_map_t map, u32 reg, u64 value);
 };
 
 extern const struct scom_controller *scom_controller;
@@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map)
  * scom_read - Read a SCOM register
  * @map: Result of scom_map
  * @reg: Register index within that map
+ * @value: Updated with the value read
+ *
+ * Returns 0 (success) or a negative error code
  */
-static inline u64 scom_read(scom_map_t map, u32 reg)
+static inline int scom_read(scom_map_t map, u32 reg, u64 *value)
 {
-       return scom_controller->read(map, reg);
+       int rc;
+
+       rc = scom_controller->read(map, reg, value);
+       if (rc)
+               *value = 0xfffffffffffffffful;
+       return rc;
 }
 
 /**
@@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg)
  * @map: Result of scom_map
  * @reg: Register index within that map
  * @value: Value to write
+ *
+ * Returns 0 (success) or a negative error code
  */
-static inline void scom_write(scom_map_t map, u32 reg, u64 value)
+static inline int scom_write(scom_map_t map, u32 reg, u64 value)
 {
-       scom_controller->write(map, reg, value);
+       return scom_controller->write(map, reg, value);
 }
 
+
 #endif /* CONFIG_PPC_SCOM */
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
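
With scom_read()/scom_write() now returning an error code and passing the data through a pointer, callers are expected to check the return value. A hypothetical caller sketch (the register index 0x10 is made up; map would come from an earlier scom_map()):

        u64 val;
        int rc;

        rc = scom_read(map, 0x10, &val);
        if (rc)
                pr_err("SCOM read failed: %d\n", rc);
        else
                rc = scom_write(map, 0x10, val | 1);
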
index 3a7a67a0d006cfe24d0b2bd626e7361d771dde1b..d89beaba26ff95d2ab0ed48cdaf1ba7fc8f3bd73 100644 (file)
 #define FP_EX_DIVZERO         (1 << (31 - 5))
 #define FP_EX_INEXACT         (1 << (31 - 6))
 
-#define __FPU_FPSCR    (current->thread.fpscr.val)
+#define __FPU_FPSCR    (current->thread.fp_state.fpscr)
 
 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.
index e40010abcaf134f53bbcf639bf6999b856a42a2a..0dffad6bcc846725a273daff18593c7b03305060 100644 (file)
@@ -10,7 +10,9 @@
 #define __HAVE_ARCH_STRNCMP
 #define __HAVE_ARCH_STRCAT
 #define __HAVE_ARCH_MEMSET
+#ifdef __BIG_ENDIAN__
 #define __HAVE_ARCH_MEMCPY
+#endif
 #define __HAVE_ARCH_MEMMOVE
 #define __HAVE_ARCH_MEMCMP
 #define __HAVE_ARCH_MEMCHR
@@ -22,7 +24,9 @@ extern int strcmp(const char *,const char *);
 extern int strncmp(const char *, const char *, __kernel_size_t);
 extern char * strcat(char *, const char *);
 extern void * memset(void *,int,__kernel_size_t);
+#ifdef __BIG_ENDIAN__
 extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
 extern void * memmove(void *,const void *,__kernel_size_t);
 extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
index d0b6d4ac6dda58c99d7fc6c884f0bbc11d4561e0..213a5f2b0717da8240f59e1b01c4df31ddebac17 100644 (file)
@@ -8,6 +8,8 @@
 #include <linux/kernel.h>
 #include <asm/asm-compat.h>
 
+#ifdef __BIG_ENDIAN__
+
 struct word_at_a_time {
        const unsigned long high_bits, low_bits;
 };
@@ -38,4 +40,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
        return (val + c->high_bits) & ~rhs;
 }
 
+#else
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+       return mask*0x0001020304050608ul >> 56;
+}
+
+#else  /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
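
The little-endian helpers added above plug into the generic word-at-a-time string loops. A sketch of the intended calling sequence (roughly what lib/strnlen_user.c does with these primitives; word and len come from the surrounding loop):

        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long data, mask;

        /* 'word' holds the next sizeof(long) bytes of the string */
        if (has_zero(word, &data, &constants)) {
                mask = prep_zero_mask(word, data, &constants);
                mask = create_zero_mask(mask);
                len += find_zero(mask);         /* byte offset of the first NUL */
        }
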
index aa6cc4fac9651326b69a7648b13009f30dd93774..ca931d0740003e71f786fcb15db16cdf53aaadb4 100644 (file)
@@ -7,6 +7,10 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
 #include <linux/byteorder/big_endian.h>
+#endif
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */
index a6d74467c9edb476ad1175e34670848f946528c8..fa698324a1fd13e50ec0d5e01d526b5a00a2b0b8 100644 (file)
@@ -83,4 +83,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_POWERPC_SOCKET_H */
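
SO_MAX_PACING_RATE lets userspace cap a socket's transmit rate, expressed in bytes per second. A self-contained userspace sketch on systems whose headers define the option (values are arbitrary):

        #include <sys/socket.h>
        #include <stdio.h>

        static int cap_rate(int fd)
        {
                unsigned int rate = 1000000;    /* ~1 MB/s, in bytes per second */

                if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                               &rate, sizeof(rate)) < 0) {
                        perror("setsockopt(SO_MAX_PACING_RATE)");
                        return -1;
                }
                return 0;
        }
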
index a27ccd5dc6b9a5bb18435af2a862d7b5ddb619be..6e3f9772aaba7f27592708dd82fb238f93602e9e 100644 (file)
@@ -54,8 +54,6 @@ struct aligninfo {
 /* DSISR bits reported for a DCBZ instruction: */
 #define DCBZ   0x5f    /* 8xx/82xx dcbz faults when cache not enabled */
 
-#define SWAP(a, b)     (t = (a), (a) = (b), (b) = t)
-
 /*
  * The PowerPC stores certain bits of the instruction that caused the
  * alignment exception in the DSISR register.  This array maps those
@@ -264,6 +262,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
 
 #define SWIZ_PTR(p)            ((unsigned char __user *)((p) ^ swiz))
 
+#ifdef __BIG_ENDIAN__
 static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
                            unsigned int reg, unsigned int nb,
                            unsigned int flags, unsigned int instr,
@@ -392,6 +391,7 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
                return -EFAULT;
        return 1;       /* exception handled and fixed up */
 }
+#endif
 
 #ifdef CONFIG_SPE
 
@@ -458,7 +458,7 @@ static struct aligninfo spe_aligninfo[32] = {
 static int emulate_spe(struct pt_regs *regs, unsigned int reg,
                       unsigned int instr)
 {
-       int t, ret;
+       int ret;
        union {
                u64 ll;
                u32 w[2];
@@ -581,24 +581,18 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
        if (flags & SW) {
                switch (flags & 0xf0) {
                case E8:
-                       SWAP(data.v[0], data.v[7]);
-                       SWAP(data.v[1], data.v[6]);
-                       SWAP(data.v[2], data.v[5]);
-                       SWAP(data.v[3], data.v[4]);
+                       data.ll = swab64(data.ll);
                        break;
                case E4:
-
-                       SWAP(data.v[0], data.v[3]);
-                       SWAP(data.v[1], data.v[2]);
-                       SWAP(data.v[4], data.v[7]);
-                       SWAP(data.v[5], data.v[6]);
+                       data.w[0] = swab32(data.w[0]);
+                       data.w[1] = swab32(data.w[1]);
                        break;
                /* It's half-word endian */
                default:
-                       SWAP(data.v[0], data.v[1]);
-                       SWAP(data.v[2], data.v[3]);
-                       SWAP(data.v[4], data.v[5]);
-                       SWAP(data.v[6], data.v[7]);
+                       data.h[0] = swab16(data.h[0]);
+                       data.h[1] = swab16(data.h[1]);
+                       data.h[2] = swab16(data.h[2]);
+                       data.h[3] = swab16(data.h[3]);
                        break;
                }
        }
@@ -658,14 +652,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
        flush_vsx_to_thread(current);
 
        if (reg < 32)
-               ptr = (char *) &current->thread.TS_FPR(reg);
+               ptr = (char *) &current->thread.fp_state.fpr[reg][0];
        else
-               ptr = (char *) &current->thread.vr[reg - 32];
+               ptr = (char *) &current->thread.vr_state.vr[reg - 32];
 
        lptr = (unsigned long *) ptr;
 
+#ifdef __LITTLE_ENDIAN__
+       if (flags & SW) {
+               elsize = length;
+               sw = length-1;
+       } else {
+               /*
+                * The elements are BE ordered, even in LE mode, so process
+                * them in reverse order.
+                */
+               addr += length - elsize;
+
+               /* 8 byte memory accesses go in the top 8 bytes of the VR */
+               if (length == 8)
+                       ptr += 8;
+       }
+#else
        if (flags & SW)
                sw = elsize-1;
+#endif
 
        for (j = 0; j < length; j += elsize) {
                for (i = 0; i < elsize; ++i) {
@@ -675,19 +686,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
                                ret |= __get_user(ptr[i^sw], addr + i);
                }
                ptr  += elsize;
+#ifdef __LITTLE_ENDIAN__
+               addr -= elsize;
+#else
                addr += elsize;
+#endif
        }
 
+#ifdef __BIG_ENDIAN__
+#define VSX_HI 0
+#define VSX_LO 1
+#else
+#define VSX_HI 1
+#define VSX_LO 0
+#endif
+
        if (!ret) {
                if (flags & U)
                        regs->gpr[areg] = regs->dar;
 
                /* Splat load copies the same data to top and bottom 8 bytes */
                if (flags & SPLT)
-                       lptr[1] = lptr[0];
-               /* For 8 byte loads, zero the top 8 bytes */
+                       lptr[VSX_LO] = lptr[VSX_HI];
+               /* For 8 byte loads, zero the low 8 bytes */
                else if (!(flags & ST) && (8 == length))
-                       lptr[1] = 0;
+                       lptr[VSX_LO] = 0;
        } else
                return -EFAULT;
 
@@ -710,18 +733,28 @@ int fix_alignment(struct pt_regs *regs)
        unsigned int dsisr;
        unsigned char __user *addr;
        unsigned long p, swiz;
-       int ret, t;
-       union {
+       int ret, i;
+       union data {
                u64 ll;
                double dd;
                unsigned char v[8];
                struct {
+#ifdef __LITTLE_ENDIAN__
+                       int      low32;
+                       unsigned hi32;
+#else
                        unsigned hi32;
                        int      low32;
+#endif
                } x32;
                struct {
+#ifdef __LITTLE_ENDIAN__
+                       short         low16;
+                       unsigned char hi48[6];
+#else
                        unsigned char hi48[6];
                        short         low16;
+#endif
                } x16;
        } data;
 
@@ -780,8 +813,9 @@ int fix_alignment(struct pt_regs *regs)
 
        /* Byteswap little endian loads and stores */
        swiz = 0;
-       if (regs->msr & MSR_LE) {
+       if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
                flags ^= SW;
+#ifdef __BIG_ENDIAN__
                /*
                 * So-called "PowerPC little endian" mode works by
                 * swizzling addresses rather than by actually doing
@@ -794,6 +828,7 @@ int fix_alignment(struct pt_regs *regs)
                 */
                if (cpu_has_feature(CPU_FTR_PPC_LE))
                        swiz = 7;
+#endif
        }
 
        /* DAR has the operand effective address */
@@ -818,7 +853,7 @@ int fix_alignment(struct pt_regs *regs)
                        elsize = 8;
 
                flags = 0;
-               if (regs->msr & MSR_LE)
+               if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
                        flags |= SW;
                if (instruction & 0x100)
                        flags |= ST;
@@ -847,9 +882,13 @@ int fix_alignment(struct pt_regs *regs)
         * function
         */
        if (flags & M) {
+#ifdef __BIG_ENDIAN__
                PPC_WARN_ALIGNMENT(multiple, regs);
                return emulate_multiple(regs, addr, reg, nb,
                                        flags, instr, swiz);
+#else
+               return -EFAULT;
+#endif
        }
 
        /* Verify the address of the operand */
@@ -868,8 +907,12 @@ int fix_alignment(struct pt_regs *regs)
 
        /* Special case for 16-byte FP loads and stores */
        if (nb == 16) {
+#ifdef __BIG_ENDIAN__
                PPC_WARN_ALIGNMENT(fp_pair, regs);
                return emulate_fp_pair(addr, reg, flags);
+#else
+               return -EFAULT;
+#endif
        }
 
        PPC_WARN_ALIGNMENT(unaligned, regs);
@@ -878,32 +921,36 @@ int fix_alignment(struct pt_regs *regs)
         * get it from register values
         */
        if (!(flags & ST)) {
-               data.ll = 0;
-               ret = 0;
-               p = (unsigned long) addr;
+               unsigned int start = 0;
+
                switch (nb) {
-               case 8:
-                       ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
                case 4:
-                       ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
+                       start = offsetof(union data, x32.low32);
+                       break;
                case 2:
-                       ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
-                       if (unlikely(ret))
-                               return -EFAULT;
+                       start = offsetof(union data, x16.low16);
+                       break;
                }
+
+               data.ll = 0;
+               ret = 0;
+               p = (unsigned long)addr;
+
+               for (i = 0; i < nb; i++)
+                       ret |= __get_user_inatomic(data.v[start + i],
+                                                  SWIZ_PTR(p++));
+
+               if (unlikely(ret))
+                       return -EFAULT;
+
        } else if (flags & F) {
-               data.dd = current->thread.TS_FPR(reg);
+               data.ll = current->thread.TS_FPR(reg);
                if (flags & S) {
                        /* Single-precision FP store requires conversion... */
 #ifdef CONFIG_PPC_FPU
                        preempt_disable();
                        enable_kernel_fp();
-                       cvt_df(&data.dd, (float *)&data.v[4]);
+                       cvt_df(&data.dd, (float *)&data.x32.low32);
                        preempt_enable();
 #else
                        return 0;
@@ -915,17 +962,13 @@ int fix_alignment(struct pt_regs *regs)
        if (flags & SW) {
                switch (nb) {
                case 8:
-                       SWAP(data.v[0], data.v[7]);
-                       SWAP(data.v[1], data.v[6]);
-                       SWAP(data.v[2], data.v[5]);
-                       SWAP(data.v[3], data.v[4]);
+                       data.ll = swab64(data.ll);
                        break;
                case 4:
-                       SWAP(data.v[4], data.v[7]);
-                       SWAP(data.v[5], data.v[6]);
+                       data.x32.low32 = swab32(data.x32.low32);
                        break;
                case 2:
-                       SWAP(data.v[6], data.v[7]);
+                       data.x16.low16 = swab16(data.x16.low16);
                        break;
                }
        }
@@ -947,7 +990,7 @@ int fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
                preempt_disable();
                enable_kernel_fp();
-               cvt_fd((float *)&data.v[4], &data.dd);
+               cvt_fd((float *)&data.x32.low32, &data.dd);
                preempt_enable();
 #else
                return 0;
@@ -957,25 +1000,28 @@ int fix_alignment(struct pt_regs *regs)
 
        /* Store result to memory or update registers */
        if (flags & ST) {
-               ret = 0;
-               p = (unsigned long) addr;
+               unsigned int start = 0;
+
                switch (nb) {
-               case 8:
-                       ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
                case 4:
-                       ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
+                       start = offsetof(union data, x32.low32);
+                       break;
                case 2:
-                       ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
+                       start = offsetof(union data, x16.low16);
+                       break;
                }
+
+               ret = 0;
+               p = (unsigned long)addr;
+
+               for (i = 0; i < nb; i++)
+                       ret |= __put_user_inatomic(data.v[start + i],
+                                                  SWIZ_PTR(p++));
+
                if (unlikely(ret))
                        return -EFAULT;
        } else if (flags & F)
-               current->thread.TS_FPR(reg) = data.dd;
+               current->thread.TS_FPR(reg) = data.ll;
        else
                regs->gpr[reg] = data.ll;
 
index d8958be5f31a18b7c0bae16509749067aae75265..6278edddc3f8fd7deac04c8af6162bc8f22beda3 100644 (file)
@@ -80,25 +80,27 @@ int main(void)
        DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
        DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+       DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
        DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
 #endif
        DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
-       DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
-       DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+       DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+       DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
+       DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
 #ifdef CONFIG_ALTIVEC
-       DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+       DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
+       DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
        DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
-       DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
        DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+       DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
-       DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
        DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_PPC64
@@ -142,20 +144,12 @@ int main(void)
        DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
        DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
        DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
-       DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
-                                        transact_vr[0]));
-       DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct,
-                                         transact_vscr));
+       DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
+                                                transact_vr));
        DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
                                            transact_vrsave));
-       DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct,
-                                         transact_fpr[0]));
-       DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct,
-                                          transact_fpscr));
-#ifdef CONFIG_VSX
-       DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct,
-                                         transact_fpr[0]));
-#endif
+       DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
+                                                transact_fp));
        /* Local pt_regs on stack for Transactional Memory funcs. */
        DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
               sizeof(struct pt_regs) + 16);
index 55593ee2d5aacfee6ef9e86c5adfd4057c5cab10..58906d7f4c4950b5830f9706ba01cc1c3c8985f0 100644 (file)
@@ -189,8 +189,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
        }
 
        /* If PCI-E capable, dump PCI-E cap 10, and the AER */
-       cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
-       if (cap) {
+       if (pci_is_pcie(dev)) {
                n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
                printk(KERN_WARNING
                       "EEH: PCI-E capabilities and status follow:\n");
@@ -327,11 +326,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
        /* Isolate the PHB and send event */
        eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
        eeh_serialize_unlock(flags);
-       eeh_send_failure_event(phb_pe);
 
        pr_err("EEH: PHB#%x failure detected\n",
                phb_pe->phb->global_number);
        dump_stack();
+       eeh_send_failure_event(phb_pe);
 
        return 1;
 out:
@@ -454,8 +453,6 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
        eeh_serialize_unlock(flags);
 
-       eeh_send_failure_event(pe);
-
        /* Most EEH events are due to device driver bugs.  Having
         * a stack trace will help the device-driver authors figure
         * out what happened.  So print that out.
@@ -464,6 +461,8 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
                pe->addr, pe->phb->global_number);
        dump_stack();
 
+       eeh_send_failure_event(pe);
+
        return 1;
 
 dn_unlock:
index c04cdf70d487536614899da24fe88beac50a0f54..12679cd43e0c4600c52d87695145760501a78530 100644 (file)
@@ -673,9 +673,7 @@ _GLOBAL(ret_from_except_lite)
 
 resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
-       CURRENT_THREAD_INFO(r9, r1)
-       ld      r8,TI_FLAGS(r9)
-       andis.  r8,r8,_TIF_EMULATE_STACK_STORE@h
+       andis.  r8,r4,_TIF_EMULATE_STACK_STORE@h
        beq+    1f
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
@@ -1017,7 +1015,7 @@ _GLOBAL(enter_rtas)
        
         li      r9,1
         rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
-       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
+       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
        andc    r6,r0,r9
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */
@@ -1032,6 +1030,8 @@ _GLOBAL(enter_rtas)
        b       .       /* prevent speculative execution */
 
 _STATIC(rtas_return_loc)
+       FIXUP_ENDIAN
+
        /* relocation is off at this point */
        GET_PACA(r4)
        clrldi  r4,r4,2                 /* convert to realmode address */
@@ -1103,28 +1103,30 @@ _GLOBAL(enter_prom)
        std     r10,_CCR(r1)
        std     r11,_MSR(r1)
 
-       /* Get the PROM entrypoint */
-       mtlr    r4
+       /* Put PROM address in SRR0 */
+       mtsrr0  r4
 
-       /* Switch MSR to 32 bits mode
+       /* Setup our trampoline return addr in LR */
+       bcl     20,31,$+4
+0:     mflr    r4
+       addi    r4,r4,(1f - 0b)
+       mtlr    r4
+
+       /* Prepare a 32-bit mode big endian MSR
         */
 #ifdef CONFIG_PPC_BOOK3E
        rlwinm  r11,r11,0,1,31
-       mtmsr   r11
+       mtsrr1  r11
+       rfi
 #else /* CONFIG_PPC_BOOK3E */
-        mfmsr   r11
-        li      r12,1
-        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-        andc    r11,r11,r12
-        li      r12,1
-        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-        andc    r11,r11,r12
-        mtmsrd  r11
+       LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+       andc    r11,r11,r12
+       mtsrr1  r11
+       rfid
 #endif /* CONFIG_PPC_BOOK3E */
-        isync
 
-       /* Enter PROM here... */
-       blrl
+1:     /* Return from OF */
+       FIXUP_ENDIAN
 
        /* Just make sure that r1 top 32 bits didn't get
         * corrupt by OF
index 2d067049db27f1e56ca421c0a223e958125f60ad..68d74b45232d99a74c7485bc6228ebf2225703a7 100644 (file)
@@ -607,6 +607,7 @@ kernel_dbg_exc:
        NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
                                PROLOG_ADDITION_NONE)
        EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
+       CHECK_NAPPING()
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .performance_monitor_exception
        b       .ret_from_except_lite
index caeaabf11a2fbb3cd7d63555a19600f4e13e2618..4dca05e91e953e85b495722ca1a44633ca917988 100644 (file)
@@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                  \
 2:     REST_32VSRS(n,c,base);                                          \
 3:
 
-#define __REST_32FPVSRS_TRANSACT(n,c,base)                             \
-BEGIN_FTR_SECTION                                                      \
-       b       2f;                                                     \
-END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                    \
-       REST_32FPRS_TRANSACT(n,base);                                   \
-       b       3f;                                                     \
-2:     REST_32VSRS_TRANSACT(n,c,base);                                 \
-3:
-
 #define __SAVE_32FPVSRS(n,c,base)                                      \
 BEGIN_FTR_SECTION                                                      \
        b       2f;                                                     \
@@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                 \
 3:
 #else
 #define __REST_32FPVSRS(n,b,base)      REST_32FPRS(n, base)
-#define __REST_32FPVSRS_TRANSACT(n,b,base)     REST_32FPRS(n, base)
 #define __SAVE_32FPVSRS(n,b,base)      SAVE_32FPRS(n, base)
 #endif
 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
-#define REST_32FPVSRS_TRANSACT(n,c,base) \
-       __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_fpu from C.
- * void do_load_up_fpu(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_fpu)
-       mflr    r0
-       std     r0, 16(r1)
-       stdu    r1, -112(r1)
-
-       subi    r6, r3, STACK_FRAME_OVERHEAD
-       /* load_up_fpu expects r12=MSR, r13=PACA, and returns
-        * with r12 = new MSR.
-        */
-       ld      r12,_MSR(r6)
-       GET_PACA(r13)
-
-       bl      load_up_fpu
-       std     r12,_MSR(r6)
-
-       ld      r0, 112+16(r1)
-       addi    r1, r1, 112
-       mtlr    r0
-       blr
-
-
 /* void do_load_up_transact_fpu(struct thread_struct *thread)
  *
  * This is similar to load_up_fpu but for the transactional version of the FP
@@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        SYNC
        MTMSRD(r5)
 
-       lfd     fr0,THREAD_TRANSACT_FPSCR(r3)
+       addi    r7,r3,THREAD_TRANSACT_FPSTATE
+       lfd     fr0,FPSTATE_FPSCR(r7)
        MTFSF_L(fr0)
-       REST_32FPVSRS_TRANSACT(0, R4, R3)
+       REST_32FPVSRS(0, R4, R7)
 
        /* FP/VSX off again */
        MTMSRD(r6)
@@ -116,6 +80,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+/*
+ * Load state from memory into FP registers including FPSCR.
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(load_fp_state)
+       lfd     fr0,FPSTATE_FPSCR(r3)
+       MTFSF_L(fr0)
+       REST_32FPVSRS(0, R4, R3)
+       blr
+
+/*
+ * Store FP state into memory, including FPSCR
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(store_fp_state)
+       SAVE_32FPVSRS(0, R4, R3)
+       mffs    fr0
+       stfd    fr0,FPSTATE_FPSCR(r3)
+       blr
+
 /*
  * This task wants to use the FPU now.
  * On UP, disable FP for the task which had the FPU previously,
@@ -147,9 +131,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        beq     1f
        toreal(r4)
        addi    r4,r4,THREAD            /* want last_task_used_math->thread */
-       SAVE_32FPVSRS(0, R5, R4)
+       addi    r8,r4,THREAD_FPSTATE
+       SAVE_32FPVSRS(0, R5, R8)
        mffs    fr0
-       stfd    fr0,THREAD_FPSCR(r4)
+       stfd    fr0,FPSTATE_FPSCR(r8)
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -160,7 +145,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif /* CONFIG_SMP */
        /* enable use of FP after return */
 #ifdef CONFIG_PPC32
-       mfspr   r5,SPRN_SPRG_THREAD             /* current task's THREAD (phys) */
+       mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
        lwz     r4,THREAD_FPEXC_MODE(r5)
        ori     r9,r9,MSR_FP            /* enable FP for current */
        or      r9,r9,r4
@@ -172,9 +157,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        or      r12,r12,r4
        std     r12,_MSR(r1)
 #endif
-       lfd     fr0,THREAD_FPSCR(r5)
+       addi    r7,r5,THREAD_FPSTATE
+       lfd     fr0,FPSTATE_FPSCR(r7)
        MTFSF_L(fr0)
-       REST_32FPVSRS(0, R4, R5)
+       REST_32FPVSRS(0, R4, R7)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        fromreal(r4)
@@ -206,11 +192,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        PPC_LCMPI       0,r3,0
        beqlr-                          /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
+       PPC_LL  r6,THREAD_FPSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
-       PPC_LCMPI       0,r5,0
-       SAVE_32FPVSRS(0, R4 ,R3)
+       PPC_LCMPI       0,r6,0
+       bne     2f
+       addi    r6,r3,THREAD_FPSTATE
+2:     PPC_LCMPI       0,r5,0
+       SAVE_32FPVSRS(0, R4, R6)
        mffs    fr0
-       stfd    fr0,THREAD_FPSCR(r3)
+       stfd    fr0,FPSTATE_FPSCR(r6)
        beq     1f
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        li      r3,MSR_FP|MSR_FE0|MSR_FE1
index 1fb78561096accfff7e60062a4fbc0bddfcfec55..9b27b293a9226903c81529a4f01aee64a7f07d23 100644 (file)
@@ -174,7 +174,11 @@ __ftrace_make_nop(struct module *mod,
 
        pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
 
+#ifdef __LITTLE_ENDIAN__
+       ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
+#else
        ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
+#endif
 
        /* This should match what was called */
        if (ptr != ppc_function_entry((void *)addr)) {
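
Editor's note: this hunk rebuilds a 64-bit TOC pointer from the two 32-bit words of a function descriptor, and on little-endian the low word comes first in memory, hence the swapped shift. A stand-alone C illustration of the same reconstruction; the kernel tests the powerpc-specific __LITTLE_ENDIAN__ macro, while this sketch uses GCC's portable byte-order macros so it runs on any host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t words_to_u64(const uint32_t w[2])
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return ((uint64_t)w[1] << 32) + w[0];	/* low word stored first */
#else
	return ((uint64_t)w[0] << 32) + w[1];	/* high word stored first */
#endif
}

int main(void)
{
	uint64_t val = 0x1234567890abcdefULL;
	uint32_t w[2];

	memcpy(w, &val, sizeof(w));		/* view it as two memory words */
	printf("%llx\n", (unsigned long long)words_to_u64(w));
	return 0;
}
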
index 3d11d8038deec122ad9735202e98f4567998a070..2ae41aba40530f7facf916f6ce101ad3196a7363 100644 (file)
@@ -68,6 +68,7 @@ _stext:
 _GLOBAL(__start)
        /* NOP this out unconditionally */
 BEGIN_FTR_SECTION
+       FIXUP_ENDIAN
        b       .__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 
@@ -115,6 +116,7 @@ __run_at_load:
  */
        .globl  __secondary_hold
 __secondary_hold:
+       FIXUP_ENDIAN
 #ifndef CONFIG_PPC_BOOK3E
        mfmsr   r24
        ori     r24,r24,MSR_RI
@@ -205,6 +207,7 @@ _GLOBAL(generic_secondary_thread_init)
  * as SCOM before entry).
  */
 _GLOBAL(generic_secondary_smp_init)
+       FIXUP_ENDIAN
        mr      r24,r3
        mr      r25,r4
 
index 0adab06ce5c0aaa3e73818951bb4764b047b9a61..572bb5b95f35d815f6f24fe7b59a67bf420fbe2b 100644 (file)
@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
-       page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+       page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
index c69440cef7af43413987f3cd4b2c936cd0e8f788..c7cb8c232d2f4fdedf9129ec691d8471736e2fc7 100644 (file)
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-       struct thread_info *curtp, *irqtp;
-       unsigned long saved_sp_limit;
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       if (!desc)
-               return;
-
-       /* Switch to the irq stack to handle this */
-       curtp = current_thread_info();
-       irqtp = hardirq_ctx[smp_processor_id()];
-
-       if (curtp == irqtp) {
-               /* We're already on the irq stack, just handle it */
-               desc->handle_irq(irq, desc);
-               return;
-       }
-
-       saved_sp_limit = current->thread.ksp_limit;
-
-       irqtp->task = curtp->task;
-       irqtp->flags = 0;
-
-       /* Copy the softirq bits in preempt_count so that the
-        * softirq checks work in the hardirq context. */
-       irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                              (curtp->preempt_count & SOFTIRQ_MASK);
-
-       current->thread.ksp_limit = (unsigned long)irqtp +
-               _ALIGN_UP(sizeof(struct thread_info), 16);
-
-       call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-       current->thread.ksp_limit = saved_sp_limit;
-       irqtp->task = NULL;
-
-       /* Set any flag that may have been set on the
-        * alternate stack
-        */
-       if (irqtp->flags)
-               set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct irq_desc *desc;
        unsigned int irq;
 
        irq_enter();
@@ -519,18 +475,57 @@ void do_IRQ(struct pt_regs *regs)
         */
        irq = ppc_md.get_irq();
 
-       /* We can hard enable interrupts now */
+       /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();
 
        /* And finally process it */
-       if (irq != NO_IRQ)
-               handle_one_irq(irq);
-       else
+       if (unlikely(irq == NO_IRQ))
                __get_cpu_var(irq_stat).spurious_irqs++;
+       else {
+               desc = irq_to_desc(irq);
+               if (likely(desc))
+                       desc->handle_irq(irq, desc);
+       }
 
        trace_irq_exit(regs);
 
        irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct thread_info *curtp, *irqtp, *sirqtp;
+
+       /* Switch to the irq stack to handle this */
+       curtp = current_thread_info();
+       irqtp = hardirq_ctx[raw_smp_processor_id()];
+       sirqtp = softirq_ctx[raw_smp_processor_id()];
+
+       /* Already there ? */
+       if (unlikely(curtp == irqtp || curtp == sirqtp)) {
+               __do_irq(regs);
+               set_irq_regs(old_regs);
+               return;
+       }
+
+       /* Prepare the thread_info in the irq stack */
+       irqtp->task = curtp->task;
+       irqtp->flags = 0;
+
+       /* Copy the preempt_count so that the [soft]irq checks work. */
+       irqtp->preempt_count = curtp->preempt_count;
+
+       /* Switch stack and call */
+       call_do_irq(regs, irqtp);
+
+       /* Restore stack limit */
+       irqtp->task = NULL;
+
+       /* Copy back updates to the thread_info */
+       if (irqtp->flags)
+               set_bits(irqtp->flags, &curtp->flags);
+
        set_irq_regs(old_regs);
 }
 
@@ -592,28 +587,22 @@ void irq_ctx_init(void)
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
-               tp->preempt_count = 0;
 
                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
-               tp->preempt_count = HARDIRQ_OFFSET;
        }
 }
 
 static inline void do_softirq_onstack(void)
 {
        struct thread_info *curtp, *irqtp;
-       unsigned long saved_sp_limit = current->thread.ksp_limit;
 
        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
-       current->thread.ksp_limit = (unsigned long)irqtp +
-                                   _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
-       current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
 
        /* Set any flag that may have been set on the
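
Editor's note: the irq.c rework folds the old handle_one_irq() into a new __do_irq() and leaves do_IRQ() responsible only for choosing the stack; the ksp_limit juggling moves into the 32-bit call_do_irq glue shown further below. A condensed C sketch of the resulting flow; every identifier ending in _sketch is an illustrative stand-in, not kernel code:

struct ti_sketch { unsigned long flags; int preempt_count; };

static void do_irq_body_sketch(void)
{
	/* What __do_irq() now contains:
	 * irq_enter(); irq = ppc_md.get_irq();
	 * desc = irq_to_desc(irq); desc->handle_irq(irq, desc);
	 * irq_exit();
	 */
}

void do_IRQ_sketch(struct ti_sketch *curtp, struct ti_sketch *irqtp)
{
	if (curtp == irqtp) {			/* already on an IRQ stack */
		do_irq_body_sketch();
		return;
	}

	irqtp->flags = 0;
	irqtp->preempt_count = curtp->preempt_count;	/* keep softirq bits */

	/* call_do_irq() (misc_32.S / misc_64.S) switches r1 to the IRQ
	 * stack -- and, on 32-bit, swaps ksp_limit -- then calls __do_irq().
	 */
	do_irq_body_sketch();

	curtp->flags |= irqtp->flags;		/* propagate flagged work */
}
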
index 22e88dd2f34ad7b43c27fe1361f8ed9213f9f960..40bd7bd4e19a88ee9b573c37deea8bc8694375b9 100644 (file)
@@ -35,7 +35,7 @@ static struct legacy_serial_info {
        phys_addr_t                     taddr;
 } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
 
-static struct __initdata of_device_id legacy_serial_parents[] = {
+static struct of_device_id legacy_serial_parents[] __initdata = {
        {.type = "soc",},
        {.type = "tsi-bridge",},
        {.type = "opb", },
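
Editor's note: the legacy_serial.c change is an attribute-placement fix; __initdata annotates the variable, so it belongs after the declarator rather than between "struct" and the tag, where compilers typically ignore it and warn. A tiny stand-alone illustration; __initdata_sketch and struct id_sketch are stand-ins for the kernel's __initdata and of_device_id:

/* __initdata is, roughly, a section attribute: */
#define __initdata_sketch __attribute__((__section__(".init.data")))

struct id_sketch { const char *type; };

/* Old placement (attribute attached to the type reference, ignored):
 *     static struct __initdata_sketch id_sketch parents[] = { ... };
 * New placement (attribute on the variable being defined):
 */
static struct id_sketch parents[] __initdata_sketch = {
	{ .type = "soc" },
	{ .type = "tsi-bridge" },
	{ .type = "opb" },
};
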
index 777d999f563bb377bff3217358aacdc7667f1fd4..e47d268727a4a8ed87948a4d3ab5f87ad00793b1 100644 (file)
 
        .text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD area.
+ */
 _GLOBAL(call_do_softirq)
        mflr    r0
        stw     r0,4(r1)
+       lwz     r10,THREAD+KSP_LIMIT(r2)
+       addi    r11,r3,THREAD_INFO_GAP
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
        mr      r1,r3
+       stw     r10,8(r1)
+       stw     r11,THREAD+KSP_LIMIT(r2)
        bl      __do_softirq
+       lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
+       stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
        mflr    r0
        stw     r0,4(r1)
-       mtctr   r6
-       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-       mr      r1,r5
-       bctrl
+       lwz     r10,THREAD+KSP_LIMIT(r2)
+       addi    r11,r3,THREAD_INFO_GAP
+       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+       mr      r1,r4
+       stw     r10,8(r1)
+       stw     r11,THREAD+KSP_LIMIT(r2)
+       bl      __do_irq
+       lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
+       stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr
 
@@ -643,6 +658,20 @@ _GLOBAL(__lshrdi3)
        or      r4,r4,r7        # LSW |= t2
        blr
 
+/*
+ * 64-bit comparison: __cmpdi2(s64 a, s64 b)
+ * Returns 0 if a < b, 1 if a == b, 2 if a > b.
+ */
+_GLOBAL(__cmpdi2)
+       cmpw    r3,r5
+       li      r3,1
+       bne     1f
+       cmplw   r4,r6
+       beqlr
+1:     li      r3,0
+       bltlr
+       li      r3,2
+       blr
 /*
  * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
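
Editor's note: __cmpdi2 is the signed sibling of the existing __ucmpdi2 libgcc helper; GCC emits calls to it on 32-bit builds for signed long long comparisons. A C reference model with the same return convention, handy for checking the assembly above (plain C, not kernel code):

#include <stdint.h>

/* Returns 0 if a < b, 1 if a == b, 2 if a > b, comparing as signed. */
int cmpdi2_ref(int64_t a, int64_t b)
{
	if (a < b)
		return 0;
	if (a == b)
		return 1;
	return 2;
}

/* Word-split variant mirroring the assembly: a signed compare of the
 * high words decides, unless they are equal, in which case an unsigned
 * compare of the low words does.
 */
int cmpdi2_words(int64_t a, int64_t b)
{
	int32_t  ah = (int32_t)(a >> 32), bh = (int32_t)(b >> 32);
	uint32_t al = (uint32_t)a,        bl = (uint32_t)b;

	if (ah != bh)
		return ah < bh ? 0 : 2;
	if (al == bl)
		return 1;
	return al < bl ? 0 : 2;
}
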
index 971d7e78aff20e1ca801dd923dfc77337e834ad5..e59caf874d05ed60e500579b06bfcdf38bd85125 100644 (file)
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
        mtlr    r0
        blr
 
-_GLOBAL(call_handle_irq)
-       ld      r8,0(r6)
+_GLOBAL(call_do_irq)
        mflr    r0
        std     r0,16(r1)
-       mtctr   r8
-       stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-       mr      r1,r5
-       bctrl
+       stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+       mr      r1,r4
+       bl      .__do_irq
        ld      r1,0(r1)
        ld      r0,16(r1)
        mtlr    r0
index 6ee59a0eb268b8614ef13b6745c3555bfdd7aebd..a102f4412392cfc7b76cfdc12e1de8f9fa22a880 100644 (file)
@@ -62,6 +62,16 @@ struct ppc64_stub_entry
    r2) into the stub. */
 static struct ppc64_stub_entry ppc64_stub =
 { .jump = {
+#ifdef __LITTLE_ENDIAN__
+       0x00, 0x00, 0x82, 0x3d, /* addis   r12,r2, <high> */
+       0x00, 0x00, 0x8c, 0x39, /* addi    r12,r12, <low> */
+       /* Save current r2 value in magic place on the stack. */
+       0x28, 0x00, 0x41, 0xf8, /* std     r2,40(r1) */
+       0x20, 0x00, 0x6c, 0xe9, /* ld      r11,32(r12) */
+       0x28, 0x00, 0x4c, 0xe8, /* ld      r2,40(r12) */
+       0xa6, 0x03, 0x69, 0x7d, /* mtctr   r11 */
+       0x20, 0x04, 0x80, 0x4e  /* bctr */
+#else
        0x3d, 0x82, 0x00, 0x00, /* addis   r12,r2, <high> */
        0x39, 0x8c, 0x00, 0x00, /* addi    r12,r12, <low> */
        /* Save current r2 value in magic place on the stack. */
@@ -70,6 +80,7 @@ static struct ppc64_stub_entry ppc64_stub =
        0xe8, 0x4c, 0x00, 0x28, /* ld      r2,40(r12) */
        0x7d, 0x69, 0x03, 0xa6, /* mtctr   r11 */
        0x4e, 0x80, 0x04, 0x20  /* bctr */
+#endif
 } };
 
 /* Count how many different 24-bit relocations (different symbol,
@@ -269,8 +280,13 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
 
        *entry = ppc64_stub;
 
+#ifdef __LITTLE_ENDIAN__
+       loc1 = (Elf64_Half *)&entry->jump[0];
+       loc2 = (Elf64_Half *)&entry->jump[4];
+#else
        loc1 = (Elf64_Half *)&entry->jump[2];
        loc2 = (Elf64_Half *)&entry->jump[6];
+#endif
 
        /* Stub uses address relative to r2. */
        reladdr = (unsigned long)entry - my_r2(sechdrs, me);
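
Editor's note: the module stub hunks add a little-endian byte encoding of the trampoline and move the patch points for the addis/addi immediates. The 16-bit immediate is the low-order halfword of each 4-byte instruction, so it sits at byte offset 2 of the word when stored big-endian but at offset 0 when stored little-endian. A small sketch of that offset arithmetic; the helper name is illustrative:

#include <stdint.h>

/* jump[] in the stub is a byte array holding 4-byte instructions. */
static uint16_t *imm_of_insn(uint8_t *insn_word, int little_endian)
{
	return (uint16_t *)(insn_word + (little_endian ? 0 : 2));
}

/* create_stub() therefore patches:
 *   loc1 = imm_of_insn(&jump[0], LE)  ->  &jump[0] on LE, &jump[2] on BE
 *   loc2 = imm_of_insn(&jump[4], LE)  ->  &jump[4] on LE, &jump[6] on BE
 * which is exactly the #ifdef pair in the hunk above.
 */
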
index 3fc16e3beb9f8dca2ea5f260bb59d2dc20bef3f3..0620eaaaad45b419c70475485a9d052c0ddbe246 100644 (file)
@@ -46,7 +46,7 @@ struct lppaca lppaca[] = {
 static struct lppaca *extra_lppacas;
 static long __initdata lppaca_size;
 
-static void allocate_lppacas(int nr_cpus, unsigned long limit)
+static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
 {
        if (nr_cpus <= NR_LPPACAS)
                return;
@@ -57,7 +57,7 @@ static void allocate_lppacas(int nr_cpus, unsigned long limit)
                                                 PAGE_SIZE, limit));
 }
 
-static struct lppaca *new_lppaca(int cpu)
+static struct lppaca * __init new_lppaca(int cpu)
 {
        struct lppaca *lp;
 
@@ -70,7 +70,7 @@ static struct lppaca *new_lppaca(int cpu)
        return lp;
 }
 
-static void free_lppacas(void)
+static void __init free_lppacas(void)
 {
        long new_size = 0, nr;
 
index 21646dbe1bb3c7a48df59ba14dea18431abe12be..3bd77edd7610ce20267a880069972624eafed62e 100644 (file)
@@ -79,10 +79,12 @@ EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strncmp);
 
+#ifndef CONFIG_GENERIC_CSUM
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(ip_fast_csum);
 EXPORT_SYMBOL(csum_tcpudp_magic);
+#endif
 
 EXPORT_SYMBOL(__copy_tofrom_user);
 EXPORT_SYMBOL(__clear_user);
@@ -98,9 +100,13 @@ EXPORT_SYMBOL(start_thread);
 
 #ifdef CONFIG_PPC_FPU
 EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(load_fp_state);
+EXPORT_SYMBOL(store_fp_state);
 #endif
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
+EXPORT_SYMBOL(load_vr_state);
+EXPORT_SYMBOL(store_vr_state);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 EXPORT_SYMBOL(giveup_vsx);
@@ -143,10 +149,14 @@ EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__lshrdi3);
 int __ucmpdi2(unsigned long long, unsigned long long);
 EXPORT_SYMBOL(__ucmpdi2);
+int __cmpdi2(long long, long long);
+EXPORT_SYMBOL(__cmpdi2);
 #endif
 long long __bswapdi2(long long);
 EXPORT_SYMBOL(__bswapdi2);
+#ifdef __BIG_ENDIAN__
 EXPORT_SYMBOL(memcpy);
+#endif
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memcmp);
index 6f428da53e2085b877334270286069e2ba54da37..8649a3d629e1b9f2b4bce242e3873f0b28f881cc 100644 (file)
@@ -1000,13 +1000,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
 #endif
 
+       p->thread.fp_save_area = NULL;
+#ifdef CONFIG_ALTIVEC
+       p->thread.vr_save_area = NULL;
+#endif
+
 #ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
@@ -1112,12 +1118,12 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
-       memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = 0;
+       memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+       current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
-       memset(current->thread.vr, 0, sizeof(current->thread.vr));
-       memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
-       current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+       memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
+       current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
+       current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
index 12e656ffe60ea86a00a531578efd55ef98e3faef..cb64a6e1dc5186918b4d2052e52e961cfb14eedb 100644 (file)
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -856,7 +858,8 @@ static void __init prom_send_capabilities(void)
 {
        ihandle root;
        prom_arg_t ret;
-       __be32 *cores;
+       u32 cores;
+       unsigned char *ptcores;
 
        root = call_prom("open", 1, 1, ADDR("/"));
        if (root != 0) {
@@ -866,15 +869,30 @@ static void __init prom_send_capabilities(void)
                 * (we assume this is the same for all cores) and use it to
                 * divide NR_CPUS.
                 */
-               cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
-               if (be32_to_cpup(cores) != NR_CPUS) {
+
+               /* The core value may start at an odd address. If such a word
+                * access is made at a cache line boundary, it leads to an
+                * exception which may not be handled at this time.
+                * Force a per-byte access to avoid the exception.
+                */
+               ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
+               cores = 0;
+               cores |= ptcores[0] << 24;
+               cores |= ptcores[1] << 16;
+               cores |= ptcores[2] << 8;
+               cores |= ptcores[3];
+               if (cores != NR_CPUS) {
                        prom_printf("WARNING ! "
                                    "ibm_architecture_vec structure inconsistent: %lu!\n",
-                                   be32_to_cpup(cores));
+                                   cores);
                } else {
-                       *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
+                       cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
                        prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
-                                   be32_to_cpup(cores), NR_CPUS);
+                                   cores, NR_CPUS);
+                       ptcores[0] = (cores >> 24) & 0xff;
+                       ptcores[1] = (cores >> 16) & 0xff;
+                       ptcores[2] = (cores >> 8) & 0xff;
+                       ptcores[3] = cores & 0xff;
                }
 
                /* try calling the ibm,client-architecture-support method */
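
Editor's note: the prom_send_capabilities() change replaces a possibly misaligned 32-bit load/store on ibm_architecture_vec with explicit per-byte accesses that keep the value in big-endian property order. A stand-alone sketch of the same helpers; the names are illustrative:

#include <stdint.h>

/* Read/write a big-endian 32-bit value one byte at a time, so the
 * access is safe even when p is not 4-byte aligned.
 */
static uint32_t read_be32_bytewise(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void write_be32_bytewise(unsigned char *p, uint32_t v)
{
	p[0] = (v >> 24) & 0xff;
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8)  & 0xff;
	p[3] = v & 0xff;
}
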
@@ -1574,6 +1592,11 @@ static void __init prom_instantiate_rtas(void)
        prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
                     &val, sizeof(val));
 
+       /* Check if it supports "query-cpu-stopped-state" */
+       if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+                        &val, sizeof(val)) != PROM_ERROR)
+               rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
        /* PowerVN takeover hack */
        prom_rtas_data = base;
@@ -1815,6 +1838,18 @@ static void __init prom_hold_cpus(void)
                = (void *) LOW_ADDR(__secondary_hold_acknowledge);
        unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+       /*
+        * On pseries, if RTAS supports "query-cpu-stopped-state",
+        * we skip this stage, the CPUs will be started by the
+        * kernel using RTAS.
+        */
+       if ((of_platform == PLATFORM_PSERIES ||
+            of_platform == PLATFORM_PSERIES_LPAR) &&
+           rtas_has_query_cpu_stopped) {
+               prom_printf("prom_hold_cpus: skipped\n");
+               return;
+       }
+
        prom_debug("prom_hold_cpus: start...\n");
        prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
        prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3046,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         * On non-powermacs, put all CPUs in spin-loops.
         *
         * PowerMacs use a different mechanism to spin CPUs
+        *
+        * (This must be done after instantiating RTAS)
         */
        if (of_platform != PLATFORM_POWERMAC &&
            of_platform != PLATFORM_OPAL)
index 9a0d24c390a3535e16c934f80ec19695da04d095..1ca589c9ec6d5a85433dac34ea590ab379e7144c 100644 (file)
@@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   void *kbuf, void __user *ubuf)
 {
 #ifdef CONFIG_VSX
-       double buf[33];
+       u64 buf[33];
        int i;
 #endif
        flush_fp_to_thread(target);
@@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
        /* copy to local buffer then write that out */
        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.TS_FPR(i);
-       memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+       buf[32] = target->thread.fp_state.fpscr;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 
 #else
-       BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-                    offsetof(struct thread_struct, TS_FPR(32)));
+       BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+                    offsetof(struct thread_fp_state, fpr[32][0]));
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.fpr, 0, -1);
+                                  &target->thread.fp_state, 0, -1);
 #endif
 }
 
@@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
 #ifdef CONFIG_VSX
-       double buf[33];
+       u64 buf[33];
        int i;
 #endif
        flush_fp_to_thread(target);
@@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                return i;
        for (i = 0; i < 32 ; i++)
                target->thread.TS_FPR(i) = buf[i];
-       memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
+       target->thread.fp_state.fpscr = buf[32];
        return 0;
 #else
-       BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-                    offsetof(struct thread_struct, TS_FPR(32)));
+       BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+                    offsetof(struct thread_fp_state, fpr[32][0]));
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.fpr, 0, -1);
+                                 &target->thread.fp_state, 0, -1);
 #endif
 }
 
@@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
 
        flush_altivec_to_thread(target);
 
-       BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
-                    offsetof(struct thread_struct, vr[32]));
+       BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+                    offsetof(struct thread_vr_state, vr[32]));
 
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.vr, 0,
+                                 &target->thread.vr_state, 0,
                                  33 * sizeof(vector128));
        if (!ret) {
                /*
@@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 
        flush_altivec_to_thread(target);
 
-       BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
-                    offsetof(struct thread_struct, vr[32]));
+       BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+                    offsetof(struct thread_vr_state, vr[32]));
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &target->thread.vr, 0, 33 * sizeof(vector128));
+                                &target->thread.vr_state, 0,
+                                33 * sizeof(vector128));
        if (!ret && count > 0) {
                /*
                 * We use only the first word of vrsave.
@@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
-       double buf[32];
+       u64 buf[32];
        int ret, i;
 
        flush_vsx_to_thread(target);
 
        for (i = 0; i < 32 ; i++)
-               buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
+               buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  buf, 0, 32 * sizeof(double));
 
@@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
 {
-       double buf[32];
+       u64 buf[32];
        int ret,i;
 
        flush_vsx_to_thread(target);
@@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        for (i = 0; i < 32 ; i++)
-               target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+               target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 
 
        return ret;
@@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        flush_fp_to_thread(child);
                        if (fpidx < (PT_FPSCR - PT_FPR0))
-                               tmp = ((unsigned long *)child->thread.fpr)
-                                       [fpidx * TS_FPRWIDTH];
+                               memcpy(&tmp, &child->thread.fp_state.fpr,
+                                      sizeof(long));
                        else
-                               tmp = child->thread.fpscr.val;
+                               tmp = child->thread.fp_state.fpscr;
                }
                ret = put_user(tmp, datalp);
                break;
@@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        flush_fp_to_thread(child);
                        if (fpidx < (PT_FPSCR - PT_FPR0))
-                               ((unsigned long *)child->thread.fpr)
-                                       [fpidx * TS_FPRWIDTH] = data;
+                               memcpy(&child->thread.fp_state.fpr, &data,
+                                      sizeof(long));
                        else
-                               child->thread.fpscr.val = data;
+                               child->thread.fp_state.fpscr = data;
                        ret = 0;
                }
                break;
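
Editor's note: throughout these ptrace accessors the staging buffers change from double[] to u64[]. The register image is just bits to the kernel; declaring the buffer as u64 matches the new fp_state.fpr element type and keeps the copies as plain integer moves rather than routing the values through C doubles. A trivial, illustrative sketch of the raw-bits copy:

#include <stdint.h>
#include <string.h>

/* Copy an FP register image as raw 64-bit bit patterns; nothing is ever
 * reinterpreted as a C 'double' along the way.
 */
void copy_fpr_image(uint64_t *dst, const uint64_t *src, unsigned int nregs)
{
	memcpy(dst, src, (size_t)nregs * sizeof(uint64_t));
}
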
index f51599e941c7661b281a5130570b2f5e9d701ec3..097f8dc426a017accf9de0297dc892a40c6c8272 100644 (file)
@@ -43,7 +43,6 @@
 #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
 #define FPRHALF(i) (((i) - PT_FPR0) & 1)
 #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
-#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
 
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
@@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                         * to be an array of unsigned int (32 bits) - the
                         * index passed in is based on this assumption.
                         */
-                       tmp = ((unsigned int *)child->thread.fpr)
+                       tmp = ((unsigned int *)child->thread.fp_state.fpr)
                                [FPRINDEX(index)];
                }
                ret = put_user((unsigned int)tmp, (u32 __user *)data);
@@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                if (numReg >= PT_FPR0) {
                        flush_fp_to_thread(child);
                        /* get 64 bit FPR */
-                       tmp = ((u64 *)child->thread.fpr)
-                               [FPRINDEX_3264(numReg)];
+                       tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
                } else { /* register within PT_REGS struct */
                        unsigned long tmp2;
                        ret = ptrace_get_reg(child, numReg, &tmp2);
@@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                         * to be an array of unsigned int (32 bits) - the
                         * index passed in is based on this assumption.
                         */
-                       ((unsigned int *)child->thread.fpr)
+                       ((unsigned int *)child->thread.fp_state.fpr)
                                [FPRINDEX(index)] = data;
                        ret = 0;
                }
@@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        u64 *tmp;
                        flush_fp_to_thread(child);
                        /* get 64 bit FPR ... */
-                       tmp = &(((u64 *)child->thread.fpr)
-                               [FPRINDEX_3264(numReg)]);
+                       tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
                        /* ... write the 32 bit part we want */
                        ((u32 *)tmp)[index % 2] = data;
                        ret = 0;
index 6e7b7cdeec6541135dad27df9f23bf64ac16395b..7d4c7172f38ed43d85873c23f5b1e63e733ea093 100644 (file)
@@ -223,7 +223,7 @@ unsigned long get_phb_buid(struct device_node *phb)
 static int phb_set_bus_ranges(struct device_node *dev,
                              struct pci_controller *phb)
 {
-       const int *bus_range;
+       const __be32 *bus_range;
        unsigned int len;
 
        bus_range = of_get_property(dev, "bus-range", &len);
@@ -231,8 +231,8 @@ static int phb_set_bus_ranges(struct device_node *dev,
                return 1;
        }
 
-       phb->first_busno =  bus_range[0];
-       phb->last_busno  =  bus_range[1];
+       phb->first_busno = be32_to_cpu(bus_range[0]);
+       phb->last_busno  = be32_to_cpu(bus_range[1]);
 
        return 0;
 }
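
Editor's note: phb_set_bus_ranges() now treats "bus-range" as __be32 data and converts with be32_to_cpu(), since device-tree properties are stored big-endian regardless of the kernel's endianness. A minimal sketch of the pattern; of_get_property() and be32_to_cpu() are the real kernel interfaces, while the surrounding function is illustrative and error handling is trimmed:

static int read_bus_range_sketch(struct device_node *dev, int *first, int *last)
{
	const __be32 *prop;
	int len;

	prop = of_get_property(dev, "bus-range", &len);
	if (!prop || len < 2 * (int)sizeof(*prop))
		return -1;

	*first = be32_to_cpu(prop[0]);	/* cells are big-endian in the DT */
	*last  = be32_to_cpu(prop[1]);
	return 0;
}
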
index bebdf1a1a5403df741ea389102f1b69b80daf60e..c094e28b3f102acfb21e6b4458b3ccdb26eb15bc 100644 (file)
@@ -265,27 +265,27 @@ struct rt_sigframe {
 unsigned long copy_fpr_to_user(void __user *to,
                               struct task_struct *task)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_FPR(i);
-       memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+       buf[i] = task->thread.fp_state.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 }
 
 unsigned long copy_fpr_from_user(struct task_struct *task,
                                 void __user *from)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_FPR(i) = buf[i];
-       memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+       task->thread.fp_state.fpscr = buf[i];
 
        return 0;
 }
@@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task,
 unsigned long copy_vsx_to_user(void __user *to,
                               struct task_struct *task)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
-               buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+               buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 }
 
 unsigned long copy_vsx_from_user(struct task_struct *task,
                                 void __user *from)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
-               task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+               task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
 }
 
@@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
 unsigned long copy_transact_fpr_to_user(void __user *to,
                                  struct task_struct *task)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_TRANS_FPR(i);
-       memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+       buf[i] = task->thread.transact_fp.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 }
 
 unsigned long copy_transact_fpr_from_user(struct task_struct *task,
                                          void __user *from)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_TRANS_FPR(i) = buf[i];
-       memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+       task->thread.transact_fp.fpscr = buf[i];
 
        return 0;
 }
@@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 unsigned long copy_transact_vsx_to_user(void __user *to,
                                  struct task_struct *task)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
-               buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+               buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 }
 
 unsigned long copy_transact_vsx_from_user(struct task_struct *task,
                                          void __user *from)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
-               task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+               task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task,
 inline unsigned long copy_fpr_to_user(void __user *to,
                                      struct task_struct *task)
 {
-       return __copy_to_user(to, task->thread.fpr,
+       return __copy_to_user(to, task->thread.fp_state.fpr,
                              ELF_NFPREG * sizeof(double));
 }
 
 inline unsigned long copy_fpr_from_user(struct task_struct *task,
                                        void __user *from)
 {
-       return __copy_from_user(task->thread.fpr, from,
+       return __copy_from_user(task->thread.fp_state.fpr, from,
                              ELF_NFPREG * sizeof(double));
 }
 
@@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
 inline unsigned long copy_transact_fpr_to_user(void __user *to,
                                         struct task_struct *task)
 {
-       return __copy_to_user(to, task->thread.transact_fpr,
+       return __copy_to_user(to, task->thread.transact_fp.fpr,
                              ELF_NFPREG * sizeof(double));
 }
 
 inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
                                                 void __user *from)
 {
-       return __copy_from_user(task->thread.transact_fpr, from,
+       return __copy_from_user(task->thread.transact_fp.fpr, from,
                                ELF_NFPREG * sizeof(double));
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
-               if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+               if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                /* set MSR_VEC in the saved MSR value to indicate that
@@ -534,17 +534,17 @@ static int save_tm_user_regs(struct pt_regs *regs,
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
-               if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+               if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                if (msr & MSR_VEC) {
                        if (__copy_to_user(&tm_frame->mc_vregs,
-                                          current->thread.transact_vr,
+                                          &current->thread.transact_vr,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                } else {
                        if (__copy_to_user(&tm_frame->mc_vregs,
-                                          current->thread.vr,
+                                          &current->thread.vr_state,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                }
@@ -692,11 +692,12 @@ static long restore_user_regs(struct pt_regs *regs,
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
-               if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+               if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
        } else if (current->thread.used_vr)
-               memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+               memset(&current->thread.vr_state, 0,
+                      ELF_NVRREG * sizeof(vector128));
 
        /* Always get VRSAVE back */
        if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
@@ -722,7 +723,7 @@ static long restore_user_regs(struct pt_regs *regs,
                        return 1;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++)
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif /* CONFIG_VSX */
        /*
         * force the process to reload the FP registers from
@@ -798,15 +799,16 @@ static long restore_tm_user_regs(struct pt_regs *regs,
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
-               if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+               if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)) ||
-                   __copy_from_user(current->thread.transact_vr,
+                   __copy_from_user(&current->thread.transact_vr,
                                     &tm_sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
        } else if (current->thread.used_vr) {
-               memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
-               memset(current->thread.transact_vr, 0,
+               memset(&current->thread.vr_state, 0,
+                      ELF_NVRREG * sizeof(vector128));
+               memset(&current->thread.transact_vr, 0,
                       ELF_NVRREG * sizeof(vector128));
        }
 
@@ -838,8 +840,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
                        return 1;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++) {
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-                       current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
 #endif /* CONFIG_VSX */
 
@@ -1030,7 +1032,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
                if (__put_user(0, &rt_sf->uc.uc_link))
                        goto badframe;
 
-       current->thread.fpscr.val = 0;  /* turn off all fp exceptions */
+       current->thread.fp_state.fpscr = 0;     /* turn off all fp exceptions */
 
        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1045,8 +1047,9 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        regs->gpr[5] = (unsigned long) &rt_sf->uc;
        regs->gpr[6] = (unsigned long) rt_sf;
        regs->nip = (unsigned long) ka->sa.sa_handler;
-       /* enter the signal handler in big-endian mode */
+       /* enter the signal handler in native-endian mode */
        regs->msr &= ~MSR_LE;
+       regs->msr |= (MSR_KERNEL & MSR_LE);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
@@ -1462,7 +1465,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 
        regs->link = tramp;
 
-       current->thread.fpscr.val = 0;  /* turn off all fp exceptions */
+       current->thread.fp_state.fpscr = 0;     /* turn off all fp exceptions */
 
        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
index f93ec2835a13f01294a9b3d5c225686a04666702..b3c615764c9b97bcb510d017bd9c8ff33e6d69ec 100644 (file)
@@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-               err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+               err |= __copy_to_user(v_regs, &current->thread.vr_state,
+                                     33 * sizeof(vector128));
                /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
                 * contains valid data.
                 */
@@ -195,18 +196,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-               err |= __copy_to_user(v_regs, current->thread.vr,
+               err |= __copy_to_user(v_regs, &current->thread.vr_state,
                                      33 * sizeof(vector128));
                /* If VEC was enabled there are transactional VRs valid too,
                 * else they're a copy of the checkpointed VRs.
                 */
                if (msr & MSR_VEC)
                        err |= __copy_to_user(tm_v_regs,
-                                             current->thread.transact_vr,
+                                             &current->thread.transact_vr,
                                              33 * sizeof(vector128));
                else
                        err |= __copy_to_user(tm_v_regs,
-                                             current->thread.vr,
+                                             &current->thread.vr_state,
                                              33 * sizeof(vector128));
 
                /* set MSR_VEC in the MSR value in the frame to indicate
@@ -349,10 +350,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && (msr & MSR_VEC) != 0)
-               err |= __copy_from_user(current->thread.vr, v_regs,
+               err |= __copy_from_user(&current->thread.vr_state, v_regs,
                                        33 * sizeof(vector128));
        else if (current->thread.used_vr)
-               memset(current->thread.vr, 0, 33 * sizeof(vector128));
+               memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
        /* Always get VRSAVE back */
        if (v_regs != NULL)
                err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
@@ -374,7 +375,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                err |= copy_vsx_from_user(current, v_regs);
        else
                for (i = 0; i < 32 ; i++)
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif
        return err;
 }
@@ -468,14 +469,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
-               err |= __copy_from_user(current->thread.vr, v_regs,
+               err |= __copy_from_user(&current->thread.vr_state, v_regs,
                                        33 * sizeof(vector128));
-               err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+               err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
                                        33 * sizeof(vector128));
        }
        else if (current->thread.used_vr) {
-               memset(current->thread.vr, 0, 33 * sizeof(vector128));
-               memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+               memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+               memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
        }
        /* Always get VRSAVE back */
        if (v_regs != NULL && tm_v_regs != NULL) {
@@ -507,8 +508,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                err |= copy_transact_vsx_from_user(current, tm_v_regs);
        } else {
                for (i = 0; i < 32 ; i++) {
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-                       current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
        }
 #endif
@@ -747,7 +748,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
                goto badframe;
 
        /* Make sure signal handler doesn't get spurious FP exceptions */
-       current->thread.fpscr.val = 0;
+       current->thread.fp_state.fpscr = 0;
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
@@ -773,8 +774,9 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 
        /* Set up "regs" so we "return" to the signal handler. */
        err |= get_user(regs->nip, &funct_desc_ptr->entry);
-       /* enter the signal handler in big-endian mode */
+       /* enter the signal handler in native-endian mode */
        regs->msr &= ~MSR_LE;
+       regs->msr |= (MSR_KERNEL & MSR_LE);
        regs->gpr[1] = newsp;
        err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
        regs->gpr[3] = signr;
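
Editor's note: both signal paths now derive the handler's endian bit from MSR_KERNEL instead of forcing big-endian: clear MSR_LE, then OR in (MSR_KERNEL & MSR_LE), so the handler is entered in whatever endianness the kernel itself was built for. The arithmetic in isolation; MSR_LE_SK stands in for the architected MSR[LE] bit (value 1), the rest is illustrative:

#include <stdint.h>

#define MSR_LE_SK 0x1UL		/* stand-in for the MSR[LE] bit */

static uint64_t handler_msr(uint64_t user_msr, uint64_t kernel_msr)
{
	user_msr &= ~MSR_LE_SK;			/* drop the user's LE setting */
	user_msr |= kernel_msr & MSR_LE_SK;	/* take the kernel's instead */
	return user_msr;
}
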
index 8e59abc237d7f17c96effa20583206e75b2e4b7a..930cd8af35035441031e1abecbdde3472ab48808 100644 (file)
@@ -844,18 +844,6 @@ void __cpu_die(unsigned int cpu)
                smp_ops->cpu_die(cpu);
 }
 
-static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
-       mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
-       mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
 void cpu_die(void)
 {
        if (ppc_md.cpu_die)
index 27a90b99ef6744d20fa6664bb1c6244626a5cd0d..b4e667663d9bfc2e9954435790b0ebc5b6fc94ca 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
+#include <asm/firmware.h>
 
 #include "cacheinfo.h"
 
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
 SYSFS_PMCSETUP(pir, SPRN_PIR);
 
+/*
+ * Let's only enable read for phyp resources and enable write
+ * when needed with a separate function. Let's be conservative
+ * and default to pseries.
+ */
 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
 
 unsigned long dscr_default = 0;
 EXPORT_SYMBOL(dscr_default);
 
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+       attr->attr.mode |= 0200;
+}
+
 static ssize_t show_dscr_default(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
        if (cpu_has_feature(CPU_FTR_MMCRA))
                device_create_file(s, &dev_attr_mmcra);
 
-       if (cpu_has_feature(CPU_FTR_PURR))
+       if (cpu_has_feature(CPU_FTR_PURR)) {
+               if (!firmware_has_feature(FW_FEATURE_LPAR))
+                       add_write_permission_dev_attr(&dev_attr_purr);
                device_create_file(s, &dev_attr_purr);
+       }
 
        if (cpu_has_feature(CPU_FTR_SPURR))
                device_create_file(s, &dev_attr_spurr);
index 7b60b98514691ee554d1140e408c9802f83d21a4..761af4f0a632bab2ec41754768cdd72496fc3c20 100644 (file)
 #include <asm/reg.h>
 
 #ifdef CONFIG_VSX
-/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base)  \
+/* See fpu.S, this is borrowed from there */
+#define __SAVE_32FPRS_VSRS(n,c,base)           \
 BEGIN_FTR_SECTION                              \
        b       2f;                             \
 END_FTR_SECTION_IFSET(CPU_FTR_VSX);            \
-       SAVE_32FPRS_TRANSACT(n,base);           \
+       SAVE_32FPRS(n,base);                    \
        b       3f;                             \
-2:     SAVE_32VSRS_TRANSACT(n,c,base);         \
+2:     SAVE_32VSRS(n,c,base);                  \
 3:
-/* ...and this is just plain borrowed from there. */
 #define __REST_32FPRS_VSRS(n,c,base)           \
 BEGIN_FTR_SECTION                              \
        b       2f;                             \
@@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);         \
 2:     REST_32VSRS(n,c,base);                  \
 3:
 #else
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base)
-#define __REST_32FPRS_VSRS(n,c,base)         REST_32FPRS(n, base)
+#define __SAVE_32FPRS_VSRS(n,c,base)   SAVE_32FPRS(n, base)
+#define __REST_32FPRS_VSRS(n,c,base)   REST_32FPRS(n, base)
 #endif
-#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
-       __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base)
+#define SAVE_32FPRS_VSRS(n,c,base) \
+       __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
 #define REST_32FPRS_VSRS(n,c,base) \
        __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
 
@@ -79,6 +78,11 @@ _GLOBAL(tm_abort)
        TABORT(R3)
        blr
 
+       .section        ".toc","aw"
+DSCR_DEFAULT:
+       .tc dscr_default[TC],dscr_default
+
+       .section        ".text"
 
 /* void tm_reclaim(struct thread_struct *thread,
  *                 unsigned long orig_msr,
@@ -123,6 +127,7 @@ _GLOBAL(tm_reclaim)
        mr      r15, r14
        ori     r15, r15, MSR_FP
        li      r16, MSR_RI
+       ori     r16, r16, MSR_EE /* IRQs hard off */
        andc    r15, r15, r16
        oris    r15, r15, MSR_VEC@h
 #ifdef CONFIG_VSX
@@ -151,10 +156,11 @@ _GLOBAL(tm_reclaim)
        andis.          r0, r4, MSR_VEC@h
        beq     dont_backup_vec
 
-       SAVE_32VRS_TRANSACT(0, r6, r3)  /* r6 scratch, r3 thread */
+       addi    r7, r3, THREAD_TRANSACT_VRSTATE
+       SAVE_32VRS(0, r6, r7)   /* r6 scratch, r7 transact vr state */
        mfvscr  vr0
-       li      r6, THREAD_TRANSACT_VSCR
-       stvx    vr0, r3, r6
+       li      r6, VRSTATE_VSCR
+       stvx    vr0, r7, r6
 dont_backup_vec:
        mfspr   r0, SPRN_VRSAVE
        std     r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -162,10 +168,11 @@ dont_backup_vec:
        andi.   r0, r4, MSR_FP
        beq     dont_backup_fp
 
-       SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3)    /* r6 scratch, r3 thread */
+       addi    r7, r3, THREAD_TRANSACT_FPSTATE
+       SAVE_32FPRS_VSRS(0, R6, R7)     /* r6 scratch, r7 transact fp state */
 
        mffs    fr0
-       stfd    fr0,THREAD_TRANSACT_FPSCR(r3)
+       stfd    fr0,FPSTATE_FPSCR(r7)
 
 dont_backup_fp:
        /* The moment we treclaim, ALL of our GPRs will switch
@@ -187,11 +194,18 @@ dont_backup_fp:
        std     r1, PACATMSCRATCH(r13)
        ld      r1, PACAR1(r13)
 
+       /* Store the PPR in r11 and reset to decent value */
+       std     r11, GPR11(r1)                  /* Temporary stash */
+       mfspr   r11, SPRN_PPR
+       HMT_MEDIUM
+
        /* Now get some more GPRS free */
        std     r7, GPR7(r1)                    /* Temporary stash */
        std     r12, GPR12(r1)                  /* ''   ''    ''   */
        ld      r12, STACK_PARAM(0)(r1)         /* Param 0, thread_struct * */
 
+       std     r11, THREAD_TM_PPR(r12)         /* Store PPR and free r11 */
+
        addi    r7, r12, PT_CKPT_REGS           /* Thread's ckpt_regs */
 
        /* Make r7 look like an exception frame so that we
@@ -203,15 +217,19 @@ dont_backup_fp:
        SAVE_GPR(0, r7)                         /* user r0 */
        SAVE_GPR(2, r7)                 /* user r2 */
        SAVE_4GPRS(3, r7)                       /* user r3-r6 */
-       SAVE_4GPRS(8, r7)                       /* user r8-r11 */
+       SAVE_GPR(8, r7)                         /* user r8 */
+       SAVE_GPR(9, r7)                         /* user r9 */
+       SAVE_GPR(10, r7)                        /* user r10 */
        ld      r3, PACATMSCRATCH(r13)          /* user r1 */
        ld      r4, GPR7(r1)                    /* user r7 */
-       ld      r5, GPR12(r1)                   /* user r12 */
-       GET_SCRATCH0(6)                         /* user r13 */
+       ld      r5, GPR11(r1)                   /* user r11 */
+       ld      r6, GPR12(r1)                   /* user r12 */
+       GET_SCRATCH0(8)                         /* user r13 */
        std     r3, GPR1(r7)
        std     r4, GPR7(r7)
-       std     r5, GPR12(r7)
-       std     r6, GPR13(r7)
+       std     r5, GPR11(r7)
+       std     r6, GPR12(r7)
+       std     r8, GPR13(r7)
 
        SAVE_NVGPRS(r7)                         /* user r14-r31 */
 
@@ -234,14 +252,12 @@ dont_backup_fp:
        std     r6, _XER(r7)
 
 
-       /* ******************** TAR, PPR, DSCR ********** */
+       /* ******************** TAR, DSCR ********** */
        mfspr   r3, SPRN_TAR
-       mfspr   r4, SPRN_PPR
-       mfspr   r5, SPRN_DSCR
+       mfspr   r4, SPRN_DSCR
 
        std     r3, THREAD_TM_TAR(r12)
-       std     r4, THREAD_TM_PPR(r12)
-       std     r5, THREAD_TM_DSCR(r12)
+       std     r4, THREAD_TM_DSCR(r12)
 
        /* MSR and flags:  We don't change CRs, and we don't need to alter
         * MSR.
@@ -258,7 +274,7 @@ dont_backup_fp:
        std     r3, THREAD_TM_TFHAR(r12)
        std     r4, THREAD_TM_TFIAR(r12)
 
-       /* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+       /* AMR is checkpointed too, but is unsupported by Linux. */
 
        /* Restore original MSR/IRQ state & clear TM mode */
        ld      r14, TM_FRAME_L0(r1)            /* Orig MSR */
@@ -274,6 +290,12 @@ dont_backup_fp:
        mtcr    r4
        mtlr    r0
        ld      r2, 40(r1)
+
+       /* Load system default DSCR */
+       ld      r4, DSCR_DEFAULT@toc(r2)
+       ld      r0, 0(r4)
+       mtspr   SPRN_DSCR, r0
+
        blr
 
 
@@ -337,10 +359,11 @@ _GLOBAL(tm_recheckpoint)
        andis.  r0, r4, MSR_VEC@h
        beq     dont_restore_vec
 
-       li      r5, THREAD_VSCR
-       lvx     vr0, r3, r5
+       addi    r8, r3, THREAD_VRSTATE
+       li      r5, VRSTATE_VSCR
+       lvx     vr0, r8, r5
        mtvscr  vr0
-       REST_32VRS(0, r5, r3)                   /* r5 scratch, r3 THREAD ptr */
+       REST_32VRS(0, r5, r8)                   /* r5 scratch, r8 ptr */
 dont_restore_vec:
        ld      r5, THREAD_VRSAVE(r3)
        mtspr   SPRN_VRSAVE, r5
@@ -349,34 +372,34 @@ dont_restore_vec:
        andi.   r0, r4, MSR_FP
        beq     dont_restore_fp
 
-       lfd     fr0, THREAD_FPSCR(r3)
+       addi    r8, r3, THREAD_FPSTATE
+       lfd     fr0, FPSTATE_FPSCR(r8)
        MTFSF_L(fr0)
-       REST_32FPRS_VSRS(0, R4, R3)
+       REST_32FPRS_VSRS(0, R4, R8)
 
 dont_restore_fp:
        mtmsr   r6                              /* FP/Vec off again! */
 
 restore_gprs:
 
-       /* ******************** TAR, PPR, DSCR ********** */
-       ld      r4, THREAD_TM_TAR(r3)
-       ld      r5, THREAD_TM_PPR(r3)
-       ld      r6, THREAD_TM_DSCR(r3)
+       /* ******************** CR,LR,CCR,MSR ********** */
+       ld      r4, _CTR(r7)
+       ld      r5, _LINK(r7)
+       ld      r6, _CCR(r7)
+       ld      r8, _XER(r7)
 
-       mtspr   SPRN_TAR,       r4
-       mtspr   SPRN_PPR,       r5
-       mtspr   SPRN_DSCR,      r6
+       mtctr   r4
+       mtlr    r5
+       mtcr    r6
+       mtxer   r8
 
-       /* ******************** CR,LR,CCR,MSR ********** */
-       ld      r3, _CTR(r7)
-       ld      r4, _LINK(r7)
-       ld      r5, _CCR(r7)
-       ld      r6, _XER(r7)
+       /* ******************** TAR ******************** */
+       ld      r4, THREAD_TM_TAR(r3)
+       mtspr   SPRN_TAR,       r4
 
-       mtctr   r3
-       mtlr    r4
-       mtcr    r5
-       mtxer   r6
+       /* Load up the PPR and DSCR in GPRs only at this stage */
+       ld      r5, THREAD_TM_DSCR(r3)
+       ld      r6, THREAD_TM_PPR(r3)
 
        /* Clear the MSR RI since we are about to change R1.  EE is already off
         */
@@ -384,19 +407,26 @@ restore_gprs:
        mtmsrd  r4, 1
 
        REST_4GPRS(0, r7)                       /* GPR0-3 */
-       REST_GPR(4, r7)                         /* GPR4-6 */
-       REST_GPR(5, r7)
-       REST_GPR(6, r7)
+       REST_GPR(4, r7)                         /* GPR4 */
        REST_4GPRS(8, r7)                       /* GPR8-11 */
        REST_2GPRS(12, r7)                      /* GPR12-13 */
 
        REST_NVGPRS(r7)                         /* GPR14-31 */
 
-       ld      r7, GPR7(r7)                    /* GPR7 */
+       /* Load up PPR and DSCR here so we don't run with user values for long
+        */
+       mtspr   SPRN_DSCR, r5
+       mtspr   SPRN_PPR, r6
+
+       REST_GPR(5, r7)                         /* GPR5-7 */
+       REST_GPR(6, r7)
+       ld      r7, GPR7(r7)
 
        /* Commit register state as checkpointed state: */
        TRECHKPT
 
+       HMT_MEDIUM
+
        /* Our transactional state has now changed.
         *
         * Now just get out of here.  Transactional (current) state will be
@@ -419,6 +449,12 @@ restore_gprs:
        mtcr    r4
        mtlr    r0
        ld      r2, 40(r1)
+
+       /* Load system default DSCR */
+       ld      r4, DSCR_DEFAULT@toc(r2)
+       ld      r0, 0(r4)
+       mtspr   SPRN_DSCR, r0
+
        blr
 
        /* ****************************************************************** */
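Both return paths above (the reclaim and recheckpoint exits) now reload the system-wide default DSCR before the final blr, so the kernel does not keep running with whatever DSCR value the user or transactional context had loaded. A rough C-level equivalent of that epilogue, assuming the DSCR_DEFAULT TOC entry points at a kernel-wide dscr_default variable (an assumption, not confirmed by these hunks):

	/* sketch only: restore the system default DSCR on the way out */
	extern unsigned long dscr_default;	/* assumed variable behind DSCR_DEFAULT@toc */
	mtspr(SPRN_DSCR, dscr_default);
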
index f783c932faeb3717eca6136cab5ab350f01e9a6e..f0a6814007a521649be57a7b08de14f19493ceb0 100644 (file)
@@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs)
 
        flush_fp_to_thread(current);
 
-       code = __parse_fpscr(current->thread.fpscr.val);
+       code = __parse_fpscr(current->thread.fp_state.fpscr);
 
        _exception(SIGFPE, regs, code, regs->nip);
 }
@@ -1069,7 +1069,7 @@ static int emulate_math(struct pt_regs *regs)
                return 0;
        case 1: {
                        int code = 0;
-                       code = __parse_fpscr(current->thread.fpscr.val);
+                       code = __parse_fpscr(current->thread.fp_state.fpscr);
                        _exception(SIGFPE, regs, code, regs->nip);
                        return 0;
                }
@@ -1371,8 +1371,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
-extern void do_load_up_fpu(struct pt_regs *regs);
-
 void fp_unavailable_tm(struct pt_regs *regs)
 {
        /* Note:  This does not handle any kind of FP laziness. */
@@ -1403,8 +1401,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_ALTIVEC
-extern void do_load_up_altivec(struct pt_regs *regs);
-
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
        /* See the comments in fp_unavailable_tm().  This function operates
@@ -1634,7 +1630,7 @@ void altivec_assist_exception(struct pt_regs *regs)
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
                printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
                                   "in %s at %lx\n", current->comm, regs->nip);
-               current->thread.vscr.u[3] |= 0x10000;
+               current->thread.vr_state.vscr.u[3] |= 0x10000;
        }
 }
 #endif /* CONFIG_ALTIVEC */
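The thread.fpscr.val and thread.vscr accesses above become thread.fp_state.fpscr and thread.vr_state.vscr because the FP/VSX and VMX register images are being grouped into dedicated state structures inside thread_struct. A minimal sketch of the assumed layout, inferred from the field names used throughout this series (not the exact kernel definition):

	struct thread_fp_state {
		u64		fpr[32][TS_FPRWIDTH];	/* FP (and VSX low) registers */
		u64		fpscr;			/* FP status and control */
	};

	struct thread_vr_state {
		vector128	vr[32];			/* VMX registers */
		vector128	vscr;			/* VMX status and control */
	};
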
index f223409629b9c768891f54a4f117ed964a25a0e2..e58ee10fa5c0a41efbacaa9f10bc03665d18b8fb 100644 (file)
@@ -4,7 +4,11 @@
  */
 #include <asm/vdso.h>
 
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
+#else
 OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
+#endif
 OUTPUT_ARCH(powerpc:common)
 ENTRY(_start)
 
index e4863819663b35d0ca8658a5f1fd374c4b0da5c7..64fb183a47c2fff07bc385489db33bdc5c7799e3 100644 (file)
@@ -4,7 +4,11 @@
  */
 #include <asm/vdso.h>
 
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
+#else
 OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
+#endif
 OUTPUT_ARCH(powerpc:common64)
 ENTRY(_start)
 
index 604d0947cb20cd87dcad9f7e512dd5a63ddd05d4..c4bfadb2606bcc6a5cb92b456ae0903e1ea5b86c 100644 (file)
@@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs)
        vb = (instr >> 11) & 0x1f;
        vc = (instr >> 6) & 0x1f;
 
-       vrs = current->thread.vr;
+       vrs = current->thread.vr_state.vr;
        switch (instr & 0x3f) {
        case 10:
                switch (vc) {
@@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs)
                case 14:        /* vctuxs */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
-                                               &current->thread.vscr.u[3]);
+                                       &current->thread.vr_state.vscr.u[3]);
                        break;
                case 15:        /* vctsxs */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
-                                               &current->thread.vscr.u[3]);
+                                       &current->thread.vr_state.vscr.u[3]);
                        break;
                default:
                        return -EINVAL;
index 9e20999aaef289169dd79feb42871647d4c6dd5c..eacda4eea2d70af507771df52dfa37bd20c73f55 100644 (file)
@@ -8,29 +8,6 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_altivec from C.
- * void do_load_up_altivec(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_altivec)
-       mflr    r0
-       std     r0, 16(r1)
-       stdu    r1, -112(r1)
-
-       subi    r6, r3, STACK_FRAME_OVERHEAD
-       /* load_up_altivec expects r12=MSR, r13=PACA, and returns
-        * with r12 = new MSR.
-        */
-       ld      r12,_MSR(r6)
-       GET_PACA(r13)
-       bl      load_up_altivec
-       std     r12,_MSR(r6)
-
-       ld      r0, 112+16(r1)
-       addi    r1, r1, 112
-       mtlr    r0
-       blr
-
 /* void do_load_up_transact_altivec(struct thread_struct *thread)
  *
  * This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
        li      r4,1
        stw     r4,THREAD_USED_VR(r3)
 
-       li      r10,THREAD_TRANSACT_VSCR
+       li      r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
        lvx     vr0,r10,r3
        mtvscr  vr0
-       REST_32VRS_TRANSACT(0,r4,r3)
+       addi    r10,r3,THREAD_TRANSACT_VRSTATE
+       REST_32VRS(0,r4,r10)
 
        /* Disable VEC again. */
        MTMSRD(r6)
@@ -59,7 +37,28 @@ _GLOBAL(do_load_up_transact_altivec)
 #endif
 
 /*
- * load_up_altivec(unused, unused, tsk)
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+       li      r4,VRSTATE_VSCR
+       lvx     vr0,r4,r3
+       mtvscr  vr0
+       REST_32VRS(0,r4,r3)
+       blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+       SAVE_32VRS(0, r4, r3)
+       mfvscr  vr0
+       li      r4, VRSTATE_VSCR
+       stvx    vr0, r4, r3
+       blr
+
+/*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
  * Enables the VMX for use in the kernel on return.
@@ -90,10 +89,11 @@ _GLOBAL(load_up_altivec)
        /* Save VMX state to last_task_used_altivec's THREAD struct */
        toreal(r4)
        addi    r4,r4,THREAD
-       SAVE_32VRS(0,r5,r4)
+       addi    r7,r4,THREAD_VRSTATE
+       SAVE_32VRS(0,r5,r7)
        mfvscr  vr0
-       li      r10,THREAD_VSCR
-       stvx    vr0,r10,r4
+       li      r10,VRSTATE_VSCR
+       stvx    vr0,r10,r7
        /* Disable VMX for last_task_used_altivec */
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
@@ -125,12 +125,13 @@ _GLOBAL(load_up_altivec)
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
 #endif
+       addi    r7,r5,THREAD_VRSTATE
        li      r4,1
-       li      r10,THREAD_VSCR
+       li      r10,VRSTATE_VSCR
        stw     r4,THREAD_USED_VR(r5)
-       lvx     vr0,r10,r5
+       lvx     vr0,r10,r7
        mtvscr  vr0
-       REST_32VRS(0,r4,r5)
+       REST_32VRS(0,r4,r7)
 #ifndef CONFIG_SMP
        /* Update last_task_used_altivec to 'current' */
        subi    r4,r5,THREAD            /* Back to 'current' */
@@ -165,12 +166,16 @@ _GLOBAL(giveup_altivec)
        PPC_LCMPI       0,r3,0
        beqlr                           /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
+       PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
-       PPC_LCMPI       0,r5,0
-       SAVE_32VRS(0,r4,r3)
+       PPC_LCMPI       0,r7,0
+       bne     2f
+       addi    r7,r3,THREAD_VRSTATE
+2:     PPC_LCMPI       0,r5,0
+       SAVE_32VRS(0,r4,r7)
        mfvscr  vr0
-       li      r4,THREAD_VSCR
-       stvx    vr0,r4,r3
+       li      r4,VRSTATE_VSCR
+       stvx    vr0,r4,r7
        beq     1f
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 #ifdef CONFIG_VSX
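The new load_vr_state/store_vr_state entry points give C code a way to move a thread_vr_state image to and from the VMX registers, and giveup_altivec now saves into an alternate save area when THREAD_VRSAVEAREA is set. A hedged sketch of the C-side prototypes these entry points imply (names taken from the assembly, signatures assumed):

	extern void load_vr_state(struct thread_vr_state *vr);	/* memory -> VR0-31 + VSCR */
	extern void store_vr_state(struct thread_vr_state *vr);	/* VR0-31 + VSCR -> memory */
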
index 78a350670de32b4ba7013ea8fefe2ab5c0d5d626..089de12b9ab0946e324c4521e11936ae685b15da 100644 (file)
@@ -1413,8 +1413,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 
                /* needed to ensure proper operation of coherent allocations
                 * later, in case driver doesn't set it explicitly */
-               dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
-               dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+               dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
        }
 
        /* register with generic device framework */
@@ -1530,11 +1529,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
        const char *cp;
 
        dn = dev->of_node;
-       if (!dn)
-               return -ENODEV;
+       if (!dn) {
+               strcat(buf, "\n");
+               return strlen(buf);
+       }
        cp = of_get_property(dn, "compatible", NULL);
-       if (!cp)
-               return -ENODEV;
+       if (!cp) {
+               strcat(buf, "\n");
+               return strlen(buf);
+       }
 
        return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
 }
index ffaef2cb101a4ef50c77f1dc58c3cdac1f2da105..e593ff257bd300461d8a6e487e0fb448e5d86936 100644 (file)
@@ -6,6 +6,7 @@ source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION
        bool "Virtualization"
+       depends on !CPU_LITTLE_ENDIAN
        ---help---
          Say Y here to get to see options for using your Linux host to run
          other operating systems inside virtual machines (guests).
index 294b7af28cdd3c58b2551c53b9210daa429f836d..c71103b8a748350947ad3c2f6b7256c6c2841405 100644 (file)
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 BEGIN_FTR_SECTION
        mfspr   r8, SPRN_DSCR
        ld      r7, HSTATE_DSCR(r13)
-       std     r8, VCPU_DSCR(r7)
+       std     r8, VCPU_DSCR(r9)
        mtspr   SPRN_DSCR, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
index 27db1e66595987a99e2f387819345af30baad739..c0b48f96a91c9817b17e80b6b32f3f4e6aac0167 100644 (file)
@@ -444,7 +444,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 #ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
 #endif
-       u64 *thread_fpr = (u64*)t->fpr;
+       u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;
 
        /*
@@ -466,14 +466,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
-                * registers into thread.fpr[].
+                * registers into thread.fp_state.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
-               vcpu->arch.fpscr = t->fpscr.val;
+               vcpu->arch.fpscr = t->fp_state.fpscr;
 
 #ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
@@ -486,8 +486,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
-               memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
-               vcpu->arch.vscr = t->vscr;
+               memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
+               vcpu->arch.vscr = t->vr_state.vscr;
        }
 #endif
 
@@ -539,7 +539,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
 #endif
-       u64 *thread_fpr = (u64*)t->fpr;
+       u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;
 
        /* When we have paired singles, we emulate in software */
@@ -584,15 +584,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
 #endif
-               t->fpscr.val = vcpu->arch.fpscr;
+               t->fp_state.fpscr = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }
 
        if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-               memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-               t->vscr = vcpu->arch.vscr;
+               memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+               t->vr_state.vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
 #endif
@@ -1116,12 +1116,10 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret;
-       double fpr[32][TS_FPRWIDTH];
-       unsigned int fpscr;
+       struct thread_fp_state fp;
        int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-       vector128 vr[32];
-       vector128 vscr;
+       struct thread_vr_state vr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
 #endif
@@ -1153,8 +1151,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
-       memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
-       fpscr = current->thread.fpscr.val;
+       fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
@@ -1163,8 +1160,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
-               memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
-               vscr = current->thread.vscr;
+               vr = current->thread.vr_state;
                vrsave = current->thread.vrsave;
        }
 #endif
@@ -1196,15 +1192,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        current->thread.regs->msr = ext_msr;
 
        /* Restore FPU/VSX state from stack */
-       memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = fpscr;
+       current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
-               memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
-               current->thread.vscr = vscr;
+               current->thread.vr_state = vr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
index 17722d82f1d1f500bd5579f544cf29b18e6cbd80..5133199f6cb7b6cab1feca199f434bfd69a89e1a 100644 (file)
@@ -656,9 +656,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret, s;
 #ifdef CONFIG_PPC_FPU
-       unsigned int fpscr;
+       struct thread_fp_state fp;
        int fpexc_mode;
-       u64 fpr[32];
 #endif
 
        if (!vcpu->arch.sane) {
@@ -677,13 +676,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
-       memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
-       fpscr = current->thread.fpscr.val;
+       fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;
 
        /* Restore guest FPU state to thread */
-       memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
-       current->thread.fpscr.val = vcpu->arch.fpscr;
+       memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
+              sizeof(vcpu->arch.fpr));
+       current->thread.fp_state.fpscr = vcpu->arch.fpscr;
 
        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -709,12 +708,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        vcpu->fpu_active = 0;
 
        /* Save guest FPU state from thread */
-       memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
-       vcpu->arch.fpscr = current->thread.fpscr.val;
+       memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
+              sizeof(vcpu->arch.fpr));
+       vcpu->arch.fpscr = current->thread.fp_state.fpscr;
 
        /* Restore userspace FPU state from stack */
-       memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = fpscr;
+       current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;
 #endif
 
index 1c6a9d729df4a26ca3ad6c9f1c94008d681f7d42..c65593abae8eb879f71915cbf639f0dfec4468dc 100644 (file)
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;
+       int ret = 0;
+       unsigned long mmu_seq;
+       struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+       /* used to check for invalidations in progress */
+       mmu_seq = kvm->mmu_notifier_seq;
+       smp_rmb();
 
        /*
         * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }
 
+       spin_lock(&kvm->mmu_lock);
+       if (mmu_notifier_retry(kvm, mmu_seq)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);
 
+out:
+       spin_unlock(&kvm->mmu_lock);
+
        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);
 
-       return 0;
+       return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
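The kvmppc_e500_shadow_map() change above is the standard KVM MMU-notifier race check: sample kvm->mmu_notifier_seq before the (possibly sleeping) guest-physical to host-physical translation, then re-check it under kvm->mmu_lock before installing the shadow TLB entry, and bail out with -EAGAIN if an invalidation ran in between. The shape of the pattern, as a minimal sketch:

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();				/* order the read before the pfn lookup */

	/* ... gfn -> pfn translation, may sleep or fault pages in ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;			/* an invalidation raced with us */
		goto out;
	}
	/* safe to set up the shadow TLB entry here */
out:
	spin_unlock(&kvm->mmu_lock);
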
index 4504332766990147fdbfef302baac7919ec179fa..5310132856c11d11e31964294d9345fca55aa1a9 100644 (file)
@@ -10,15 +10,23 @@ CFLAGS_REMOVE_code-patching.o = -pg
 CFLAGS_REMOVE_feature-fixups.o = -pg
 
 obj-y                  := string.o alloc.o \
-                          checksum_$(CONFIG_WORD_SIZE).o crtsavres.o
+                          crtsavres.o
 obj-$(CONFIG_PPC32)    += div64.o copy_32.o
 obj-$(CONFIG_HAS_IOMEM)        += devres.o
 
 obj-$(CONFIG_PPC64)    += copypage_64.o copyuser_64.o \
-                          memcpy_64.o usercopy_64.o mem_64.o string.o \
-                          checksum_wrappers_64.o hweight_64.o \
-                          copyuser_power7.o string_64.o copypage_power7.o \
-                          memcpy_power7.o
+                          usercopy_64.o mem_64.o string.o \
+                          hweight_64.o \
+                          copyuser_power7.o string_64.o copypage_power7.o
+ifeq ($(CONFIG_GENERIC_CSUM),)
+obj-y                  += checksum_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC64)    += checksum_wrappers_64.o
+endif
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),)
+obj-$(CONFIG_PPC64)            += memcpy_power7.o memcpy_64.o 
+endif
+
 obj-$(CONFIG_PPC_EMULATE_SSTEP)        += sstep.o ldstfp.o
 
 ifeq ($(CONFIG_PPC64),y)
index 167f72555d604dc6c24194e3bb9c1e0b3d021bc4..57a0720650576a9f72e98cece18a71e25045ec3a 100644 (file)
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
        blr
 
 
-       .macro source
+       .macro srcnr
 100:
        .section __ex_table,"a"
        .align 3
-       .llong 100b,.Lsrc_error
+       .llong 100b,.Lsrc_error_nr
        .previous
        .endm
 
-       .macro dest
+       .macro source
+150:
+       .section __ex_table,"a"
+       .align 3
+       .llong 150b,.Lsrc_error
+       .previous
+       .endm
+
+       .macro dstnr
 200:
        .section __ex_table,"a"
        .align 3
-       .llong 200b,.Ldest_error
+       .llong 200b,.Ldest_error_nr
+       .previous
+       .endm
+
+       .macro dest
+250:
+       .section __ex_table,"a"
+       .align 3
+       .llong 250b,.Ldest_error
        .previous
        .endm
 
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
        rldicl. r6,r3,64-1,64-2         /* r6 = (r3 & 0x3) >> 1 */
        beq     .Lcopy_aligned
 
-       li      r7,4
-       sub     r6,r7,r6
+       li      r9,4
+       sub     r6,r9,r6
        mtctr   r6
 
 1:
-source;        lhz     r6,0(r3)                /* align to doubleword */
+srcnr; lhz     r6,0(r3)                /* align to doubleword */
        subi    r5,r5,2
        addi    r3,r3,2
        adde    r0,r0,r6
-dest;  sth     r6,0(r4)
+dstnr; sth     r6,0(r4)
        addi    r4,r4,2
        bdnz    1b
 
@@ -392,10 +408,10 @@ dest;     std     r16,56(r4)
 
        mtctr   r6
 3:
-source;        ld      r6,0(r3)
+srcnr; ld      r6,0(r3)
        addi    r3,r3,8
        adde    r0,r0,r6
-dest;  std     r6,0(r4)
+dstnr; std     r6,0(r4)
        addi    r4,r4,8
        bdnz    3b
 
@@ -405,10 +421,10 @@ dest;     std     r6,0(r4)
        srdi.   r6,r5,2
        beq     .Lcopy_tail_halfword
 
-source;        lwz     r6,0(r3)
+srcnr; lwz     r6,0(r3)
        addi    r3,r3,4
        adde    r0,r0,r6
-dest;  stw     r6,0(r4)
+dstnr; stw     r6,0(r4)
        addi    r4,r4,4
        subi    r5,r5,4
 
@@ -416,10 +432,10 @@ dest;     stw     r6,0(r4)
        srdi.   r6,r5,1
        beq     .Lcopy_tail_byte
 
-source;        lhz     r6,0(r3)
+srcnr; lhz     r6,0(r3)
        addi    r3,r3,2
        adde    r0,r0,r6
-dest;  sth     r6,0(r4)
+dstnr; sth     r6,0(r4)
        addi    r4,r4,2
        subi    r5,r5,2
 
@@ -427,10 +443,10 @@ dest;     sth     r6,0(r4)
        andi.   r6,r5,1
        beq     .Lcopy_finish
 
-source;        lbz     r6,0(r3)
+srcnr; lbz     r6,0(r3)
        sldi    r9,r6,8                 /* Pad the byte out to 16 bits */
        adde    r0,r0,r9
-dest;  stb     r6,0(r4)
+dstnr; stb     r6,0(r4)
 
 .Lcopy_finish:
        addze   r0,r0                   /* add in final carry */
@@ -440,6 +456,11 @@ dest;      stb     r6,0(r4)
        blr
 
 .Lsrc_error:
+       ld      r14,STK_REG(R14)(r1)
+       ld      r15,STK_REG(R15)(r1)
+       ld      r16,STK_REG(R16)(r1)
+       addi    r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
        cmpdi   0,r7,0
        beqlr
        li      r6,-EFAULT
@@ -447,6 +468,11 @@ dest;      stb     r6,0(r4)
        blr
 
 .Ldest_error:
+       ld      r14,STK_REG(R14)(r1)
+       ld      r15,STK_REG(R15)(r1)
+       ld      r16,STK_REG(R16)(r1)
+       addi    r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
        cmpdi   0,r8,0
        beqlr
        li      r6,-EFAULT
index d1f11795a7ad64bd6bd05beb522e07e44eb46498..e8e9c36dc7844455c4b24356cdff5f9ed9e70aff 100644 (file)
  */
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB)         lvsl    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB)         lvsr    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRB,VRA,VRC
+#endif
+
        .macro err1
 100:
        .section __ex_table,"a"
@@ -552,13 +560,13 @@ err3;     stw     r7,4(r3)
        li      r10,32
        li      r11,48
 
-       lvsl    vr16,0,r4       /* Setup permute control vector */
+       LVS(vr16,0,r4)          /* Setup permute control vector */
 err3;  lvx     vr0,0,r4
        addi    r4,r4,16
 
        bf      cr7*4+3,5f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
 err3;  stvx    vr8,r0,r3
        addi    r3,r3,16
@@ -566,9 +574,9 @@ err3;       stvx    vr8,r0,r3
 
 5:     bf      cr7*4+2,6f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
 err3;  lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -576,13 +584,13 @@ err3;     stvx    vr9,r3,r9
 
 6:     bf      cr7*4+1,7f
 err3;  lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
 err3;  lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
 err3;  lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
 err3;  lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -611,21 +619,21 @@ err3;     stvx    vr11,r3,r11
        .align  5
 8:
 err4;  lvx     vr7,r0,r4
-       vperm   vr8,vr0,vr7,vr16
+       VPERM(vr8,vr0,vr7,vr16)
 err4;  lvx     vr6,r4,r9
-       vperm   vr9,vr7,vr6,vr16
+       VPERM(vr9,vr7,vr6,vr16)
 err4;  lvx     vr5,r4,r10
-       vperm   vr10,vr6,vr5,vr16
+       VPERM(vr10,vr6,vr5,vr16)
 err4;  lvx     vr4,r4,r11
-       vperm   vr11,vr5,vr4,vr16
+       VPERM(vr11,vr5,vr4,vr16)
 err4;  lvx     vr3,r4,r12
-       vperm   vr12,vr4,vr3,vr16
+       VPERM(vr12,vr4,vr3,vr16)
 err4;  lvx     vr2,r4,r14
-       vperm   vr13,vr3,vr2,vr16
+       VPERM(vr13,vr3,vr2,vr16)
 err4;  lvx     vr1,r4,r15
-       vperm   vr14,vr2,vr1,vr16
+       VPERM(vr14,vr2,vr1,vr16)
 err4;  lvx     vr0,r4,r16
-       vperm   vr15,vr1,vr0,vr16
+       VPERM(vr15,vr1,vr0,vr16)
        addi    r4,r4,128
 err4;  stvx    vr8,r0,r3
 err4;  stvx    vr9,r3,r9
@@ -649,13 +657,13 @@ err4;     stvx    vr15,r3,r16
 
        bf      cr7*4+1,9f
 err3;  lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
 err3;  lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
 err3;  lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
 err3;  lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -665,9 +673,9 @@ err3;       stvx    vr11,r3,r11
 
 9:     bf      cr7*4+2,10f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
 err3;  lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -675,7 +683,7 @@ err3;       stvx    vr9,r3,r9
 
 10:    bf      cr7*4+3,11f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
 err3;  stvx    vr8,r0,r3
        addi    r3,r3,16
index 0663630baf3b46373905d60d96e989fee6637aa8..e4177dbea6bd6a9e59e1cfc548195b1223b8eb0d 100644 (file)
 #include <asm/ppc_asm.h>
 
 _GLOBAL(memcpy_power7)
+
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB)         lvsl    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB)         lvsr    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRB,VRA,VRC
+#endif
+
 #ifdef CONFIG_ALTIVEC
        cmpldi  r5,16
        cmpldi  cr1,r5,4096
@@ -485,13 +494,13 @@ _GLOBAL(memcpy_power7)
        li      r10,32
        li      r11,48
 
-       lvsl    vr16,0,r4       /* Setup permute control vector */
+       LVS(vr16,0,r4)          /* Setup permute control vector */
        lvx     vr0,0,r4
        addi    r4,r4,16
 
        bf      cr7*4+3,5f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
        stvx    vr8,r0,r3
        addi    r3,r3,16
@@ -499,9 +508,9 @@ _GLOBAL(memcpy_power7)
 
 5:     bf      cr7*4+2,6f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -509,13 +518,13 @@ _GLOBAL(memcpy_power7)
 
 6:     bf      cr7*4+1,7f
        lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
        lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
        lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
        lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -544,21 +553,21 @@ _GLOBAL(memcpy_power7)
        .align  5
 8:
        lvx     vr7,r0,r4
-       vperm   vr8,vr0,vr7,vr16
+       VPERM(vr8,vr0,vr7,vr16)
        lvx     vr6,r4,r9
-       vperm   vr9,vr7,vr6,vr16
+       VPERM(vr9,vr7,vr6,vr16)
        lvx     vr5,r4,r10
-       vperm   vr10,vr6,vr5,vr16
+       VPERM(vr10,vr6,vr5,vr16)
        lvx     vr4,r4,r11
-       vperm   vr11,vr5,vr4,vr16
+       VPERM(vr11,vr5,vr4,vr16)
        lvx     vr3,r4,r12
-       vperm   vr12,vr4,vr3,vr16
+       VPERM(vr12,vr4,vr3,vr16)
        lvx     vr2,r4,r14
-       vperm   vr13,vr3,vr2,vr16
+       VPERM(vr13,vr3,vr2,vr16)
        lvx     vr1,r4,r15
-       vperm   vr14,vr2,vr1,vr16
+       VPERM(vr14,vr2,vr1,vr16)
        lvx     vr0,r4,r16
-       vperm   vr15,vr1,vr0,vr16
+       VPERM(vr15,vr1,vr0,vr16)
        addi    r4,r4,128
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -582,13 +591,13 @@ _GLOBAL(memcpy_power7)
 
        bf      cr7*4+1,9f
        lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
        lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
        lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
        lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -598,9 +607,9 @@ _GLOBAL(memcpy_power7)
 
 9:     bf      cr7*4+2,10f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -608,7 +617,7 @@ _GLOBAL(memcpy_power7)
 
 10:    bf      cr7*4+3,11f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
        stvx    vr8,r0,r3
        addi    r3,r3,16
index a7ee978fb860c9ffd237d9fbafb716b724e3afec..b1faa1593c9067e68995e1cdc54dbb8465dce587 100644 (file)
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                 */
                if ((ra == 1) && !(regs->msr & MSR_PR) \
                        && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
                        /*
                         * Check if we will touch kernel sack overflow
                         */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                                err = -EINVAL;
                                break;
                        }
-
+#endif /* CONFIG_PPC32 */
                        /*
                         * Check if we already set since that means we'll
                         * lose the previous value.
index c33d939120c970f3f800b42b5dfa3739e28ac16f..3ea26c25590be1dabe4a057882f35b77f5dfe7c1 100644 (file)
 #define DBG_LOW(fmt...)
 #endif
 
+#ifdef __BIG_ENDIAN__
 #define HPTE_LOCK_BIT 3
+#else
+#define HPTE_LOCK_BIT (56+3)
+#endif
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
@@ -172,7 +176,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 
 static inline void native_lock_hpte(struct hash_pte *hptep)
 {
-       unsigned long *word = &hptep->v;
+       unsigned long *word = (unsigned long *)&hptep->v;
 
        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
@@ -184,7 +188,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 
 static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
-       unsigned long *word = &hptep->v;
+       unsigned long *word = (unsigned long *)&hptep->v;
 
        clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
@@ -204,10 +208,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
        }
 
        for (i = 0; i < HPTES_PER_GROUP; i++) {
-               if (! (hptep->v & HPTE_V_VALID)) {
+               if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
-                       if (! (hptep->v & HPTE_V_VALID))
+                       if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }
@@ -226,14 +230,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                        i, hpte_v, hpte_r);
        }
 
-       hptep->r = hpte_r;
+       hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
-       hptep->v = hpte_v;
+       hptep->v = cpu_to_be64(hpte_v);
 
        __asm__ __volatile__ ("ptesync" : : : "memory");
 
@@ -254,12 +258,12 @@ static long native_hpte_remove(unsigned long hpte_group)
 
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
-                       hpte_v = hptep->v;
+                       hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
@@ -294,7 +298,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
        native_lock_hpte(hptep);
 
-       hpte_v = hptep->v;
+       hpte_v = be64_to_cpu(hptep->v);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -308,8 +312,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
        } else {
                DBG_LOW(" -> hit\n");
                /* Update the HPTE */
-               hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-                       (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
+               hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
+                       (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
        }
        native_unlock_hpte(hptep);
 
@@ -334,7 +338,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
@@ -369,8 +373,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
        hptep = htab_address + slot;
 
        /* Update the HPTE */
-       hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-               (newpp & (HPTE_R_PP | HPTE_R_N));
+       hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+                       ~(HPTE_R_PP | HPTE_R_N)) |
+               (newpp & (HPTE_R_PP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. Bolted entries base and
         * actual page size will be same.
@@ -392,7 +397,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 
        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        native_lock_hpte(hptep);
-       hpte_v = hptep->v;
+       hpte_v = be64_to_cpu(hptep->v);
 
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -458,7 +463,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                native_lock_hpte(hptep);
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                /* Even if we miss, we need to invalidate the TLB */
                if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -519,11 +524,12 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
 {
        unsigned long avpn, pteg, vpi;
-       unsigned long hpte_v = hpte->v;
+       unsigned long hpte_v = be64_to_cpu(hpte->v);
+       unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
-       unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+       unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
 
        if (!(hpte_v & HPTE_V_LARGE)) {
                size   = MMU_PAGE_4K;
@@ -612,7 +618,7 @@ static void native_hpte_clear(void)
                 * running,  right?  and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                /*
                 * Call __tlbie() here rather than tlbie() since we
@@ -664,7 +670,7 @@ static void native_flush_hash_range(unsigned long number, int local)
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
-                       hpte_v = hptep->v;
+                       hpte_v = be64_to_cpu(hptep->v);
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
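All of the hptep->v and hptep->r accesses above gain be64_to_cpu()/cpu_to_be64() because the hardware hash page table always holds big-endian doublewords regardless of the kernel's endianness; HPTE_LOCK_BIT likewise moves because the bit number is taken within the little-endian view of the word. A minimal sketch of the assumed layout and accessor pattern:

	/* hash PTE as the hardware sees it: two big-endian dwords (sketch) */
	struct hash_pte {
		__be64 v;			/* AVPN, valid, bolted, ... */
		__be64 r;			/* real page number, protection, ... */
	};

	unsigned long hpte_v = be64_to_cpu(hptep->v);	/* read */
	hptep->r = cpu_to_be64(hpte_r);			/* write */
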
index bde8b55897551a60b15ad6017c7910a6cd3278b8..6176b3cdf57991590df2b26f42f27eed593096bb 100644 (file)
@@ -251,19 +251,18 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
+       __be32 *prop;
        unsigned long size = 0;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;
 
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
-                                         &size);
+       prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
-               if (prop[0] == 40) {
+               if (be32_to_cpu(prop[0]) == 40) {
                        DBG("1T segment support detected\n");
                        cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
                        return 1;
@@ -307,23 +306,22 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
+       __be32 *prop;
        unsigned long size = 0;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;
 
-       prop = (u32 *)of_get_flat_dt_prop(node,
-                                         "ibm,segment-page-sizes", &size);
+       prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                pr_info("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
                while(size > 0) {
-                       unsigned int base_shift = prop[0];
-                       unsigned int slbenc = prop[1];
-                       unsigned int lpnum = prop[2];
+                       unsigned int base_shift = be32_to_cpu(prop[0]);
+                       unsigned int slbenc = be32_to_cpu(prop[1]);
+                       unsigned int lpnum = be32_to_cpu(prop[2]);
                        struct mmu_psize_def *def;
                        int idx, base_idx;
 
@@ -356,8 +354,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
                                def->tlbiel = 0;
 
                        while (size > 0 && lpnum) {
-                               unsigned int shift = prop[0];
-                               int penc  = prop[1];
+                               unsigned int shift = be32_to_cpu(prop[0]);
+                               int penc  = be32_to_cpu(prop[1]);
 
                                prop += 2; size -= 2;
                                lpnum--;
@@ -390,8 +388,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                        const char *uname, int depth,
                                        void *data) {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       unsigned long *addr_prop;
-       u32 *page_count_prop;
+       __be64 *addr_prop;
+       __be32 *page_count_prop;
        unsigned int expected_pages;
        long unsigned int phys_addr;
        long unsigned int block_size;
@@ -405,12 +403,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
-       expected_pages = (1 << page_count_prop[0]);
+       expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
-       phys_addr = addr_prop[0];
-       block_size = addr_prop[1];
+       phys_addr = be64_to_cpu(addr_prop[0]);
+       block_size = be64_to_cpu(addr_prop[1]);
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
@@ -534,16 +532,16 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
                                       void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
+       __be32 *prop;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;
 
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+       prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
-               ppc64_pft_size = prop[1];
+               ppc64_pft_size = be32_to_cpu(prop[1]);
                return 1;
        }
        return 0;
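The same endianness treatment applies to the flat device-tree scanners above: property data is a big-endian byte stream, so the property pointers become __be32/__be64 and every read goes through be32_to_cpu()/be64_to_cpu(). A minimal sketch of the pattern, using the pft-size property from the last hunk:

	__be32 *prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL)
		ppc64_pft_size = be32_to_cpu(prop[1]);	/* prop[0] is the NUMA CEC cookie */
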
index d0cd9e4c6837d2d17620b0f646151d9a54408f71..e3734edffa697ac42cf9d4563fb754b8602d3bcd 100644 (file)
@@ -300,5 +300,58 @@ void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+void register_page_bootmem_memmap(unsigned long section_nr,
+                                 struct page *start_page, unsigned long size)
+{
+}
+
+/*
+ * We do not have access to the sparsemem vmemmap, so we fall back to
+ * walking the list of sparsemem blocks which we already maintain for
+ * the sake of crashdump. In the long run, we might want to maintain
+ * a tree if performance of that linear walk becomes a problem.
+ *
+ * realmode_pfn_to_page functions can fail due to:
+ * 1) As real sparsemem blocks do not lie in RAM contiguously (they
+ * are in virtual address space which is not available in the real mode),
+ * the requested page struct can be split between blocks so get_page/put_page
+ * may fail.
+ * 2) When huge pages are used, the get_page/put_page API will fail
+ * in real mode as the linked addresses in the page struct are virtual
+ * too.
+ */
+struct page *realmode_pfn_to_page(unsigned long pfn)
+{
+       struct vmemmap_backing *vmem_back;
+       struct page *page;
+       unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+       unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
+
+       for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
+               if (pg_va < vmem_back->virt_addr)
+                       continue;
+
+               /* Check that page struct is not split between real pages */
+               if ((pg_va + sizeof(struct page)) >
+                               (vmem_back->virt_addr + page_size))
+                       return NULL;
+
+               page = (struct page *) (vmem_back->phys + pg_va -
+                               vmem_back->virt_addr);
+               return page;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
+
+#elif defined(CONFIG_FLATMEM)
+
+struct page *realmode_pfn_to_page(unsigned long pfn)
+{
+       struct page *page = pfn_to_page(pfn);
+       return page;
+}
+EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 
+#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
index 1cf9c5b67f241f40f46b127024c4a659e95af221..3fa93dc7fe750a9f53c875493b969f1962bea9af 100644 (file)
@@ -297,12 +297,21 @@ void __init paging_init(void)
 }
 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
+static void __init register_page_bootmem_info(void)
+{
+       int i;
+
+       for_each_online_node(i)
+               register_page_bootmem_info_node(NODE_DATA(i));
+}
+
 void __init mem_init(void)
 {
 #ifdef CONFIG_SWIOTLB
        swiotlb_init(0);
 #endif
 
+       register_page_bootmem_info();
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
        free_all_bootmem();
index bf56e33f8257f68717b241c98abefb2f925f2858..2345bdb4d91784bb2bbdd8ce05d9c3566e48f309 100644 (file)
@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index 2ee4a707f0df85c37dba15c6ab5f4e7f84e553fc..a3f7abd2f13f7a0793ce4e2bdb00855f9724d108 100644 (file)
 #define MMCR1_UNIT_SHIFT(pmc)          (60 - (4 * ((pmc) - 1)))
 #define MMCR1_COMBINE_SHIFT(pmc)       (35 - ((pmc) - 1))
 #define MMCR1_PMCSEL_SHIFT(pmc)                (24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT                        36
 #define MMCR1_DC_QUAL_SHIFT            47
 #define MMCR1_IC_QUAL_SHIFT            46
 
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
                 * the threshold bits are used for the match value.
                 */
                if (event_is_fab_match(event[i])) {
-                       mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
-                                 EVENT_THR_CTL_MASK;
+                       mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+                                 EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
                } else {
                        val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
                        mmcra |= val << MMCRA_THR_CTL_SHIFT;
index a82a41b4fd917895631104627f6b05b333dff8fe..1a7b1d0f41df9bf4ec344b77bd76223526141777 100644 (file)
@@ -303,6 +303,9 @@ void __init mpc512x_setup_diu(void)
        diu_ops.release_bootmem         = mpc512x_release_bootmem;
 }
 
+#else
+void __init mpc512x_setup_diu(void) { /* EMPTY */ }
+void __init mpc512x_init_diu(void) { /* EMPTY */ }
 #endif
 
 void __init mpc512x_init_IRQ(void)
index b69221ba07fd21868dc7c1892384969fb6c03d53..2898b737deb79e5015c7658568a696254d6f23b5 100644 (file)
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
 {
        int l1irq;
        int l2irq;
-       struct irq_chip *irqchip;
+       struct irq_chip *uninitialized_var(irqchip);
        void *hndlr;
        int type;
        u32 reg;
@@ -373,9 +373,8 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
        case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
        case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
        case MPC52xx_IRQ_L1_CRIT:
-       default:
                pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
-                       __func__, l1irq);
+                       __func__, l2irq);
                irq_set_chip(virq, &no_irq_chip);
                return 0;
        }
index 8d21ab70e06c4aec26f55f67fc4d0f1574811970..ef0778a0ca8f996ef0b8d6c86a032c6eb05899f7 100644 (file)
@@ -48,7 +48,7 @@ struct cpm_pin {
        int port, pin, flags;
 };
 
-static struct __initdata cpm_pin tqm8xx_pins[] = {
+static struct cpm_pin tqm8xx_pins[] __initdata = {
        /* SMC1 */
        {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
        {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
@@ -63,7 +63,7 @@ static struct __initdata cpm_pin tqm8xx_pins[] = {
        {CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO},
 };
 
-static struct __initdata cpm_pin tqm8xx_fec_pins[] = {
+static struct cpm_pin tqm8xx_fec_pins[] __initdata = {
        /* MII */
        {CPM_PORTD, 3, CPM_PIN_OUTPUT},
        {CPM_PORTD, 4, CPM_PIN_OUTPUT},
index 6fae5eb99ea6febff2fdd465fcf19c64d9114643..9fced3f6d2dcfadbf0f26c71d57693d79b1c78ba 100644 (file)
@@ -9,6 +9,8 @@ config PPC_POWERNV
        select EPAPR_BOOT
        select PPC_INDIRECT_PIO
        select PPC_UDBG_16550
+       select PPC_SCOM
+       select ARCH_RANDOM
        default y
 
 config POWERNV_MSI
index 300c437d713cf1a6b4c73d2b2bb58830483219fe..050d57e0c78811e2c1276ec90ead89a827d1fc8c 100644 (file)
@@ -1,6 +1,7 @@
 obj-y                  += setup.o opal-takeover.o opal-wrappers.o opal.o
-obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o
+obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o rng.o
 
 obj-$(CONFIG_SMP)      += smp.o
 obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o
 obj-$(CONFIG_EEH)      += eeh-ioda.o eeh-powernv.o
+obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
index cf42e74514fa192e3a8d64ed0d41ada445a974e3..02245cee78183852d52f3a907023dcd9a63529bf 100644 (file)
@@ -59,26 +59,60 @@ static struct notifier_block ioda_eeh_nb = {
 };
 
 #ifdef CONFIG_DEBUG_FS
-static int ioda_eeh_dbgfs_set(void *data, u64 val)
+static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
 {
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;
 
-       out_be64(phb->regs + 0xD10, val);
+       out_be64(phb->regs + offset, val);
        return 0;
 }
 
-static int ioda_eeh_dbgfs_get(void *data, u64 *val)
+static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
 {
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;
 
-       *val = in_be64(phb->regs + 0xD10);
+       *val = in_be64(phb->regs + offset);
        return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_dbgfs_ops, ioda_eeh_dbgfs_get,
-                       ioda_eeh_dbgfs_set, "0x%llx\n");
+static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
+{
+       return ioda_eeh_dbgfs_set(data, 0xD10, val);
+}
+
+static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
+{
+       return ioda_eeh_dbgfs_get(data, 0xD10, val);
+}
+
+static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
+{
+       return ioda_eeh_dbgfs_set(data, 0xD90, val);
+}
+
+static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
+{
+       return ioda_eeh_dbgfs_get(data, 0xD90, val);
+}
+
+static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
+{
+       return ioda_eeh_dbgfs_set(data, 0xE10, val);
+}
+
+static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
+{
+       return ioda_eeh_dbgfs_get(data, 0xE10, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
+                       ioda_eeh_outb_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
+                       ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
+                       ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
 #endif /* CONFIG_DEBUG_FS */
 
 /**
@@ -106,27 +140,30 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
                ioda_eeh_nb_init = 1;
        }
 
-       /* FIXME: Enable it for PHB3 later */
-       if (phb->type == PNV_PHB_IODA1) {
+       /* We needn't HUB diag-data on PHB3 */
+       if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
+               hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                if (!hub_diag) {
-                       hub_diag = (char *)__get_free_page(GFP_KERNEL |
-                                                          __GFP_ZERO);
-                       if (!hub_diag) {
-                               pr_err("%s: Out of memory !\n",
-                                      __func__);
-                               return -ENOMEM;
-                       }
+                       pr_err("%s: Out of memory !\n", __func__);
+                       return -ENOMEM;
                }
+       }
 
 #ifdef CONFIG_DEBUG_FS
-               if (phb->dbgfs)
-                       debugfs_create_file("err_injct", 0600,
-                                           phb->dbgfs, hose,
-                                           &ioda_eeh_dbgfs_ops);
+       if (phb->dbgfs) {
+               debugfs_create_file("err_injct_outbound", 0600,
+                                   phb->dbgfs, hose,
+                                   &ioda_eeh_outb_dbgfs_ops);
+               debugfs_create_file("err_injct_inboundA", 0600,
+                                   phb->dbgfs, hose,
+                                   &ioda_eeh_inbA_dbgfs_ops);
+               debugfs_create_file("err_injct_inboundB", 0600,
+                                   phb->dbgfs, hose,
+                                   &ioda_eeh_inbB_dbgfs_ops);
+       }
 #endif
 
-               phb->eeh_state |= PNV_EEH_STATE_ENABLED;
-       }
+       phb->eeh_state |= PNV_EEH_STATE_ENABLED;
 
        return 0;
 }
@@ -546,8 +583,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
                        phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
        if (ret) {
                spin_unlock_irqrestore(&phb->lock, flags);
-               pr_warning("%s: Failed to get log for PHB#%x-PE#%x\n",
-                          __func__, hose->global_number, pe->addr);
+               pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
+                          __func__, hose->global_number, pe->addr, ret);
                return -EIO;
        }
 
@@ -710,6 +747,73 @@ static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
        }
 }
 
+static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
+                                   struct OpalIoPhbErrorCommon *common)
+{
+       struct OpalIoPhb3ErrorData *data;
+       int i;
+
+       data = (struct OpalIoPhb3ErrorData*)common;
+       pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
+               hose->global_number, common->version);
+
+       pr_info("  brdgCtl:              %08x\n", data->brdgCtl);
+
+       pr_info("  portStatusReg:        %08x\n", data->portStatusReg);
+       pr_info("  rootCmplxStatus:      %08x\n", data->rootCmplxStatus);
+       pr_info("  busAgentStatus:       %08x\n", data->busAgentStatus);
+
+       pr_info("  deviceStatus:         %08x\n", data->deviceStatus);
+       pr_info("  slotStatus:           %08x\n", data->slotStatus);
+       pr_info("  linkStatus:           %08x\n", data->linkStatus);
+       pr_info("  devCmdStatus:         %08x\n", data->devCmdStatus);
+       pr_info("  devSecStatus:         %08x\n", data->devSecStatus);
+
+       pr_info("  rootErrorStatus:      %08x\n", data->rootErrorStatus);
+       pr_info("  uncorrErrorStatus:    %08x\n", data->uncorrErrorStatus);
+       pr_info("  corrErrorStatus:      %08x\n", data->corrErrorStatus);
+       pr_info("  tlpHdr1:              %08x\n", data->tlpHdr1);
+       pr_info("  tlpHdr2:              %08x\n", data->tlpHdr2);
+       pr_info("  tlpHdr3:              %08x\n", data->tlpHdr3);
+       pr_info("  tlpHdr4:              %08x\n", data->tlpHdr4);
+       pr_info("  sourceId:             %08x\n", data->sourceId);
+       pr_info("  errorClass:           %016llx\n", data->errorClass);
+       pr_info("  correlator:           %016llx\n", data->correlator);
+       pr_info("  nFir:                 %016llx\n", data->nFir);
+       pr_info("  nFirMask:             %016llx\n", data->nFirMask);
+       pr_info("  nFirWOF:              %016llx\n", data->nFirWOF);
+       pr_info("  PhbPlssr:             %016llx\n", data->phbPlssr);
+       pr_info("  PhbCsr:               %016llx\n", data->phbCsr);
+       pr_info("  lemFir:               %016llx\n", data->lemFir);
+       pr_info("  lemErrorMask:         %016llx\n", data->lemErrorMask);
+       pr_info("  lemWOF:               %016llx\n", data->lemWOF);
+       pr_info("  phbErrorStatus:       %016llx\n", data->phbErrorStatus);
+       pr_info("  phbFirstErrorStatus:  %016llx\n", data->phbFirstErrorStatus);
+       pr_info("  phbErrorLog0:         %016llx\n", data->phbErrorLog0);
+       pr_info("  phbErrorLog1:         %016llx\n", data->phbErrorLog1);
+       pr_info("  mmioErrorStatus:      %016llx\n", data->mmioErrorStatus);
+       pr_info("  mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+       pr_info("  mmioErrorLog0:        %016llx\n", data->mmioErrorLog0);
+       pr_info("  mmioErrorLog1:        %016llx\n", data->mmioErrorLog1);
+       pr_info("  dma0ErrorStatus:      %016llx\n", data->dma0ErrorStatus);
+       pr_info("  dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+       pr_info("  dma0ErrorLog0:        %016llx\n", data->dma0ErrorLog0);
+       pr_info("  dma0ErrorLog1:        %016llx\n", data->dma0ErrorLog1);
+       pr_info("  dma1ErrorStatus:      %016llx\n", data->dma1ErrorStatus);
+       pr_info("  dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+       pr_info("  dma1ErrorLog0:        %016llx\n", data->dma1ErrorLog0);
+       pr_info("  dma1ErrorLog1:        %016llx\n", data->dma1ErrorLog1);
+
+       for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+               if ((data->pestA[i] >> 63) == 0 &&
+                   (data->pestB[i] >> 63) == 0)
+                       continue;
+
+               pr_info("  PE[%3d] PESTA:        %016llx\n", i, data->pestA[i]);
+               pr_info("          PESTB:        %016llx\n", data->pestB[i]);
+       }
+}
+
 static void ioda_eeh_phb_diag(struct pci_controller *hose)
 {
        struct pnv_phb *phb = hose->private_data;
@@ -728,6 +832,9 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                ioda_eeh_p7ioc_phb_diag(hose, common);
                break;
+       case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
+               ioda_eeh_phb3_phb_diag(hose, common);
+               break;
        default:
                pr_warning("%s: Unrecognized I/O chip %d\n",
                           __func__, common->ioType);
index 79663d26e6eaa1de8749c6994316737f991106ab..73b981438cc583e0ba4345129046d56b72a8b9c1 100644 (file)
@@ -144,11 +144,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
        /*
         * Enable EEH explicitly so that we will do EEH check
         * while accessing I/O stuff
-        *
-        * FIXME: Enable that for PHB3 later
         */
-       if (phb->type == PNV_PHB_IODA1)
-               eeh_subsystem_enabled = 1;
+       eeh_subsystem_enabled = 1;
 
        /* Save memory bars */
        eeh_save_bars(edev);
index 3f83e1ae26acb591fbf3614bd9afbb6ae4ffa3d3..acd9f7e96678256e2620b24494fded8e90b08ebb 100644 (file)
@@ -65,7 +65,7 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 void __init opal_nvram_init(void)
 {
        struct device_node *np;
-       const u32 *nbytes_p;
+       const __be32 *nbytes_p;
 
        np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram");
        if (np == NULL)
@@ -76,7 +76,7 @@ void __init opal_nvram_init(void)
                of_node_put(np);
                return;
        }
-       nvram_size = *nbytes_p;
+       nvram_size = be32_to_cpup(nbytes_p);
 
        printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size);
        of_node_put(np);
index 2aa7641aac9bd8080e2e42b3c0ed98ba7920c0ea..7d07c7e80ec09e9232b3cafd9c41a104992b5331 100644 (file)
@@ -37,10 +37,12 @@ unsigned long __init opal_get_boot_time(void)
        struct rtc_time tm;
        u32 y_m_d;
        u64 h_m_s_ms;
+       __be32 __y_m_d;
+       __be64 __h_m_s_ms;
        long rc = OPAL_BUSY;
 
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-               rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+               rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
                else
@@ -48,6 +50,8 @@ unsigned long __init opal_get_boot_time(void)
        }
        if (rc != OPAL_SUCCESS)
                return 0;
+       y_m_d = be32_to_cpu(__y_m_d);
+       h_m_s_ms = be64_to_cpu(__h_m_s_ms);
        opal_to_tm(y_m_d, h_m_s_ms, &tm);
        return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
@@ -58,9 +62,11 @@ void opal_get_rtc_time(struct rtc_time *tm)
        long rc = OPAL_BUSY;
        u32 y_m_d;
        u64 h_m_s_ms;
+       __be32 __y_m_d;
+       __be64 __h_m_s_ms;
 
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-               rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+               rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
                else
@@ -68,6 +74,8 @@ void opal_get_rtc_time(struct rtc_time *tm)
        }
        if (rc != OPAL_SUCCESS)
                return;
+       y_m_d = be32_to_cpu(__y_m_d);
+       h_m_s_ms = be64_to_cpu(__h_m_s_ms);
        opal_to_tm(y_m_d, h_m_s_ms, tm);
 }
 
index 8f3844535fbb2e4167ced361831e6c2aeab9a190..2a03e1e63c7a910c204486f32dc9957c4c6cd47e 100644 (file)
@@ -34,7 +34,7 @@
        mtmsrd  r12,1;                  \
        LOAD_REG_ADDR(r0,.opal_return); \
        mtlr    r0;                     \
-       li      r0,MSR_DR|MSR_IR;       \
+       li      r0,MSR_DR|MSR_IR|MSR_LE;\
        andc    r12,r12,r0;             \
        li      r0,token;               \
        mtspr   SPRN_HSRR1,r12;         \
        hrfid
 
 _STATIC(opal_return)
+       /*
+        * Fixup endian on OPAL return... we should be able to simplify
+        * this by instead converting the below trampoline to a set of
+        * bytes (always BE) since MSR:LE will end up fixed up as a side
+        * effect of the rfid.
+        */
+       FIXUP_ENDIAN
        ld      r2,PACATOC(r13);
        ld      r4,8(r1);
        ld      r5,16(r1);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
new file mode 100644 (file)
index 0000000..3ed5c64
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * PowerNV SCOM bus handling.
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/opal.h>
+#include <asm/scom.h>
+
+/*
+ * We could probably fit this inside the scom_map_t,
+ * which is a void * after all, but it's really too ugly,
+ * so let's kmalloc it for now.
+ */
+struct opal_scom_map {
+       uint32_t chip;
+       uint32_t addr;
+};
+
+static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count)
+{
+       struct opal_scom_map *m;
+       const __be32 *gcid;
+
+       if (!of_get_property(dev, "scom-controller", NULL)) {
+               pr_err("%s: device %s is not a SCOM controller\n",
+                       __func__, dev->full_name);
+               return SCOM_MAP_INVALID;
+       }
+       gcid = of_get_property(dev, "ibm,chip-id", NULL);
+       if (!gcid) {
+               pr_err("%s: device %s has no ibm,chip-id\n",
+                       __func__, dev->full_name);
+               return SCOM_MAP_INVALID;
+       }
+       m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL);
+       if (!m)
+               return NULL;
+       m->chip = be32_to_cpup(gcid);
+       m->addr = reg;
+
+       return (scom_map_t)m;
+}
+
+static void opal_scom_unmap(scom_map_t map)
+{
+       kfree(map);
+}
+
+static int opal_xscom_err_xlate(int64_t rc)
+{
+       switch(rc) {
+       case 0:
+               return 0;
+       /* Add more translations if necessary */
+       default:
+               return -EIO;
+       }
+}
+
+static int opal_scom_read(scom_map_t map, u32 reg, u64 *value)
+{
+       struct opal_scom_map *m = map;
+       int64_t rc;
+
+       rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+       return opal_xscom_err_xlate(rc);
+}
+
+static int opal_scom_write(scom_map_t map, u32 reg, u64 value)
+{
+       struct opal_scom_map *m = map;
+       int64_t rc;
+
+       rc = opal_xscom_write(m->chip, m->addr + reg, value);
+       return opal_xscom_err_xlate(rc);
+}
+
+static const struct scom_controller opal_scom_controller = {
+       .map    = opal_scom_map,
+       .unmap  = opal_scom_unmap,
+       .read   = opal_scom_read,
+       .write  = opal_scom_write
+};
+
+static int opal_xscom_init(void)
+{
+       if (firmware_has_feature(FW_FEATURE_OPALv3))
+               scom_init(&opal_scom_controller);
+       return 0;
+}
+arch_initcall(opal_xscom_init);
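The new opal-xscom.c plugs into the generic SCOM layer via scom_init(), so every access now goes through the int-returning read/write ops and their OPAL error translation. A minimal caller sketch, assuming a device node that sits under a scom-controller; the helper name is illustrative and not part of this patch:

	/* Sketch only: map a SCOM device, read one register through the
	 * new error-returning scom_read(), and translate failure for the
	 * caller. Mirrors the pattern used by the a2/wsp callers below.
	 */
	static int example_scom_peek(struct device_node *dn, u32 reg, u64 *val)
	{
		scom_map_t map = scom_map_device(dn, 0);	/* map index 0 */
		int rc;

		if (!scom_map_ok(map))
			return -ENODEV;
		rc = scom_read(map, reg, val);	/* 0 on success, -EIO on OPAL failure */
		scom_unmap(map);
		return rc;
	}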
index 2911abe550f1d9182ce793f0ec4777b0c85295b7..09336f0c54c56e12954b454f6d562aac64508c50 100644 (file)
@@ -77,6 +77,7 @@ int __init early_init_dt_scan_opal(unsigned long node,
 
 static int __init opal_register_exception_handlers(void)
 {
+#ifdef __BIG_ENDIAN__
        u64 glue;
 
        if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
@@ -94,6 +95,7 @@ static int __init opal_register_exception_handlers(void)
                                        0, glue);
        glue += 128;
        opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
+#endif
 
        return 0;
 }
@@ -164,27 +166,28 @@ void opal_notifier_disable(void)
 
 int opal_get_chars(uint32_t vtermno, char *buf, int count)
 {
-       s64 len, rc;
-       u64 evt;
+       s64 rc;
+       __be64 evt, len;
 
        if (!opal.entry)
                return -ENODEV;
        opal_poll_events(&evt);
-       if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
+       if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
                return 0;
-       len = count;
-       rc = opal_console_read(vtermno, &len, buf);
+       len = cpu_to_be64(count);
+       rc = opal_console_read(vtermno, &len, buf);
        if (rc == OPAL_SUCCESS)
-               return len;
+               return be64_to_cpu(len);
        return 0;
 }
 
 int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
 {
        int written = 0;
+       __be64 olen;
        s64 len, rc;
        unsigned long flags;
-       u64 evt;
+       __be64 evt;
 
        if (!opal.entry)
                return -ENODEV;
@@ -199,13 +202,14 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
         */
        spin_lock_irqsave(&opal_write_lock, flags);
        if (firmware_has_feature(FW_FEATURE_OPALv2)) {
-               rc = opal_console_write_buffer_space(vtermno, &len);
+               rc = opal_console_write_buffer_space(vtermno, &olen);
+               len = be64_to_cpu(olen);
                if (rc || len < total_len) {
                        spin_unlock_irqrestore(&opal_write_lock, flags);
                        /* Closed -> drop characters */
                        if (rc)
                                return total_len;
-                       opal_poll_events(&evt);
+                       opal_poll_events(NULL);
                        return -EAGAIN;
                }
        }
@@ -216,8 +220,9 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
        rc = OPAL_BUSY;
        while(total_len > 0 && (rc == OPAL_BUSY ||
                                rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
-               len = total_len;
-               rc = opal_console_write(vtermno, &len, data);
+               olen = cpu_to_be64(total_len);
+               rc = opal_console_write(vtermno, &olen, data);
+               len = be64_to_cpu(olen);
 
                /* Closed or other error drop */
                if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
@@ -237,7 +242,8 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
                 */
                do
                        opal_poll_events(&evt);
-               while(rc == OPAL_SUCCESS && (evt & OPAL_EVENT_CONSOLE_OUTPUT));
+               while(rc == OPAL_SUCCESS &&
+                       (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
        }
        spin_unlock_irqrestore(&opal_write_lock, flags);
        return written;
@@ -360,7 +366,7 @@ int opal_machine_check(struct pt_regs *regs)
 
 static irqreturn_t opal_interrupt(int irq, void *data)
 {
-       uint64_t events;
+       __be64 events;
 
        opal_handle_interrupt(virq_to_hw(irq), &events);
 
@@ -372,7 +378,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 static int __init opal_init(void)
 {
        struct device_node *np, *consoles;
-       const u32 *irqs;
+       const __be32 *irqs;
        int rc, i, irqlen;
 
        opal_node = of_find_node_by_path("/ibm,opal");
index 74a5a5773b1fbce0c31f567ef6536e75038fff98..c639af7d4826b595f8b28ded90497f677c5ff5fa 100644 (file)
@@ -70,6 +70,16 @@ define_pe_printk_level(pe_err, KERN_ERR);
 define_pe_printk_level(pe_warn, KERN_WARNING);
 define_pe_printk_level(pe_info, KERN_INFO);
 
+/*
+ * stdcix is only supposed to be used in hypervisor real mode as per
+ * the architecture spec
+ */
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+       __asm__ __volatile__("stdcix %0,0,%1"
+               : : "r" (val), "r" (paddr) : "memory");
+}
+
 static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
        unsigned long pe;
@@ -454,10 +464,13 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
        }
 }
 
-static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
-                                        u64 *startp, u64 *endp)
+static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
+                                        struct iommu_table *tbl,
+                                        __be64 *startp, __be64 *endp, bool rm)
 {
-       u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+       __be64 __iomem *invalidate = rm ?
+               (__be64 __iomem *)pe->tce_inval_reg_phys :
+               (__be64 __iomem *)tbl->it_index;
        unsigned long start, end, inc;
 
        start = __pa(startp);
@@ -484,7 +497,10 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
 
         mb(); /* Ensure above stores are visible */
         while (start <= end) {
-                __raw_writeq(start, invalidate);
+               if (rm)
+                       __raw_rm_writeq(cpu_to_be64(start), invalidate);
+               else
+                       __raw_writeq(cpu_to_be64(start), invalidate);
                 start += inc;
         }
 
@@ -496,10 +512,12 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
 
 static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
                                         struct iommu_table *tbl,
-                                        u64 *startp, u64 *endp)
+                                        __be64 *startp, __be64 *endp, bool rm)
 {
        unsigned long start, end, inc;
-       u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+       __be64 __iomem *invalidate = rm ?
+               (__be64 __iomem *)pe->tce_inval_reg_phys :
+               (__be64 __iomem *)tbl->it_index;
 
        /* We'll invalidate DMA address in PE scope */
        start = 0x2ul << 60;
@@ -515,22 +533,25 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
        mb();
 
        while (start <= end) {
-               __raw_writeq(start, invalidate);
+               if (rm)
+                       __raw_rm_writeq(cpu_to_be64(start), invalidate);
+               else
+                       __raw_writeq(cpu_to_be64(start), invalidate);
                start += inc;
        }
 }
 
 void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
-                                u64 *startp, u64 *endp)
+                                __be64 *startp, __be64 *endp, bool rm)
 {
        struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
                                              tce32_table);
        struct pnv_phb *phb = pe->phb;
 
        if (phb->type == PNV_PHB_IODA1)
-               pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
+               pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
        else
-               pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
+               pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
 }
 
 static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
@@ -603,7 +624,9 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
                 * bus number, print that out instead.
                 */
                tbl->it_busno = 0;
-               tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+               pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
+               tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
+                               8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
                               TCE_PCI_SWINV_PAIR;
        }
@@ -681,7 +704,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
                 * bus number, print that out instead.
                 */
                tbl->it_busno = 0;
-               tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+               pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
+               tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
+                               8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
        }
        iommu_init_table(tbl, phb->hose->node);
@@ -786,8 +811,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
        struct irq_data *idata;
        struct irq_chip *ichip;
        unsigned int xive_num = hwirq - phb->msi_base;
-       uint64_t addr64;
-       uint32_t addr32, data;
+       __be32 data;
        int rc;
 
        /* No PE assigned ? bail out ... no MSI for you ! */
@@ -811,6 +835,8 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
        }
 
        if (is_64) {
+               __be64 addr64;
+
                rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
                                     &addr64, &data);
                if (rc) {
@@ -818,9 +844,11 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                pci_name(dev), rc);
                        return -EIO;
                }
-               msg->address_hi = addr64 >> 32;
-               msg->address_lo = addr64 & 0xfffffffful;
+               msg->address_hi = be64_to_cpu(addr64) >> 32;
+               msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
        } else {
+               __be32 addr32;
+
                rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
                                     &addr32, &data);
                if (rc) {
@@ -829,9 +857,9 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                        return -EIO;
                }
                msg->address_hi = 0;
-               msg->address_lo = addr32;
+               msg->address_lo = be32_to_cpu(addr32);
        }
-       msg->data = data;
+       msg->data = be32_to_cpu(data);
 
        /*
         * Change the IRQ chip for the MSI interrupts on PHB3.
@@ -1106,8 +1134,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        struct pci_controller *hose;
        struct pnv_phb *phb;
        unsigned long size, m32map_off, iomap_off, pemap_off;
-       const u64 *prop64;
-       const u32 *prop32;
+       const __be64 *prop64;
+       const __be32 *prop32;
        int len;
        u64 phb_id;
        void *aux;
@@ -1142,8 +1170,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        spin_lock_init(&phb->lock);
        prop32 = of_get_property(np, "bus-range", &len);
        if (prop32 && len == 8) {
-               hose->first_busno = prop32[0];
-               hose->last_busno = prop32[1];
+               hose->first_busno = be32_to_cpu(prop32[0]);
+               hose->last_busno = be32_to_cpu(prop32[1]);
        } else {
                pr_warn("  Broken <bus-range> on %s\n", np->full_name);
                hose->first_busno = 0;
@@ -1175,7 +1203,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        if (!prop32)
                phb->ioda.total_pe = 1;
        else
-               phb->ioda.total_pe = *prop32;
+               phb->ioda.total_pe = be32_to_cpup(prop32);
 
        phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
        /* FW Has already off top 64k of M32 space (MSI space) */
@@ -1285,7 +1313,7 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
 void __init pnv_pci_init_ioda_hub(struct device_node *np)
 {
        struct device_node *phbn;
-       const u64 *prop64;
+       const __be64 *prop64;
        u64 hub_id;
 
        pr_info("Probing IODA IO-Hub %s\n", np->full_name);
index b68db6325c1b2a222f6045d5d7d5813c1c329a1a..f8b4bd8afb2e5da39cd8632c7a6a8eae54448aeb 100644 (file)
@@ -99,7 +99,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
                                           void *tce_mem, u64 tce_size)
 {
        struct pnv_phb *phb;
-       const u64 *prop64;
+       const __be64 *prop64;
        u64 phb_id;
        int64_t rc;
        static int primary = 1;
@@ -178,7 +178,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
 void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
 {
        struct device_node *phbn;
-       const u64 *prop64;
+       const __be64 *prop64;
        u64 hub_id;
        void *tce_mem;
        uint64_t tce_per_phb;
index a28d3b5e6393fa8b9d9d11bc81210ab9db13f38b..921ae673baf3e4ae86412fb3f3a932de030062e7 100644 (file)
@@ -236,7 +236,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
 {
        s64     rc;
        u8      fstate;
-       u16     pcierr;
+       __be16  pcierr;
        u32     pe_no;
 
        /*
@@ -283,16 +283,16 @@ int pnv_pci_cfg_read(struct device_node *dn,
                break;
        }
        case 2: {
-               u16 v16;
+               __be16 v16;
                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
                                                   &v16);
-               *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
+               *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
                break;
        }
        case 4: {
-               u32 v32;
+               __be32 v32;
                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
-               *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
+               *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
                break;
        }
        default:
@@ -401,10 +401,10 @@ struct pci_ops pnv_pci_ops = {
 
 static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                         unsigned long uaddr, enum dma_data_direction direction,
-                        struct dma_attrs *attrs)
+                        struct dma_attrs *attrs, bool rm)
 {
        u64 proto_tce;
-       u64 *tcep, *tces;
+       __be64 *tcep, *tces;
        u64 rpn;
 
        proto_tce = TCE_PCI_READ; // Read allowed
@@ -412,33 +412,48 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;
 
-       tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+       tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
        rpn = __pa(uaddr) >> TCE_SHIFT;
 
        while (npages--)
-               *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT);
+               *(tcep++) = cpu_to_be64(proto_tce | (rpn++ << TCE_RPN_SHIFT));
 
        /* Some implementations won't cache invalid TCEs and thus may not
         * need that flush. We'll probably turn it_type into a bit mask
         * of flags if that becomes the case
         */
        if (tbl->it_type & TCE_PCI_SWINV_CREATE)
-               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
 
        return 0;
 }
 
-static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
+                           unsigned long uaddr,
+                           enum dma_data_direction direction,
+                           struct dma_attrs *attrs)
 {
-       u64 *tcep, *tces;
+       return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
+                       false);
+}
+
+static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
+               bool rm)
+{
+       __be64 *tcep, *tces;
 
-       tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+       tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
 
        while (npages--)
-               *(tcep++) = 0;
+               *(tcep++) = cpu_to_be64(0);
 
        if (tbl->it_type & TCE_PCI_SWINV_FREE)
-               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
+}
+
+static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
+{
+       pnv_tce_free(tbl, index, npages, false);
 }
 
 static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
@@ -446,6 +461,19 @@ static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
        return ((u64 *)tbl->it_base)[index - tbl->it_offset];
 }
 
+static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
+                           unsigned long uaddr,
+                           enum dma_data_direction direction,
+                           struct dma_attrs *attrs)
+{
+       return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
+}
+
+static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
+{
+       pnv_tce_free(tbl, index, npages, true);
+}
+
 void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset)
@@ -484,8 +512,8 @@ static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
        swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
                                 NULL);
        if (swinvp) {
-               tbl->it_busno = swinvp[1];
-               tbl->it_index = (unsigned long)ioremap(swinvp[0], 8);
+               tbl->it_busno = be64_to_cpu(swinvp[1]);
+               tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
        }
        return tbl;
@@ -610,8 +638,10 @@ void __init pnv_pci_init(void)
 
        /* Configure IOMMU DMA hooks */
        ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
-       ppc_md.tce_build = pnv_tce_build;
-       ppc_md.tce_free = pnv_tce_free;
+       ppc_md.tce_build = pnv_tce_build_vm;
+       ppc_md.tce_free = pnv_tce_free_vm;
+       ppc_md.tce_build_rm = pnv_tce_build_rm;
+       ppc_md.tce_free_rm = pnv_tce_free_rm;
        ppc_md.tce_get = pnv_tce_get;
        ppc_md.pci_probe_mode = pnv_pci_probe_mode;
        set_pci_dma_ops(&dma_iommu_ops);
index d633c64e05a1ef9204924b48a132bb505cd5e0a8..64d3b12e5b6d2661d4729a61e26dfc3bbfd98513 100644 (file)
@@ -17,7 +17,7 @@ enum pnv_phb_model {
        PNV_PHB_MODEL_PHB3,
 };
 
-#define PNV_PCI_DIAG_BUF_SIZE  4096
+#define PNV_PCI_DIAG_BUF_SIZE  8192
 #define PNV_IODA_PE_DEV                (1 << 0)        /* PE has single PCI device     */
 #define PNV_IODA_PE_BUS                (1 << 1)        /* PE has primary PCI bus       */
 #define PNV_IODA_PE_BUS_ALL    (1 << 2)        /* PE has subordinate buses     */
@@ -52,6 +52,7 @@ struct pnv_ioda_pe {
        int                     tce32_seg;
        int                     tce32_segcount;
        struct iommu_table      tce32_table;
+       phys_addr_t             tce_inval_reg_phys;
 
        /* XXX TODO: Add support for additional 64-bit iommus */
 
@@ -193,6 +194,6 @@ extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
 extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
-                                       u64 *startp, u64 *endp);
+                                       __be64 *startp, __be64 *endp, bool rm);
 
 #endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
new file mode 100644 (file)
index 0000000..02db7d7
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)    "powernv-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/archrandom.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+
+
+struct powernv_rng {
+       void __iomem *regs;
+       unsigned long mask;
+};
+
+static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+
+
+static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
+{
+       unsigned long parity;
+
+       /* Calculate the parity of the value */
+       asm ("popcntd %0,%1" : "=r" (parity) : "r" (val));
+
+       /* xor our value with the previous mask */
+       val ^= rng->mask;
+
+       /* update the mask based on the parity of this value */
+       rng->mask = (rng->mask << 1) | (parity & 1);
+
+       return val;
+}
+
+int powernv_get_random_long(unsigned long *v)
+{
+       struct powernv_rng *rng;
+
+       rng = get_cpu_var(powernv_rng);
+
+       *v = rng_whiten(rng, in_be64(rng->regs));
+
+       put_cpu_var(rng);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(powernv_get_random_long);
+
+static __init void rng_init_per_cpu(struct powernv_rng *rng,
+                                   struct device_node *dn)
+{
+       int chip_id, cpu;
+
+       chip_id = of_get_ibm_chip_id(dn);
+       if (chip_id == -1)
+               pr_warn("No ibm,chip-id found for %s.\n", dn->full_name);
+
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(powernv_rng, cpu) == NULL ||
+                   cpu_to_chip_id(cpu) == chip_id) {
+                       per_cpu(powernv_rng, cpu) = rng;
+               }
+       }
+}
+
+static __init int rng_create(struct device_node *dn)
+{
+       struct powernv_rng *rng;
+       unsigned long val;
+
+       rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+       if (!rng)
+               return -ENOMEM;
+
+       rng->regs = of_iomap(dn, 0);
+       if (!rng->regs) {
+               kfree(rng);
+               return -ENXIO;
+       }
+
+       val = in_be64(rng->regs);
+       rng->mask = val;
+
+       rng_init_per_cpu(rng, dn);
+
+       pr_info_once("Registering arch random hook.\n");
+
+       ppc_md.get_random_long = powernv_get_random_long;
+
+       return 0;
+}
+
+static __init int rng_init(void)
+{
+       struct device_node *dn;
+       int rc;
+
+       for_each_compatible_node(dn, NULL, "ibm,power-rng") {
+               rc = rng_create(dn);
+               if (rc) {
+                       pr_err("Failed creating rng for %s (%d).\n",
+                               dn->full_name, rc);
+                       continue;
+               }
+
+               /* Create devices for hwrng driver */
+               of_platform_device_create(dn, NULL, NULL);
+       }
+
+       return 0;
+}
+subsys_initcall(rng_init);
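rng_whiten() above xors each raw hardware sample with a running mask and then feeds the sample's parity back into that mask. A portable sketch of the same transform, using the GCC parity builtin instead of the popcntd instruction; this is an illustration under that assumption, not the kernel code:

	/* Whitening sketch: xor with the previous mask, then shift the raw
	 * sample's parity bit into the mask for the next call.
	 */
	static unsigned long long whiten_example(unsigned long long *mask,
						 unsigned long long raw)
	{
		unsigned long long out = raw ^ *mask;

		*mask = (*mask << 1) | (__builtin_parityll(raw) & 1ULL);
		return out;
	}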
index 6c61ec5ee914aa559bff1834692ab8a59b12e87d..fbccac9cd2dc393c2828ccacfd70e8018efcfb10 100644 (file)
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG)     += -DDEBUG
 
 obj-y                  := lpar.o hvCall.o nvram.o reconfig.o \
                           setup.o iommu.o event_sources.o ras.o \
-                          firmware.o power.o dlpar.o mobility.o
+                          firmware.o power.o dlpar.o mobility.o rng.o
 obj-$(CONFIG_SMP)      += smp.o
 obj-$(CONFIG_SCANLOG)  += scanlog.o
 obj-$(CONFIG_EEH)      += eeh_pseries.o
index 7cfdaae1721a925c1d7370515917e7d9af5cc553..a8fe5aa3d34fd545f87a31995153effd8a63521b 100644 (file)
@@ -404,46 +404,38 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
        unsigned long drc_index;
        int rc;
 
-       cpu_hotplug_driver_lock();
        rc = strict_strtoul(buf, 0, &drc_index);
-       if (rc) {
-               rc = -EINVAL;
-               goto out;
-       }
+       if (rc)
+               return -EINVAL;
 
        parent = of_find_node_by_path("/cpus");
-       if (!parent) {
-               rc = -ENODEV;
-               goto out;
-       }
+       if (!parent)
+               return -ENODEV;
 
        dn = dlpar_configure_connector(drc_index, parent);
-       if (!dn) {
-               rc = -EINVAL;
-               goto out;
-       }
+       if (!dn)
+               return -EINVAL;
 
        of_node_put(parent);
 
        rc = dlpar_acquire_drc(drc_index);
        if (rc) {
                dlpar_free_cc_nodes(dn);
-               rc = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        rc = dlpar_attach_node(dn);
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
-               goto out;
+               return rc;
        }
 
        rc = dlpar_online_cpu(dn);
-out:
-       cpu_hotplug_driver_unlock();
+       if (rc)
+               return rc;
 
-       return rc ? rc : count;
+       return count;
 }
 
 static int dlpar_offline_cpu(struct device_node *dn)
@@ -516,30 +508,27 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
                return -EINVAL;
        }
 
-       cpu_hotplug_driver_lock();
        rc = dlpar_offline_cpu(dn);
        if (rc) {
                of_node_put(dn);
-               rc = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        rc = dlpar_release_drc(*drc_index);
        if (rc) {
                of_node_put(dn);
-               goto out;
+               return rc;
        }
 
        rc = dlpar_detach_node(dn);
        if (rc) {
                dlpar_acquire_drc(*drc_index);
-               goto out;
+               return rc;
        }
 
        of_node_put(dn);
-out:
-       cpu_hotplug_driver_unlock();
-       return rc ? rc : count;
+
+       return count;
 }
 
 static int __init pseries_dlpar_init(void)
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
new file mode 100644 (file)
index 0000000..a702f1c
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)    "pseries-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <asm/archrandom.h>
+#include <asm/machdep.h>
+
+
+static int pseries_get_random_long(unsigned long *v)
+{
+       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+       if (plpar_hcall(H_RANDOM, retbuf) == H_SUCCESS) {
+               *v = retbuf[0];
+               return 1;
+       }
+
+       return 0;
+}
+
+static __init int rng_init(void)
+{
+       struct device_node *dn;
+
+       dn = of_find_compatible_node(NULL, NULL, "ibm,random");
+       if (!dn)
+               return -ENODEV;
+
+       pr_info("Registering arch random hook.\n");
+
+       ppc_md.get_random_long = pseries_get_random_long;
+
+       return 0;
+}
+subsys_initcall(rng_init);
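Both new rng.c files register through the same ppc_md.get_random_long hook and follow the same convention: return 1 with *v filled on success, 0 when no entropy was available. A hypothetical consumer honouring that convention (the function name and error code are illustrative only, not from this series):

	/* Sketch: report -ENODEV when no arch entropy hook is registered
	 * or it has nothing to give, otherwise hand back the value.
	 */
	static int get_seed_example(unsigned long *v)
	{
		if (ppc_md.get_random_long && ppc_md.get_random_long(v))
			return 0;
		return -ENODEV;
	}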
index 1c1771a402501d00328c450cfaeb5942f360d3f4..24f58cb0a543ece1cb92242b36ec8e18c145f5be 100644 (file)
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
 
        alloc_bootmem_cpumask_var(&of_spin_mask);
 
-       /* Mark threads which are still spinning in hold loops. */
-       if (cpu_has_feature(CPU_FTR_SMT)) {
-               for_each_present_cpu(i) { 
-                       if (cpu_thread_in_core(i) == 0)
-                               cpumask_set_cpu(i, of_spin_mask);
-               }
-       } else {
-               cpumask_copy(of_spin_mask, cpu_present_mask);
+       /*
+        * Mark threads which are still spinning in hold loops
+        *
+        * We know prom_init will not have started them if RTAS supports
+        * query-cpu-stopped-state.
+        */
+       if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+               if (cpu_has_feature(CPU_FTR_SMT)) {
+                       for_each_present_cpu(i) {
+                               if (cpu_thread_in_core(i) == 0)
+                                       cpumask_set_cpu(i, of_spin_mask);
+                       }
+               } else
+                       cpumask_copy(of_spin_mask, cpu_present_mask);
+
+               cpumask_clear_cpu(boot_cpuid, of_spin_mask);
        }
 
-       cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
        /* Non-lpar has additional take/give timebase */
        if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
                smp_ops->give_timebase = rtas_give_timebase;
index b56b70aeb4971c6f68a9c0815bfcadce906947ac..268bc899c1f7168ca5827920010af0b53595c228 100644 (file)
@@ -116,7 +116,14 @@ static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
 
        scom_write(scom, SCOM_RAMIC, cmd);
 
-       while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
+       for (;;) {
+               if (scom_read(scom, SCOM_RAMC, &val) != 0) {
+                       pr_err("SCOM error on instruction 0x%08x, thread %d\n",
+                              insn, thread);
+                       return -1;
+               }
+               if (val & mask)
+                       break;
                pr_devel("Waiting on RAMC = 0x%llx\n", val);
                if (++n == 3) {
                        pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
@@ -151,9 +158,7 @@ static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
        if (rc)
                return rc;
 
-       *out_gpr = scom_read(scom, SCOM_RAMD);
-
-       return 0;
+       return scom_read(scom, SCOM_RAMD, out_gpr);
 }
 
 static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
@@ -353,7 +358,10 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
 
        pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
 
-       pccr0 = scom_read(scom, SCOM_PCCR0);
+       if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
+                       printk(KERN_ERR "XSCOM failure reading PCCR0 on CPU%d\n", lcpu);
+               return -1;
+       }
        scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
                                     SCOM_PCCR0_ENABLE_RAM);
 
index 4052e2259f3016d904a707ff3e2524e1f2f58ac6..54172c4a8a641ea4fd3f577fc581f09cfb7e5c2c 100644 (file)
@@ -50,18 +50,22 @@ static void wsp_scom_unmap(scom_map_t map)
        iounmap((void *)map);
 }
 
-static u64 wsp_scom_read(scom_map_t map, u32 reg)
+static int wsp_scom_read(scom_map_t map, u32 reg, u64 *value)
 {
        u64 __iomem *addr = (u64 __iomem *)map;
 
-       return in_be64(addr + reg);
+       *value = in_be64(addr + reg);
+
+       return 0;
 }
 
-static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
+static int wsp_scom_write(scom_map_t map, u32 reg, u64 value)
 {
        u64 __iomem *addr = (u64 __iomem *)map;
 
-       return out_be64(addr + reg, value);
+       out_be64(addr + reg, value);
+
+       return 0;
 }
 
 static const struct scom_controller wsp_scom_controller = {
index d25cc96c21b82fda4dcb44872446ec9b206968c7..ddb6efe889144dd78e15e80150e7b8a75bbb2d89 100644 (file)
@@ -89,6 +89,7 @@ void wsp_halt(void)
        struct device_node *dn;
        struct device_node *mine;
        struct device_node *me;
+       int rc;
 
        me = of_get_cpu_node(smp_processor_id(), NULL);
        mine = scom_find_parent(me);
@@ -101,15 +102,15 @@ void wsp_halt(void)
 
                /* read-modify-write it so the HW probe does not get
                 * confused */
-               val = scom_read(m, 0);
-               val |= 1;
-               scom_write(m, 0, val);
+               rc = scom_read(m, 0, &val);
+               if (rc == 0)
+                       scom_write(m, 0, val | 1);
                scom_unmap(m);
        }
        m = scom_map(mine, 0, 1);
-       val = scom_read(m, 0);
-       val |= 1;
-       scom_write(m, 0, val);
+       rc = scom_read(m, 0, &val);
+       if (rc == 0)
+               scom_write(m, 0, val | 1);
        /* should never return */
        scom_unmap(m);
 }
index ab4cb54764721870b06e3b08ee42121fc7fb7ea4..13ec968be4c7a25ca79fc480a7aab26e514b3bb8 100644 (file)
@@ -28,7 +28,7 @@ config PPC_SCOM
 
 config SCOM_DEBUGFS
        bool "Expose SCOM controllers via debugfs"
-       depends on PPC_SCOM
+       depends on PPC_SCOM && DEBUG_FS
        default n
 
 config GE_FPGA
index ccfb50ddfe38f7242b071c6519aaa1eb43daf6a2..92e7258478d8eafcc7f3c7655446601ce4fa61d1 100644 (file)
@@ -45,7 +45,7 @@ static void quirk_fsl_pcie_header(struct pci_dev *dev)
        u8 hdr_type;
 
        /* if we aren't a PCIe don't bother */
-       if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+       if (!pci_is_pcie(dev))
                return;
 
        /* if we aren't in host mode don't bother */
index 1be54faf60dd8be75f9f149101733c4c1c92f299..bdcb8588e4926c42130935a0ef33d2d19cab4b99 100644 (file)
@@ -1088,8 +1088,14 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
         * is done here.
         */
        if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
+               int cpu;
+
+               preempt_disable();
+               cpu = mpic_processor_id(mpic);
+               preempt_enable();
+
                mpic_set_vector(virq, hw);
-               mpic_set_destination(virq, mpic_processor_id(mpic));
+               mpic_set_destination(virq, cpu);
                mpic_irq_set_priority(virq, 8);
        }
 
index 9193e12df6951ed570d0558d8d361f99af117153..3963d995648a551e0dbe355833ff3b204d6980e7 100644 (file)
@@ -53,7 +53,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
 {
        struct device_node *parent;
        unsigned int cells, size;
-       const u32 *prop;
+       const __be32 *prop, *sprop;
        u64 reg, cnt;
        scom_map_t ret;
 
@@ -62,12 +62,24 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
        if (parent == NULL)
                return 0;
 
-       prop = of_get_property(parent, "#scom-cells", NULL);
-       cells = prop ? *prop : 1;
-
+       /*
+        * We support "scom-reg" properties for adding scom registers
+        * to a random device-tree node with an explicit scom-parent
+        *
+        * We also support the simple "reg" property if the device is
+        * a direct child of a scom controller.
+        *
+        * In case both exist, "scom-reg" takes precedence.
+        */
        prop = of_get_property(dev, "scom-reg", &size);
+       sprop = of_get_property(parent, "#scom-cells", NULL);
+       if (!prop && parent == dev->parent) {
+               prop = of_get_property(dev, "reg", &size);
+               sprop = of_get_property(parent, "#address-cells", NULL);
+       }
        if (!prop)
-               return 0;
+               return NULL;
+       cells = sprop ? be32_to_cpup(sprop) : 1;
        size >>= 2;
 
        if (index >= (size / (2*cells)))
@@ -137,8 +149,7 @@ static int scom_val_get(void *data, u64 *val)
        if (!scom_map_ok(ent->map))
                return -EFAULT;
 
-       *val = scom_read(ent->map, 0);
-       return 0;
+       return scom_read(ent->map, 0, val);
 }
 DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set,
                        "0x%llx\n");
@@ -169,7 +180,7 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
 
        debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops);
        debugfs_create_file("value", 0600, dir, ent, &scom_val_fops);
-       debugfs_create_blob("path", 0400, dir, &ent->blob);
+       debugfs_create_blob("devspec", 0400, dir, &ent->blob);
 
        return 0;
 }
@@ -185,8 +196,13 @@ static int scom_debug_init(void)
                return -1;
 
        i = rc = 0;
-       for_each_node_with_property(dn, "scom-controller")
-               rc |= scom_debug_init_one(root, dn, i++);
+       for_each_node_with_property(dn, "scom-controller") {
+               int id = of_get_ibm_chip_id(dn);
+               if (id == -1)
+                       id = i;
+               rc |= scom_debug_init_one(root, dn, id);
+               i++;
+       }
 
        return rc;
 }
index 39d72212655e3706ef5ca09dacd689156bc34399..3c6ee1b64e5d1eec2bf5d03679d537f1bc5c5d53 100644 (file)
@@ -112,6 +112,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
                                 bool force)
 {
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+       __be16 oserver;
        int16_t server;
        int8_t priority;
        int64_t rc;
@@ -120,13 +121,13 @@ static int ics_opal_set_affinity(struct irq_data *d,
        if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
                return -1;
 
-       rc = opal_get_xive(hw_irq, &server, &priority);
+       rc = opal_get_xive(hw_irq, &oserver, &priority);
        if (rc != OPAL_SUCCESS) {
-               pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
-                      " error %lld\n",
-                      __func__, d->irq, hw_irq, server, rc);
+               pr_err("%s: opal_get_xive(irq=%d [hw 0x%x]) error %lld\n",
+                      __func__, d->irq, hw_irq, rc);
                return -1;
        }
+       server = be16_to_cpu(oserver);
 
        wanted_server = xics_get_irq_server(d->irq, cpumask, 1);
        if (wanted_server < 0) {
@@ -181,7 +182,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
 {
        unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
        int64_t rc;
-       int16_t server;
+       __be16 server;
        int8_t priority;
 
        if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
@@ -201,7 +202,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
 static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
 {
        int64_t rc;
-       int16_t server;
+       __be16 server;
        int8_t priority;
 
        /* Check if HAL knows about this interrupt */
@@ -215,14 +216,14 @@ static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
 static long ics_opal_get_server(struct ics *ics, unsigned long vec)
 {
        int64_t rc;
-       int16_t server;
+       __be16 server;
        int8_t priority;
 
        /* Check if HAL knows about this interrupt */
        rc = opal_get_xive(vec, &server, &priority);
        if (rc != OPAL_SUCCESS)
                return -1;
-       return ics_opal_unmangle_server(server);
+       return ics_opal_unmangle_server(be16_to_cpu(server));
 }
 
 int __init ics_opal_init(void)
index dcc6ac2d802637ab6e5fb9c004eaa69ba5e01479..a02177fb5ec1d25f9daaf54b108c7854ff870209 100644 (file)
@@ -93,16 +93,17 @@ config S390
        select ARCH_INLINE_WRITE_UNLOCK_IRQ
        select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
        select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+       select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS2
        select GENERIC_CLOCKEVENTS
        select GENERIC_CPU_DEVICES if !SMP
+       select GENERIC_FIND_FIRST_BIT
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_TIME_VSYSCALL_OLD
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-       select HAVE_ARCH_MUTEX_CPU_RELAX
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
index a7d68a467ce884d77a9c615db5fcba0cdb251e71..ecc9d4f73cc6a3479254e9e55955795772eef593 100644 (file)
@@ -35,13 +35,13 @@ endif
 
 export LD_BFD
 
-cflags-$(CONFIG_MARCH_G5)   += $(call cc-option,-march=g5)
-cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
-cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
-cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
-cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
-cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
-cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
+cflags-$(CONFIG_MARCH_G5)     += -march=g5
+cflags-$(CONFIG_MARCH_Z900)   += -march=z900
+cflags-$(CONFIG_MARCH_Z990)   += -march=z990
+cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
+cflags-$(CONFIG_MARCH_Z10)    += -march=z10
+cflags-$(CONFIG_MARCH_Z196)   += -march=z196
+cflags-$(CONFIG_MARCH_ZEC12)  += -march=zEC12
 
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE   :=arch/s390/boot/image
index 87a22092b68f8b152a7df862d4c47604e10edee5..603d2003cd9fed7563a9f61cde7dd28823080d2e 100644 (file)
@@ -204,7 +204,7 @@ static int
 appldata_timer_handler(ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int len;
+       unsigned int len;
        char buf[2];
 
        if (!*lenp || *ppos) {
@@ -246,7 +246,8 @@ static int
 appldata_interval_handler(ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int len, interval;
+       unsigned int len;
+       int interval;
        char buf[16];
 
        if (!*lenp || *ppos) {
@@ -290,7 +291,8 @@ appldata_generic_handler(ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct appldata_ops *ops = NULL, *tmp_ops;
-       int rc, len, found;
+       unsigned int len;
+       int rc, found;
        char buf[2];
        struct list_head *lh;
 
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
new file mode 100644 (file)
index 0000000..e0af2ee
--- /dev/null
@@ -0,0 +1,655 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_RDS_DEBUG=y
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_XFS_DEBUG=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_READABLE_ASM=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_SLUB_STATS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_WRITECOUNT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_PROVE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=300
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAILSLAB=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_MAKE_REQUEST=y
+CONFIG_FAIL_IO_TIMEOUT=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DMA_API_DEBUG=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
new file mode 100644 (file)
index 0000000..b9f6b4c
--- /dev/null
@@ -0,0 +1,618 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_GCOV_KERNEL=y
+CONFIG_GCOV_PROFILE_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
new file mode 100644 (file)
index 0000000..91087b4
--- /dev/null
@@ -0,0 +1,610 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
new file mode 100644 (file)
index 0000000..d725c4d
--- /dev/null
@@ -0,0 +1,86 @@
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+# CONFIG_COMPAT is not set
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_HZ_100=y
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_CHECK_STACK is not set
+# CONFIG_CHSC_SCH is not set
+# CONFIG_SCM_BUS is not set
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_IUCV is not set
+CONFIG_ATM=y
+CONFIG_ATM_LANE=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV_XPRAM is not set
+# CONFIG_DCSSBLK is not set
+# CONFIG_DASD is not set
+CONFIG_ENCLOSURE_SERVICES=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_ENCLOSURE=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ZFCP=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_HVC_IUCV is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_SCLP_ASYNC is not set
+# CONFIG_HMC_DRV is not set
+# CONFIG_S390_TAPE is not set
+# CONFIG_VMCP is not set
+# CONFIG_MONWRITER is not set
+# CONFIG_S390_VMUR is not set
+# CONFIG_HID is not set
+CONFIG_MEMSTICK=y
+CONFIG_MEMSTICK_DEBUG=y
+CONFIG_MEMSTICK_UNSAFE_RESUME=y
+CONFIG_MSPRO_BLOCK=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_INOTIFY_USER is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
index b4dbade8ca247c52106ecd72036983c850369431..46cae138ece2efa3617447d1fccf82fa504d4e7d 100644 (file)
@@ -725,6 +725,8 @@ static struct crypto_alg xts_aes_alg = {
        }
 };
 
+static int xts_aes_alg_reg;
+
 static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
 {
@@ -846,6 +848,8 @@ static struct crypto_alg ctr_aes_alg = {
        }
 };
 
+static int ctr_aes_alg_reg;
+
 static int __init aes_s390_init(void)
 {
        int ret;
@@ -884,6 +888,7 @@ static int __init aes_s390_init(void)
                ret = crypto_register_alg(&xts_aes_alg);
                if (ret)
                        goto xts_aes_err;
+               xts_aes_alg_reg = 1;
        }
 
        if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -902,6 +907,7 @@ static int __init aes_s390_init(void)
                        free_page((unsigned long) ctrblk);
                        goto ctr_aes_err;
                }
+               ctr_aes_alg_reg = 1;
        }
 
 out:
@@ -921,9 +927,12 @@ aes_err:
 
 static void __exit aes_s390_fini(void)
 {
-       crypto_unregister_alg(&ctr_aes_alg);
-       free_page((unsigned long) ctrblk);
-       crypto_unregister_alg(&xts_aes_alg);
+       if (ctr_aes_alg_reg) {
+               crypto_unregister_alg(&ctr_aes_alg);
+               free_page((unsigned long) ctrblk);
+       }
+       if (xts_aes_alg_reg)
+               crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
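The hunks above add xts_aes_alg_reg and ctr_aes_alg_reg so that module exit only unregisters the XTS and CTR algorithms when init actually registered them (they are skipped if the CPU lacks the corresponding facility). Below is a minimal userspace sketch of that conditional-teardown pattern; the feature names and helpers are hypothetical stand-ins for crypto_register_alg()/crypto_unregister_alg():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for crypto_register_alg()/crypto_unregister_alg(). */
static int register_alg(const char *name) { printf("register %s\n", name); return 0; }
static void unregister_alg(const char *name) { printf("unregister %s\n", name); }
static bool cpu_has_xts(void) { return false; }	/* pretend the optional facility is absent */

static bool xts_registered;	/* mirrors xts_aes_alg_reg in the patch */

static int demo_init(void)
{
	int ret = register_alg("aes");		/* always registered */

	if (ret)
		return ret;
	if (cpu_has_xts()) {			/* optional, hardware dependent */
		ret = register_alg("xts(aes)");
		if (ret) {
			unregister_alg("aes");
			return ret;
		}
		xts_registered = true;		/* remember that init really registered it */
	}
	return 0;
}

static void demo_exit(void)
{
	if (xts_registered)			/* only undo what demo_init() did */
		unregister_alg("xts(aes)");
	unregister_alg("aes");
}

int main(void)
{
	if (demo_init() == 0)
		demo_exit();
	return 0;
}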
index d204c65bf722cf7ad35d12d3643830cee5880d8c..33f57514f4245a3835438801c99e11c749ac33a3 100644 (file)
@@ -38,13 +38,14 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
-# CONFIG_EFI_PARTITION is not set
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z196=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -152,6 +153,7 @@ CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
index c797832daa5f596bac9da8f5075d1bd2e006559c..12c5ec156502a87306e093396b7d195b7b4fa50d 100644 (file)
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __CS_LOOP(ptr, op_val, op_string) ({                           \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR    "lao"
+#define __ATOMIC_AND   "lan"
+#define __ATOMIC_ADD   "laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string)                          \
+({                                                                     \
+       int old_val;                                                    \
+                                                                       \
+       typecheck(atomic_t *, ptr);                                     \
+       asm volatile(                                                   \
+               op_string "     %0,%2,%1\n"                             \
+               : "=d" (old_val), "+Q" ((ptr)->counter)                 \
+               : "d" (op_val)                                          \
+               : "cc", "memory");                                      \
+       old_val;                                                        \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR    "or"
+#define __ATOMIC_AND   "nr"
+#define __ATOMIC_ADD   "ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string)                          \
+({                                                                     \
        int old_val, new_val;                                           \
+                                                                       \
+       typecheck(atomic_t *, ptr);                                     \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
-               : "=&d" (old_val), "=&d" (new_val),                     \
-                 "=Q" (((atomic_t *)(ptr))->counter)                   \
-               : "d" (op_val),  "Q" (((atomic_t *)(ptr))->counter)     \
+               : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+               : "d" (op_val)                                          \
                : "cc", "memory");                                      \
-       new_val;                                                        \
+       old_val;                                                        \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int atomic_read(const atomic_t *v)
 {
        int c;
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-       return __CS_LOOP(v, i, "ar");
+       return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
 }
-#define atomic_add(_i, _v)             atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)                 atomic_add_return(1, _v)
-#define atomic_inc_return(_v)          atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-       return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+               asm volatile(
+                       "asi    %0,%1\n"
+                       : "+Q" (v->counter)
+                       : "i" (i)
+                       : "cc", "memory");
+       } else {
+               atomic_add_return(i, v);
+       }
+#else
+       atomic_add_return(i, v);
+#endif
 }
-#define atomic_sub(_i, _v)             atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                 atomic_add(1, _v)
+#define atomic_inc_return(_v)          atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v)             atomic_add(-(int)_i, _v)
+#define atomic_sub_return(_i, _v)      atomic_add_return(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)    (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)                 atomic_sub_return(1, _v)
+#define atomic_dec(_v)                 atomic_sub(1, _v)
 #define atomic_dec_return(_v)          atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)                (atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-       __CS_LOOP(v, ~mask, "nr");
+       __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-       __CS_LOOP(v, mask, "or");
+       __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        asm volatile(
                "       cs      %0,%2,%1"
-               : "+d" (old), "=Q" (v->counter)
-               : "d" (new), "Q" (v->counter)
+               : "+d" (old), "+Q" (v->counter)
+               : "d" (new)
                : "cc", "memory");
        return old;
 }
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
 
 #define ATOMIC64_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define __CSG_LOOP(ptr, op_val, op_string) ({                          \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR  "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string)                                \
+({                                                                     \
+       long long old_val;                                              \
+                                                                       \
+       typecheck(atomic64_t *, ptr);                                   \
+       asm volatile(                                                   \
+               op_string "     %0,%2,%1\n"                             \
+               : "=d" (old_val), "+Q" ((ptr)->counter)                 \
+               : "d" (op_val)                                          \
+               : "cc", "memory");                                      \
+       old_val;                                                        \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR  "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string)                                \
+({                                                                     \
        long long old_val, new_val;                                     \
+                                                                       \
+       typecheck(atomic64_t *, ptr);                                   \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
-               : "=&d" (old_val), "=&d" (new_val),                     \
-                 "=Q" (((atomic_t *)(ptr))->counter)                   \
-               : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)      \
+               : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+               : "d" (op_val)                                          \
                : "cc", "memory");                                      \
-       new_val;                                                        \
+       old_val;                                                        \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline long long atomic64_read(const atomic64_t *v)
 {
        long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-       return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-       return __CSG_LOOP(v, i, "sgr");
+       return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
 }
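One subtlety worth spelling out: both the laag form and the csg fallback loop above hand back the old counter value, which is why atomic64_add_return() adds the operand once more before returning. A user-space sketch of the same fetch-old semantics, using the GCC __atomic builtin purely for illustration (not part of the patch):

    #include <stdio.h>

    static long long my_add_return(long long *counter, long long i)
    {
            /* fetch-and-add returns the old value, just like laag */
            return __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST) + i;
    }

    int main(void)
    {
            long long v = 40;
            long long ret = my_add_return(&v, 2);

            printf("%lld %lld\n", ret, v);  /* 42 42 */
            return 0;
    }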
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-       __CSG_LOOP(v, ~mask, "ngr");
+       __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-       __CSG_LOOP(v, mask, "ogr");
+       __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
        asm volatile(
                "       csg     %0,%2,%1"
-               : "+d" (old), "=Q" (v->counter)
-               : "d" (new), "Q" (v->counter)
+               : "+d" (old), "+Q" (v->counter)
+               : "d" (new)
                : "cc", "memory");
        return old;
 }
 
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
 
 #else /* CONFIG_64BIT */
 
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
                "       lm      %0,%N0,%1\n"
                "0:     cds     %0,%2,%1\n"
                "       jl      0b\n"
-               : "=&d" (rp_old), "=Q" (v->counter)
-               : "d" (rp_new), "Q" (v->counter)
+               : "=&d" (rp_old), "+Q" (v->counter)
+               : "d" (rp_new)
                : "cc");
        return rp_old.pair;
 }
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 
        asm volatile(
                "       cds     %0,%2,%1"
-               : "+&d" (rp_old), "=Q" (v->counter)
-               : "d" (rp_new), "Q" (v->counter)
+               : "+&d" (rp_old), "+Q" (v->counter)
+               : "d" (rp_new)
                : "cc");
        return rp_old.pair;
 }
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
        return new;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-       long long old, new;
-
-       do {
-               old = atomic64_read(v);
-               new = old - i;
-       } while (atomic64_cmpxchg(v, old, new) != old);
-       return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
        long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 
 #endif /* CONFIG_64BIT */
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+               asm volatile(
+                       "agsi   %0,%1\n"
+                       : "+Q" (v->counter)
+                       : "i" (i)
+                       : "cc", "memory");
+       } else {
+               atomic64_add_return(i, v);
+       }
+#else
+       atomic64_add_return(i, v);
+#endif
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
        long long c, old;
 
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
        for (;;) {
                if (unlikely(c == u))
                        break;
-               old = atomic64_cmpxchg(v, c, c + a);
+               old = atomic64_cmpxchg(v, c, c + i);
                if (likely(old == c))
                        break;
                c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
        return dec;
 }
 
-#define atomic64_add(_i, _v)           atomic64_add_return(_i, _v)
 #define atomic64_add_negative(_i, _v)  (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)               atomic64_add_return(1, _v)
+#define atomic64_inc(_v)               atomic64_add(1, _v)
 #define atomic64_inc_return(_v)                atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)      (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v)           atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v)    atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v)           atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)  (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)               atomic64_sub_return(1, _v)
+#define atomic64_dec(_v)               atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)                atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
index 10135a38673c04894c69e36ee244a756ec28d30e..6e6ad06808293b7e88949351f647e516af8f16b2 100644 (file)
@@ -1,10 +1,40 @@
 /*
- *  S390 version
- *    Copyright IBM Corp. 1999
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2013
  *
- *  Derived from "include/asm-i386/bitops.h"
- *    Copyright (C) 1992, Linus Torvalds
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ *   |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on similar bit array layouts, but are
+ * byte-oriented:
+ *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
+ * number field needs to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
  *
  */
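The XOR conversions described above are easy to sanity-check outside the kernel. This sketch (illustration only, 64-bit build assumed, not part of the patch) converts one little-endian and one MSB0 bit number to the native LSB0 number that the generic bitops operate on:

    #include <stdio.h>

    #define BITS_PER_LONG 64

    int main(void)
    {
            unsigned long nr = 70;                               /* bit number in the foreign numbering */
            unsigned long from_le   = nr ^ 0x38;                 /* flip bits 3-5 */
            unsigned long from_msb0 = nr ^ (BITS_PER_LONG - 1);  /* flip bits 0-5 */

            printf("le bit %lu   -> native bit %lu\n", nr, from_le);    /* 126 */
            printf("msb0 bit %lu -> native bit %lu\n", nr, from_msb0);  /* 121 */
            return 0;
    }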
 
 #error only <linux/bitops.h> can be included directly
 #endif
 
+#include <linux/typecheck.h>
 #include <linux/compiler.h>
 
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _oi_bitmap[];
-extern const char _ni_bitmap[];
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
-
 #ifndef CONFIG_64BIT
 
 #define __BITOPS_OR            "or"
 #define __BITOPS_AND           "nr"
 #define __BITOPS_XOR           "xr"
 
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)        \
+#define __BITOPS_LOOP(__addr, __val, __op_string)              \
+({                                                             \
+       unsigned long __old, __new;                             \
+                                                               \
+       typecheck(unsigned long *, (__addr));                   \
        asm volatile(                                           \
                "       l       %0,%2\n"                        \
                "0:     lr      %1,%0\n"                        \
                __op_string "   %1,%3\n"                        \
                "       cs      %0,%1,%2\n"                     \
                "       jl      0b"                             \
-               : "=&d" (__old), "=&d" (__new),                 \
-                 "=Q" (*(unsigned long *) __addr)              \
-               : "d" (__val), "Q" (*(unsigned long *) __addr)  \
-               : "cc");
+               : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+               : "d" (__val)                                   \
+               : "cc");                                        \
+       __old;                                                  \
+})
 
 #else /* CONFIG_64BIT */
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR            "laog"
+#define __BITOPS_AND           "lang"
+#define __BITOPS_XOR           "laxg"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string)              \
+({                                                             \
+       unsigned long __old;                                    \
+                                                               \
+       typecheck(unsigned long *, (__addr));                   \
+       asm volatile(                                           \
+               __op_string "   %0,%2,%1\n"                     \
+               : "=d" (__old), "+Q" (*(__addr))                \
+               : "d" (__val)                                   \
+               : "cc");                                        \
+       __old;                                                  \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 #define __BITOPS_OR            "ogr"
 #define __BITOPS_AND           "ngr"
 #define __BITOPS_XOR           "xgr"
 
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)        \
+#define __BITOPS_LOOP(__addr, __val, __op_string)              \
+({                                                             \
+       unsigned long __old, __new;                             \
+                                                               \
+       typecheck(unsigned long *, (__addr));                   \
        asm volatile(                                           \
                "       lg      %0,%2\n"                        \
                "0:     lgr     %1,%0\n"                        \
                __op_string "   %1,%3\n"                        \
                "       csg     %0,%1,%2\n"                     \
                "       jl      0b"                             \
-               : "=&d" (__old), "=&d" (__new),                 \
-                 "=Q" (*(unsigned long *) __addr)              \
-               : "d" (__val), "Q" (*(unsigned long *) __addr)  \
-               : "cc");
+               : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+               : "d" (__val)                                   \
+               : "cc");                                        \
+       __old;                                                  \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #endif /* CONFIG_64BIT */
 
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
-#ifdef CONFIG_SMP
-/*
- * SMP safe set_bit routine based on compare and swap (CS)
- */
-static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+       unsigned long addr;
+
+       addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+       return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
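Both helpers reduce to byte-offset arithmetic: __bitops_word() clears the low six bits of the bit number to locate the containing unsigned long, and __bitops_byte() selects the big-endian byte within the array that holds the bit. The same expressions, checked in user space (illustration only, 64-bit assumed):

    #include <stdio.h>

    #define BITS_PER_LONG 64

    int main(void)
    {
            unsigned long nr = 70;
            unsigned long word_off = (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
            unsigned long byte_off = (nr ^ (BITS_PER_LONG - 8)) >> 3;

            printf("word byte offset: %lu\n", word_off);    /* 8: second long */
            printf("byte offset:      %lu\n", byte_off);    /* 15: last byte of that long */
            return 0;
    }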
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make OR mask */
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+       if (__builtin_constant_p(nr)) {
+               unsigned char *caddr = __bitops_byte(nr, ptr);
+
+               asm volatile(
+                       "oi     %0,%b1\n"
+                       : "+Q" (*caddr)
+                       : "i" (1 << (nr & 7))
+                       : "cc");
+               return;
+       }
+#endif
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+       __BITOPS_LOOP(addr, mask, __BITOPS_OR);
 }
 
-/*
- * SMP safe clear_bit routine based on compare and swap (CS)
- */
-static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+       if (__builtin_constant_p(nr)) {
+               unsigned char *caddr = __bitops_byte(nr, ptr);
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make AND mask */
+               asm volatile(
+                       "ni     %0,%b1\n"
+                       : "+Q" (*caddr)
+                       : "i" (~(1 << (nr & 7)))
+                       : "cc");
+               return;
+       }
+#endif
        mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+       __BITOPS_LOOP(addr, mask, __BITOPS_AND);
 }
 
-/*
- * SMP safe change_bit routine based on compare and swap (CS)
- */
-static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+       if (__builtin_constant_p(nr)) {
+               unsigned char *caddr = __bitops_byte(nr, ptr);
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make XOR mask */
+               asm volatile(
+                       "xi     %0,%b1\n"
+                       : "+Q" (*caddr)
+                       : "i" (1 << (nr & 7))
+                       : "cc");
+               return;
+       }
+#endif
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+       __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
 }
 
-/*
- * SMP safe test_and_set_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long old, mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make OR/test mask */
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+       old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
        barrier();
        return (old & mask) != 0;
 }
 
-/*
- * SMP safe test_and_clear_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long old, mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make AND/test mask */
        mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+       old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
        barrier();
-       return (old ^ new) != 0;
+       return (old & ~mask) != 0;
 }
 
-/*
- * SMP safe test_and_change_bit routine based on compare and swap (CS) 
- */
 static inline int
-test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long old, mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make XOR/test mask */
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+       old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
        barrier();
        return (old & mask) != 0;
 }
-#endif /* CONFIG_SMP */
 
-/*
- * fast, non-SMP set_bit routine
- */
 static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       asm volatile(
-               "       oc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void 
-__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
 
-       addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       *(unsigned char *) addr |= 1 << (nr & 7);
+       *addr |= 1 << (nr & 7);
 }
 
-#define set_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_set_bit((nr),(addr)) : \
- __set_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP clear_bit routine
- */
 static inline void 
 __clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       asm volatile(
-               "       nc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
-}
-
-static inline void 
-__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
 
-       addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       *(unsigned char *) addr &= ~(1 << (nr & 7));
+       *addr &= ~(1 << (nr & 7));
 }
 
-#define clear_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_clear_bit((nr),(addr)) : \
- __clear_bit((nr),(addr)) )
-
-/* 
- * fast, non-SMP change_bit routine 
- */
 static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       asm volatile(
-               "       xc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void 
-__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) 
-{
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
 
-       addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       *(unsigned char *) addr ^= 1 << (nr & 7);
+       *addr ^= 1 << (nr & 7);
 }
 
-#define change_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_change_bit((nr),(addr)) : \
- __change_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP test_and_set_bit routine
- */
 static inline int
-test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
        unsigned char ch;
 
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(unsigned char *) addr;
-       asm volatile(
-               "       oc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
-               : "cc", "memory");
+       ch = *addr;
+       *addr |= 1 << (nr & 7);
        return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_set_bit(X,Y)                test_and_set_bit_simple(X,Y)
 
-/*
- * fast, non-SMP test_and_clear_bit routine
- */
 static inline int
-test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
        unsigned char ch;
 
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(unsigned char *) addr;
-       asm volatile(
-               "       nc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
-               : "cc", "memory");
+       ch = *addr;
+       *addr &= ~(1 << (nr & 7));
        return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_clear_bit(X,Y)      test_and_clear_bit_simple(X,Y)
 
-/*
- * fast, non-SMP test_and_change_bit routine
- */
 static inline int
-test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
        unsigned char ch;
 
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(unsigned char *) addr;
-       asm volatile(
-               "       xc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
-               : "cc", "memory");
+       ch = *addr;
+       *addr ^= 1 << (nr & 7);
        return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_change_bit(X,Y)     test_and_change_bit_simple(X,Y)
-
-#ifdef CONFIG_SMP
-#define set_bit             set_bit_cs
-#define clear_bit           clear_bit_cs
-#define change_bit          change_bit_cs
-#define test_and_set_bit    test_and_set_bit_cs
-#define test_and_clear_bit  test_and_clear_bit_cs
-#define test_and_change_bit test_and_change_bit_cs
-#else
-#define set_bit             set_bit_simple
-#define clear_bit           clear_bit_simple
-#define change_bit          change_bit_simple
-#define test_and_set_bit    test_and_set_bit_simple
-#define test_and_clear_bit  test_and_clear_bit_simple
-#define test_and_change_bit test_and_change_bit_simple
-#endif
-
-
-/*
- * This routine doesn't need to be atomic.
- */
 
-static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 {
-       unsigned long addr;
-       unsigned char ch;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(volatile unsigned char *) addr;
-       return (ch >> (nr & 7)) & 1;
-}
+       const volatile unsigned char *addr;
 
-static inline int 
-__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
-    return (((volatile char *) addr)
-           [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
+       addr = ((const volatile unsigned char *)ptr);
+       addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+       return (*addr >> (nr & 7)) & 1;
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)) )
-
 /*
- * Optimized find bit helper functions.
- */
-
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  */
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
-                                           unsigned long size)
-{
-       typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-       unsigned long bytes = 0;
-
-       asm volatile(
-#ifndef CONFIG_64BIT
-               "       ahi     %1,-1\n"
-               "       sra     %1,5\n"
-               "       jz      1f\n"
-               "0:     c       %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,4(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#else
-               "       aghi    %1,-1\n"
-               "       srag    %1,%1,6\n"
-               "       jz      1f\n"
-               "0:     cg      %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,8(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#endif
-               : "+&a" (bytes), "+&d" (size)
-               : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
-               : "cc" );
-       return bytes;
-}
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+                               unsigned long offset);
 
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
-                                           unsigned long size)
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-       typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-       unsigned long bytes = 0;
-
-       asm volatile(
-#ifndef CONFIG_64BIT
-               "       ahi     %1,-1\n"
-               "       sra     %1,5\n"
-               "       jz      1f\n"
-               "0:     c       %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,4(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#else
-               "       aghi    %1,-1\n"
-               "       srag    %1,%1,6\n"
-               "       jz      1f\n"
-               "0:     cg      %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,8(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#endif
-               : "+&a" (bytes), "+&a" (size)
-               : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
-               : "cc" );
-       return bytes;
+       return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-#ifdef CONFIG_64BIT
-       if ((word & 0xffffffff) == 0xffffffff) {
-               word >>= 32;
-               nr += 32;
-       }
-#endif
-       if ((word & 0xffff) == 0xffff) {
-               word >>= 16;
-               nr += 16;
-       }
-       if ((word & 0xff) == 0xff) {
-               word >>= 8;
-               nr += 8;
-       }
-       return nr + _zb_findmap[(unsigned char) word];
+       return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-#ifdef CONFIG_64BIT
-       if ((word & 0xffffffff) == 0) {
-               word >>= 32;
-               nr += 32;
-       }
-#endif
-       if ((word & 0xffff) == 0) {
-               word >>= 16;
-               nr += 16;
-       }
-       if ((word & 0xff) == 0) {
-               word >>= 8;
-               nr += 8;
-       }
-       return nr + _sb_findmap[(unsigned char) word];
+       return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
-                                           unsigned long offset)
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-       p = (unsigned long *)((unsigned long) p + offset);
-       return *p;
+       return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
-                                           unsigned long offset)
+static inline int test_bit_inv(unsigned long nr,
+                              const volatile unsigned long *ptr)
 {
-       unsigned long word;
-
-       p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
-       asm volatile(
-               "       ic      %0,%O1(%R1)\n"
-               "       icm     %0,2,%O1+1(%R1)\n"
-               "       icm     %0,4,%O1+2(%R1)\n"
-               "       icm     %0,8,%O1+3(%R1)"
-               : "=&d" (word) : "Q" (*p) : "cc");
-#else
-       asm volatile(
-               "       lrvg    %0,%1"
-               : "=d" (word) : "m" (*p) );
-#endif
-       return word;
+       return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
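Because each _inv wrapper only XORs the bit number with BITS_PER_LONG - 1, MSB0 bit 0 is native bit 63 of the first long, i.e. the most significant bit of the first word in storage. A small user-space illustration (64-bit assumed, not part of the patch):

    #include <stdio.h>

    #define BITS_PER_LONG 64

    int main(void)
    {
            unsigned long map[2] = { 0, 0 };
            unsigned long nr = 0;                             /* MSB0 bit number */
            unsigned long native = nr ^ (BITS_PER_LONG - 1);  /* -> LSB0 bit 63 */

            map[native / BITS_PER_LONG] |= 1UL << (native % BITS_PER_LONG);
            printf("map[0] = %#lx\n", map[0]);                /* 0x8000000000000000 */
            return 0;
    }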
 
-/*
- * The various find bit functions.
- */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 
-/*
- * ffz - find first zero in word.
- * @word: The word to search
+/**
+ * __flogr - find leftmost one
+ * @word: the word to search
  *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
-       return __ffz_word(0, word);
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+       if (__builtin_constant_p(word)) {
+               unsigned long bit = 0;
+
+               if (!word)
+                       return 64;
+               if (!(word & 0xffffffff00000000UL)) {
+                       word <<= 32;
+                       bit += 32;
+               }
+               if (!(word & 0xffff000000000000UL)) {
+                       word <<= 16;
+                       bit += 16;
+               }
+               if (!(word & 0xff00000000000000UL)) {
+                       word <<= 8;
+                       bit += 8;
+               }
+               if (!(word & 0xf000000000000000UL)) {
+                       word <<= 4;
+                       bit += 4;
+               }
+               if (!(word & 0xc000000000000000UL)) {
+                       word <<= 2;
+                       bit += 2;
+               }
+               if (!(word & 0x8000000000000000UL)) {
+                       word <<= 1;
+                       bit += 1;
+               }
+               return bit;
+       } else {
+               register unsigned long bit asm("4") = word;
+               register unsigned long out asm("5");
+
+               asm volatile(
+                       "       flogr   %[bit],%[bit]\n"
+                       : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+               return bit;
+       }
 }
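The non-constant branch issues the flogr instruction; the constant-folded branch above is simply a count of leading zero bits. For nonzero input the result matches __builtin_clzl() on a 64-bit build, which makes it easy to spot-check in user space (illustration only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long words[] = { 1UL, 0x8000000000000000UL, 0xf0UL };

            for (int i = 0; i < 3; i++)
                    printf("__flogr(%#lx) == %d\n", words[i],
                           __builtin_clzl(words[i]));   /* 63, 0, 56 */
            return 0;
    }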
 
 /**
@@ -573,337 +395,83 @@ static inline unsigned long ffz(unsigned long word)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
-       return __ffs_word(0, word);
+       return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
 }
 
 /**
  * ffs - find first bit set
- * @x: the word to search
+ * @word: the word to search
  *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
  */
-static inline int ffs(int x)
+static inline int ffs(int word)
 {
-       if (!x)
-               return 0;
-       return __ffs_word(1, x);
+       unsigned long mask = 2 * BITS_PER_LONG - 1;
+       unsigned int val = (unsigned int)word;
+
+       return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
 }
 
 /**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
  *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
+ * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
-                                               unsigned long size)
+static inline unsigned long __fls(unsigned long word)
 {
-       unsigned long bytes, bits;
-
-        if (!size)
-                return 0;
-       bytes = __ffz_word_loop(addr, size);
-       bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
-       return (bits < size) ? bits : size;
+       return __flogr(word) ^ (BITS_PER_LONG - 1);
 }
-#define find_first_zero_bit find_first_zero_bit
 
 /**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
  *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
-                                          unsigned long size)
-{
-       unsigned long bytes, bits;
-
-        if (!size)
-                return 0;
-       bytes = __ffs_word_loop(addr, size);
-       bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
-       return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
- */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
-       register unsigned long bit asm("2") = val;
-       register unsigned long out asm("3");
-
-       asm volatile (
-               "       .insn   rre,0xb9830000,%[bit],%[bit]\n"
-               : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
-       return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
  */
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
-                                               unsigned long size)
-{
-       unsigned long bytes, bits;
-
-       if (!size)
-               return 0;
-       bytes = __ffs_word_loop(addr, size);
-       bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
-       return (bits < size) ? bits : size;
-}
-
-static inline int find_next_bit_left(const unsigned long *addr,
-                                    unsigned long size,
-                                    unsigned long offset)
+static inline int fls64(unsigned long word)
 {
-       const unsigned long *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               set = __flo_word(0, *p & (~0UL >> bit));
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_bit_left(p, size);
-}
-
-#define for_each_set_bit_left(bit, addr, size)                         \
-       for ((bit) = find_first_bit_left((addr), (size));               \
-            (bit) < (size);                                            \
-            (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size)                    \
-       for ((bit) = find_next_bit_left((addr), (size), (bit));         \
-            (bit) < (size);                                            \
-            (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+       unsigned long mask = 2 * BITS_PER_LONG - 1;
 
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
-                                     unsigned long size,
-                                     unsigned long offset)
-{
-        const unsigned long *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               /*
-                * __ffz_word returns BITS_PER_LONG
-                * if no zero bit is present in the word.
-                */
-               set = __ffz_word(bit, *p >> bit);
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_zero_bit(p, size);
+       return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
 }
-#define find_next_zero_bit find_next_zero_bit
 
 /**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline int find_next_bit (const unsigned long * addr,
-                                unsigned long size,
-                                unsigned long offset)
+static inline int fls(int word)
 {
-        const unsigned long *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               /*
-                * __ffs_word returns BITS_PER_LONG
-                * if no one bit is present in the word.
-                */
-               set = __ffs_word(0, *p & (~0UL << bit));
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_bit(p, size);
+       return fls64((unsigned int)word);
 }
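Taken together, fls() and fls64() behave like "64 minus the leading-zero count" with a guard for zero. A user-space stand-in (my_fls64() is purely illustrative and assumes a 64-bit build) reproducing the documented values:

    #include <stdio.h>

    static int my_fls64(unsigned long word)
    {
            return word ? 64 - __builtin_clzl(word) : 0;
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   my_fls64(0),                 /* 0  */
                   my_fls64(1),                 /* 1  */
                   my_fls64(0x80000000UL),      /* 32 */
                   my_fls64(1UL << 63));        /* 64 */
            return 0;
    }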
-#define find_next_bit find_next_bit
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-       return find_first_bit(b, 140);
-}
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
 
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/fls64.h>
 
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
-       unsigned long bytes, bits;
-
-        if (!size)
-                return 0;
-       bytes = __ffz_word_loop(vaddr, size);
-       bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
-       return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
-                                         unsigned long offset)
-{
-        unsigned long *addr = vaddr, *p;
-       unsigned long bit, set;
-
-        if (offset >= size)
-                return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-        if (bit) {
-               /*
-                * s390 version of ffz returns BITS_PER_LONG
-                * if no zero bit is present in the word.
-                */
-               set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-        }
-       return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
-       unsigned long bytes, bits;
-
-       if (!size)
-               return 0;
-       bytes = __ffs_word_loop(vaddr, size);
-       bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
-       return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
-                                    unsigned long offset)
-{
-       unsigned long *addr = vaddr, *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               /*
-                * s390 version of ffz returns BITS_PER_LONG
-                * if no zero bit is present in the word.
-                */
-               set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/le.h>
-
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* _S390_BITOPS_H */
index c1e7c646727cd4c71c56a5040a477a6d8618d565..4bf9da03591e7abda64ab4632f511c16b7734d16 100644 (file)
@@ -22,6 +22,7 @@
 #define PSW32_MASK_ASC         0x0000C000UL
 #define PSW32_MASK_CC          0x00003000UL
 #define PSW32_MASK_PM          0x00000f00UL
+#define PSW32_MASK_RI          0x00000080UL
 
 #define PSW32_MASK_USER                0x0000FF00UL
 
@@ -35,7 +36,9 @@
 #define PSW32_ASC_SECONDARY    0x00008000UL
 #define PSW32_ASC_HOME         0x0000C000UL
 
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+                        PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+                        PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
 
 #define COMPAT_USER_HZ         100
 #define COMPAT_UTS_MACHINE     "s390\0\0\0\0"
index debfda33d1f86d88a8b3bce89e6b23962c643489..9b69c0befdcaafcb46565056e10c36e884ab8065 100644 (file)
@@ -8,69 +8,59 @@
 #define __ASM_CTL_REG_H
 
 #ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({                                \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       lctlg   %1,%2,%0\n"                     \
-               : : "Q" (*(addrtype *)(&array)),                \
-                   "i" (low), "i" (high));                     \
-       })
-
-#define __ctl_store(array, low, high) ({                       \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       stctg   %1,%2,%0\n"                     \
-               : "=Q" (*(addrtype *)(&array))                  \
-               : "i" (low), "i" (high));                       \
-       })
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({                                \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       lctl    %1,%2,%0\n"                     \
-               : : "Q" (*(addrtype *)(&array)),                \
-                   "i" (low), "i" (high));                     \
-})
-
-#define __ctl_store(array, low, high) ({                       \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       stctl   %1,%2,%0\n"                     \
-               : "=Q" (*(addrtype *)(&array))                  \
-               : "i" (low), "i" (high));                       \
-       })
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({      \
-       unsigned long __dummy;          \
-       __ctl_store(__dummy, cr, cr);   \
-       __dummy |= 1UL << (bit);        \
-       __ctl_load(__dummy, cr, cr);    \
-})
-
-#define __ctl_clear_bit(cr, bit) ({    \
-       unsigned long __dummy;          \
-       __ctl_store(__dummy, cr, cr);   \
-       __dummy &= ~(1UL << (bit));     \
-       __ctl_load(__dummy, cr, cr);    \
-})
+# define __CTL_LOAD    "lctlg"
+# define __CTL_STORE   "stctg"
+#else
+# define __CTL_LOAD    "lctl"
+# define __CTL_STORE   "stctl"
+#endif
+
+#define __ctl_load(array, low, high) {                                 \
+       typedef struct { char _[sizeof(array)]; } addrtype;             \
+                                                                       \
+       BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+       asm volatile(                                                   \
+               __CTL_LOAD " %1,%2,%0\n"                                \
+               : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) {                                        \
+       typedef struct { char _[sizeof(array)]; } addrtype;             \
+                                                                       \
+       BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+       asm volatile(                                                   \
+               __CTL_STORE " %1,%2,%0\n"                               \
+               : "=Q" (*(addrtype *)(&array))                          \
+               : "i" (low), "i" (high));                               \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+       unsigned long reg;
+
+       __ctl_store(reg, cr, cr);
+       reg |= 1UL << bit;
+       __ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+       unsigned long reg;
+
+       __ctl_store(reg, cr, cr);
+       reg &= ~(1UL << bit);
+       __ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
 
 #ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 #else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
 
 #endif /* __ASM_CTL_REG_H */
index 188c5052a20ab663f878afc2c23889ec53384d8e..530c15eb01e99ad5970b556234c65d34a8bdf141 100644 (file)
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
 void debug_set_critical(void);
 void debug_stop_all(void);
 
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+       return level <= id->level;
+}
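A typical use of the new helper is to guard expensive message construction before calling one of the existing debug event functions. A kernel-context sketch; the debug area pointer, the level 4 and the message text are assumptions made for illustration:

    static void trace_slow_path(debug_info_t *dbf)
    {
            /* only format and log when level 4 is active for this area */
            if (debug_level_enabled(dbf, 4))
                    debug_text_event(dbf, 4, "slow path taken");
    }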
+
 static inline debug_entry_t*
 debug_event(debug_info_t* id, int level, void* data, int length)
 {
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644 (file)
index 0000000..04a83f5
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+/* Type of operand */
+#define OPERAND_GPR    0x1     /* Operand printed as %rx */
+#define OPERAND_FPR    0x2     /* Operand printed as %fx */
+#define OPERAND_AR     0x4     /* Operand printed as %ax */
+#define OPERAND_CR     0x8     /* Operand printed as %cx */
+#define OPERAND_DISP   0x10    /* Operand printed as displacement */
+#define OPERAND_BASE   0x20    /* Operand printed as base register */
+#define OPERAND_INDEX  0x40    /* Operand printed as index register */
+#define OPERAND_PCREL  0x80    /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100   /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200   /* Operand printed as length (+1) */
+
+
+struct s390_operand {
+       int bits;               /* The number of bits in the operand. */
+       int shift;              /* The number of bits to shift. */
+       int flags;              /* One bit syntax flags. */
+};
+
+struct s390_insn {
+       const char name[5];
+       unsigned char opfrag;
+       unsigned char format;
+};
+
+
+static inline int insn_length(unsigned char code)
+{
+       return ((((int) code + 64) >> 7) + 1) << 1;
+}
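The top two bits of the first opcode byte encode the instruction length on s390 (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes); the arithmetic above reproduces that mapping without a branch. A user-space spot check (illustration only):

    #include <stdio.h>

    static int insn_length(unsigned char code)
    {
            return ((((int) code + 64) >> 7) + 1) << 1;
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   insn_length(0x07),   /* 2: e.g. bcr  */
                   insn_length(0x47),   /* 4: e.g. bc   */
                   insn_length(0xa7),   /* 4: e.g. lhi  */
                   insn_length(0xc0));  /* 6: e.g. larl */
            return 0;
    }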
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+       return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
index ef61709950765bbdd72879c473b49d509c7adf0a..7ecb92b469b67bc45a099d9fea8bb72e375fc704 100644 (file)
@@ -12,9 +12,9 @@
 
 #define TCW_FORMAT_DEFAULT             0
 #define TCW_TIDAW_FORMAT_DEFAULT       0
-#define TCW_FLAGS_INPUT_TIDA           1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA            1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA          1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA           (1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA            (1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA          (1 << (23 - 7))
 #define TCW_FLAGS_TIDAW_FORMAT(x)      ((x) & 3) << (23 - 9)
 #define TCW_FLAGS_GET_TIDAW_FORMAT(x)  (((x) >> (23 - 9)) & 3)
 
@@ -54,11 +54,11 @@ struct tcw {
        u32 intrg;
 } __attribute__ ((packed, aligned(64)));
 
-#define TIDAW_FLAGS_LAST               1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP               1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT           1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC               1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC         1 << (7 - 4)
+#define TIDAW_FLAGS_LAST               (1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP               (1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT           (1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC               (1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC         (1 << (7 - 4))
 
 /**
  * struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
        u8 sense[32];
 } __attribute__ ((packed));
 
-#define TSA_INTRG_FLAGS_CU_STATE_VALID         1 << (7 - 0)
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID                1 << (7 - 1)
-#define TSA_INTRG_FLAGS_OP_STATE_VALID         1 << (7 - 2)
+#define TSA_INTRG_FLAGS_CU_STATE_VALID         (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID                (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID         (1 << (7 - 2))
 
 /**
  * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
 #define TSB_FORMAT_DDPC                2
 #define TSB_FORMAT_INTRG       3
 
-#define TSB_FLAGS_DCW_OFFSET_VALID     1 << (7 - 0)
-#define TSB_FLAGS_COUNT_VALID          1 << (7 - 1)
-#define TSB_FLAGS_CACHE_MISS           1 << (7 - 2)
-#define TSB_FLAGS_TIME_VALID           1 << (7 - 3)
+#define TSB_FLAGS_DCW_OFFSET_VALID     (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID          (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS           (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID           (1 << (7 - 3))
 #define TSB_FLAGS_FORMAT(x)            ((x) & 7)
 #define TSB_FORMAT(t)                  ((t)->flags & 7)
 
@@ -179,9 +179,9 @@ struct tsb {
 #define DCW_INTRG_RCQ_PRIMARY          1
 #define DCW_INTRG_RCQ_SECONDARY                2
 
-#define DCW_INTRG_FLAGS_MPM            1 < (7 - 0)
-#define DCW_INTRG_FLAGS_PPR            1 < (7 - 1)
-#define DCW_INTRG_FLAGS_CRIT           1 < (7 - 2)
+#define DCW_INTRG_FLAGS_MPM            (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR            (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT           (1 << (7 - 2))
 
 /**
  * struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
        u8  prog_data[0];
 } __attribute__ ((packed));
 
-#define DCW_FLAGS_CC           1 << (7 - 1)
+#define DCW_FLAGS_CC           (1 << (7 - 1))
 
 #define DCW_CMD_WRITE          0x01
 #define DCW_CMD_READ           0x02
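The added parentheses are not cosmetic: in C the shift operators bind more loosely than the additive operators, so an unparenthesized flag macro can expand into a different expression than the reader expects (the same hunk also fixes the DCW_INTRG_FLAGS_* macros, which used a relational '<' where a shift was intended). A hypothetical expression showing the hazard:

	/* Without the outer parentheses,
	 *	TCW_FLAGS_INPUT_TIDA + TCW_FLAGS_TCCB_TIDA
	 * expands to
	 *	1 << (23 - 5) + 1 << (23 - 6)
	 * which parses as
	 *	(1 << ((23 - 5) + 1)) << (23 - 6)	== 1 << 36
	 * rather than the intended
	 *	(1 << (23 - 5)) + (1 << (23 - 6))	== 0x60000
	 */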
index 2bd6cb897b90e86f129a6ae2b4eeca6bd495de95..2fcccc0c997cc3102dcb67e5ea2f60fc4f4200b2 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _ASM_S390_IPL_H
 #define _ASM_S390_IPL_H
 
+#include <asm/lowcore.h>
 #include <asm/types.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
  */
 extern u32 ipl_flags;
 extern u32 dump_prefix_page;
-extern unsigned int zfcpdump_prefix_array[];
+
+struct dump_save_areas {
+       struct save_area **areas;
+       int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area *dump_save_area_create(int cpu);
 
 extern void do_reipl(void);
 extern void do_halt(void);
index 6c32190dc73e880255175049fe36965e647101eb..346b1c85ffb40d890078550dc72c2dedcb275a2e 100644 (file)
@@ -15,7 +15,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("0:    brcl 0,0\n"
+       asm_volatile_goto("0:   brcl 0,0\n"
                ".pushsection __jump_table, \"aw\"\n"
                ASM_ALIGN "\n"
                ASM_PTR " 0b, %l[label], %0\n"
index 9f973d8de90ea91fbde12f11cff70c8470bf8a81..5d1f950704dc6272ec368279b2a01f82274fe220 100644 (file)
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
        pgd_t *pgd = mm->pgd;
 
        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-       if (s390_user_mode != HOME_SPACE_MODE) {
-               /* Load primary space page table origin. */
-               asm volatile(LCTL_OPCODE" 1,1,%0\n"
-                            : : "m" (S390_lowcore.user_asce) );
-       } else
-               /* Load home space page table origin. */
-               asm volatile(LCTL_OPCODE" 13,13,%0"
-                            : : "m" (S390_lowcore.user_asce) );
+       /* Load primary space page table origin. */
+       asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
        set_fs(current->thread.mm_segment);
 }
 
index 688271f5f2e452b9951599550f33ed0ddcfe0a7c..458c1f7fbc1808d48982aa0c5fe89bfe3df2098c 100644 (file)
@@ -7,5 +7,3 @@
  */
 
 #include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax() barrier()
index 1e51f2915b2eea6a1396c7dfb2d63ffc2f671ee9..316c8503a3b4fda3824ca071065f145038b34a0c 100644 (file)
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
-void storage_key_init_range(unsigned long start, unsigned long end);
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+       __storage_key_init_range(start, end);
+#endif
+}
 
 static inline void clear_page(void *page)
 {
index 1ca5d1047c71742f5e3c5681c89a4b98e4e87530..ac24b26fc065be9bad9443fc7a86cd1e24ab3abe 100644 (file)
@@ -6,14 +6,9 @@
 extern debug_info_t *pci_debug_msg_id;
 extern debug_info_t *pci_debug_err_id;
 
-#ifdef CONFIG_PCI_DEBUG
 #define zpci_dbg(imp, fmt, args...)                            \
        debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
 
-#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(imp, fmt, args...) do { } while (0)
-#endif
-
 #define zpci_err(text...)                                                      \
        do {                                                                    \
                char debug_buffer[16];                                          \
index df6eac9f0cb4e324069e3bae65246a7a99f02888..649eb62c52b3784ec0214e47482b87442d3081dd 100644 (file)
 struct zpci_fib {
        u32 fmt         :  8;   /* format */
        u32             : 24;
-       u32 reserved1;
+       u32             : 32;
        u8 fc;                  /* function controls */
-       u8 reserved2;
-       u16 reserved3;
-       u32 reserved4;
+       u64             : 56;
        u64 pba;                /* PCI base address */
        u64 pal;                /* PCI address limit */
        u64 iota;               /* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
        u32 sum         :  1;   /* Adapter int summary bit enabled */
        u32             :  1;
        u32 aisbo       :  6;   /* Adapter int summary bit offset */
-       u32 reserved5;
+       u32             : 32;
        u64 aibv;               /* Adapter int bit vector address */
        u64 aisb;               /* Adapter int summary bit address */
        u64 fmb_addr;           /* Function measurement block address and key */
-       u64 reserved6;
-       u64 reserved7;
-} __packed;
-
+       u32             : 32;
+       u32 gd;
+} __packed __aligned(8);
 
 int zpci_mod_fc(u64 req, struct zpci_fib *fib);
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
index 86fe0ee2cee5945beacca37f563d6d118fae0a12..061ab45faf7015aa7e7ba0d8b7bbfa7b613a11c9 100644 (file)
  */
 #define __my_cpu_offset S390_lowcore.percpu_offset
 
+#ifdef CONFIG_64BIT
+
 /*
  * For 64 bit module code, the module may be more than 4G above the
 * per cpu area; use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define arch_this_cpu_to_op(pcp, val, op)                              \
+/*
+ * We use a compare-and-swap loop since that uses fewer CPU cycles than
+ * disabling and enabling interrupts like the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op)                       \
 ({                                                                     \
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ old__, new__, prev__;                                \
        do {                                                            \
                old__ = prev__;                                         \
                new__ = old__ op (val);                                 \
-               switch (sizeof(*ptr__)) {                               \
-               case 8:                                                 \
-                       prev__ = cmpxchg64(ptr__, old__, new__);        \
-                       break;                                          \
-               default:                                                \
-                       prev__ = cmpxchg(ptr__, old__, new__);          \
-               }                                                       \
+               prev__ = cmpxchg(ptr__, old__, new__);                  \
        } while (prev__ != old__);                                      \
        preempt_enable();                                               \
        new__;                                                          \
 })
 
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_xor_1(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_xor_4(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast)                  \
+{                                                                      \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       if (__builtin_constant_p(val__) &&                              \
+           ((szcast)val__ > -129) && ((szcast)val__ < 128)) {          \
+               asm volatile(                                           \
+                       op2 "   %[ptr__],%[val__]\n"                    \
+                       : [ptr__] "+Q" (*ptr__)                         \
+                       : [val__] "i" ((szcast)val__)                   \
+                       : "cc");                                        \
+       } else {                                                        \
+               asm volatile(                                           \
+                       op1 "   %[old__],%[val__],%[ptr__]\n"           \
+                       : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)   \
+                       : [val__] "d" (val__)                           \
+                       : "cc");                                        \
+       }                                                               \
+       preempt_enable();                                               \
+}
 
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
 
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define arch_this_cpu_add_return(pcp, val, op)                         \
+({                                                                     \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       asm volatile(                                                   \
+               op "    %[old__],%[val__],%[ptr__]\n"                   \
+               : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
+               : [val__] "d" (val__)                                   \
+               : "cc");                                                \
+       preempt_enable();                                               \
+       old__ + val__;                                                  \
+})
 
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
 
-#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define arch_this_cpu_to_op(pcp, val, op)                              \
+{                                                                      \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       asm volatile(                                                   \
+               op "    %[old__],%[val__],%[ptr__]\n"                   \
+               : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
+               : [val__] "d" (val__)                                   \
+               : "cc");                                                \
+       preempt_enable();                                               \
+}
+
+#define this_cpu_and_4(pcp, val)       arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val)       arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val)                arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val)                arch_this_cpu_to_op(pcp, val, "laog")
+#define this_cpu_xor_4(pcp, val)       arch_this_cpu_to_op(pcp, val, "lax")
+#define this_cpu_xor_8(pcp, val)       arch_this_cpu_to_op(pcp, val, "laxg")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define arch_this_cpu_cmpxchg(pcp, oval, nval)                         \
 ({                                                                     \
        pcp_op_T__ *ptr__;                                              \
        preempt_disable();                                              \
        ptr__ = __this_cpu_ptr(&(pcp));                                 \
-       switch (sizeof(*ptr__)) {                                       \
-       case 8:                                                         \
-               ret__ = cmpxchg64(ptr__, oval, nval);                   \
-               break;                                                  \
-       default:                                                        \
-               ret__ = cmpxchg(ptr__, oval, nval);                     \
-       }                                                               \
+       ret__ = cmpxchg(ptr__, oval, nval);                             \
        preempt_enable();                                               \
        ret__;                                                          \
 })
 #define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
 #define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
 
 #define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2)       \
 ({                                                                     \
 })
 
 #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
 
 #include <asm-generic/percpu.h>
 
index 9b60a36c348d5422dc325463bcb26efeee64161d..2204400d0bd58d4a1e45c82394ff5cbd100aa2cc 100644 (file)
@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-       if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+       if (!MACHINE_HAS_ESOP &&
+           (pte_val(entry) & _PAGE_PRESENT) &&
+           (pte_val(entry) & _PAGE_WRITE)) {
                /*
                 * Without enhanced suppression-on-protection force
                 * the dirty bit on for all writable ptes.
index 0eb37505cab11c71f083ed508f02c98a72127e95..a56e63483e0f73bc36b8493c1a34b5cd1d3f5579 100644 (file)
@@ -134,14 +134,14 @@ struct stack_frame {
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do {                   \
-       regs->psw.mask  = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA;    \
+       regs->psw.mask  = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;    \
        regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
        regs->gprs[15]  = new_stackp;                                   \
        execve_tail();                                                  \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {                 \
-       regs->psw.mask  = psw_user_bits | PSW_MASK_BA;                  \
+       regs->psw.mask  = PSW_USER_BITS | PSW_MASK_BA;                  \
        regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
        regs->gprs[15]  = new_stackp;                                   \
        __tlb_flush_mm(current->mm);                                    \
@@ -169,17 +169,15 @@ extern void release_thread(struct task_struct *);
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
-extern void show_code(struct pt_regs *regs);
-extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
-                           unsigned int len);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->psw.addr)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->gprs[15])
 
+/* Does the task have runtime instrumentation enabled? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
 static inline unsigned short stap(void)
 {
        unsigned short cpu_address;
@@ -198,6 +196,8 @@ static inline void cpu_relax(void)
        barrier();
 }
 
+#define arch_mutex_cpu_relax()  barrier()
+
 static inline void psw_set_key(unsigned int key)
 {
        asm volatile("spka 0(%0)" : : "d" (key));
@@ -346,9 +346,9 @@ __set_psw_mask(unsigned long mask)
 }
 
 #define local_mcck_enable() \
-       __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+       __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
 #define local_mcck_disable() \
-       __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+       __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
 
 /*
  * Basic Machine Check/Program Check Handler.
index 52b56533c57cda08f4d8283253ed682a13536f5c..9c82cebddabd78938b1d66baef004c74f3bd97ec 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-extern long psw_kernel_bits;
-extern long psw_user_bits;
+#define PSW_KERNEL_BITS        (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
+                        PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS  (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+                        PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
+                        PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
 /*
  * The pt_regs struct defines the way the registers are stored on
index 59880dbaf360a6f39563990506e3c6a5af648aae..df802ee14af6f76591cebdc54030488b42c3d0cc 100644 (file)
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
 void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
                     unsigned long size);
 
-#define PRIMARY_SPACE_MODE     0
-#define ACCESS_REGISTER_MODE   1
-#define SECONDARY_SPACE_MODE   2
-#define HOME_SPACE_MODE                3
-
-extern unsigned int s390_user_mode;
-
 /*
  * Machine features detected in head.S
  */
index b64f15c3b4cc739a13e76ec3c1287328315ecb74..ac9bed8e103fa741f85b3aecc64fcc9a18511c82 100644 (file)
@@ -14,7 +14,6 @@
 #define raw_smp_processor_id() (S390_lowcore.cpu_nr)
 
 extern struct mutex smp_cpu_state_mutex;
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 
 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
 
index 701fe8c59e1f0e9efc302e711eef5bcfe3db479a..83e5d216105e86a2df2ab1367bcfc3fd1590542a 100644 (file)
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 extern int arch_spin_trylock_retry(arch_spinlock_t *);
 extern void arch_spin_relax(arch_spinlock_t *lock);
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+       return lock.owner_cpu == 0;
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
        int old;
index 6dbd559763c9996c099f14e4ce6926e15930b7f8..29c81f82705e139dc53a9af3f72b0db3d9e14695 100644 (file)
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_cr_regs(struct task_struct *task);
 
-static inline void save_fp_regs(s390_fp_regs *fpregs)
+static inline int test_fp_ctl(u32 fpc)
 {
+       u32 orig_fpc;
+       int rc;
+
+       if (!MACHINE_HAS_IEEE)
+               return 0;
+
        asm volatile(
-               "       std     0,%O0+8(%R0)\n"
-               "       std     2,%O0+24(%R0)\n"
-               "       std     4,%O0+40(%R0)\n"
-               "       std     6,%O0+56(%R0)"
-               : "=Q" (*fpregs) : "Q" (*fpregs));
+               "       efpc    %1\n"
+               "       sfpc    %2\n"
+               "0:     sfpc    %1\n"
+               "       la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc), "=d" (orig_fpc)
+               : "d" (fpc), "0" (-EINVAL));
+       return rc;
+}
+
+static inline void save_fp_ctl(u32 *fpc)
+{
        if (!MACHINE_HAS_IEEE)
                return;
+
        asm volatile(
-               "       stfpc   %0\n"
-               "       std     1,%O0+16(%R0)\n"
-               "       std     3,%O0+32(%R0)\n"
-               "       std     5,%O0+48(%R0)\n"
-               "       std     7,%O0+64(%R0)\n"
-               "       std     8,%O0+72(%R0)\n"
-               "       std     9,%O0+80(%R0)\n"
-               "       std     10,%O0+88(%R0)\n"
-               "       std     11,%O0+96(%R0)\n"
-               "       std     12,%O0+104(%R0)\n"
-               "       std     13,%O0+112(%R0)\n"
-               "       std     14,%O0+120(%R0)\n"
-               "       std     15,%O0+128(%R0)\n"
-               : "=Q" (*fpregs) : "Q" (*fpregs));
+               "       stfpc   %0\n"
+               : "+Q" (*fpc));
 }
 
-static inline void restore_fp_regs(s390_fp_regs *fpregs)
+static inline int restore_fp_ctl(u32 *fpc)
 {
+       int rc;
+
+       if (!MACHINE_HAS_IEEE)
+               return 0;
+
        asm volatile(
-               "       ld      0,%O0+8(%R0)\n"
-               "       ld      2,%O0+24(%R0)\n"
-               "       ld      4,%O0+40(%R0)\n"
-               "       ld      6,%O0+56(%R0)"
-               : : "Q" (*fpregs));
+               "0:     lfpc    %1\n"
+               "       la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
+       return rc;
+}
+
+static inline void save_fp_regs(freg_t *fprs)
+{
+       asm volatile("std 0,%0" : "=Q" (fprs[0]));
+       asm volatile("std 2,%0" : "=Q" (fprs[2]));
+       asm volatile("std 4,%0" : "=Q" (fprs[4]));
+       asm volatile("std 6,%0" : "=Q" (fprs[6]));
        if (!MACHINE_HAS_IEEE)
                return;
-       asm volatile(
-               "       lfpc    %0\n"
-               "       ld      1,%O0+16(%R0)\n"
-               "       ld      3,%O0+32(%R0)\n"
-               "       ld      5,%O0+48(%R0)\n"
-               "       ld      7,%O0+64(%R0)\n"
-               "       ld      8,%O0+72(%R0)\n"
-               "       ld      9,%O0+80(%R0)\n"
-               "       ld      10,%O0+88(%R0)\n"
-               "       ld      11,%O0+96(%R0)\n"
-               "       ld      12,%O0+104(%R0)\n"
-               "       ld      13,%O0+112(%R0)\n"
-               "       ld      14,%O0+120(%R0)\n"
-               "       ld      15,%O0+128(%R0)\n"
-               : : "Q" (*fpregs));
+       asm volatile("std 1,%0" : "=Q" (fprs[1]));
+       asm volatile("std 3,%0" : "=Q" (fprs[3]));
+       asm volatile("std 5,%0" : "=Q" (fprs[5]));
+       asm volatile("std 7,%0" : "=Q" (fprs[7]));
+       asm volatile("std 8,%0" : "=Q" (fprs[8]));
+       asm volatile("std 9,%0" : "=Q" (fprs[9]));
+       asm volatile("std 10,%0" : "=Q" (fprs[10]));
+       asm volatile("std 11,%0" : "=Q" (fprs[11]));
+       asm volatile("std 12,%0" : "=Q" (fprs[12]));
+       asm volatile("std 13,%0" : "=Q" (fprs[13]));
+       asm volatile("std 14,%0" : "=Q" (fprs[14]));
+       asm volatile("std 15,%0" : "=Q" (fprs[15]));
+}
+
+static inline void restore_fp_regs(freg_t *fprs)
+{
+       asm volatile("ld 0,%0" : : "Q" (fprs[0]));
+       asm volatile("ld 2,%0" : : "Q" (fprs[2]));
+       asm volatile("ld 4,%0" : : "Q" (fprs[4]));
+       asm volatile("ld 6,%0" : : "Q" (fprs[6]));
+       if (!MACHINE_HAS_IEEE)
+               return;
+       asm volatile("ld 1,%0" : : "Q" (fprs[1]));
+       asm volatile("ld 3,%0" : : "Q" (fprs[3]));
+       asm volatile("ld 5,%0" : : "Q" (fprs[5]));
+       asm volatile("ld 7,%0" : : "Q" (fprs[7]));
+       asm volatile("ld 8,%0" : : "Q" (fprs[8]));
+       asm volatile("ld 9,%0" : : "Q" (fprs[9]));
+       asm volatile("ld 10,%0" : : "Q" (fprs[10]));
+       asm volatile("ld 11,%0" : : "Q" (fprs[11]));
+       asm volatile("ld 12,%0" : : "Q" (fprs[12]));
+       asm volatile("ld 13,%0" : : "Q" (fprs[13]));
+       asm volatile("ld 14,%0" : : "Q" (fprs[14]));
+       asm volatile("ld 15,%0" : : "Q" (fprs[15]));
 }
 
 static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 #define switch_to(prev,next,last) do {                                 \
        if (prev->mm) {                                                 \
-               save_fp_regs(&prev->thread.fp_regs);                    \
+               save_fp_ctl(&prev->thread.fp_regs.fpc);                 \
+               save_fp_regs(prev->thread.fp_regs.fprs);                \
                save_access_regs(&prev->thread.acrs[0]);                \
                save_ri_cb(prev->thread.ri_cb);                         \
        }                                                               \
        if (next->mm) {                                                 \
-               restore_fp_regs(&next->thread.fp_regs);                 \
+               restore_fp_ctl(&next->thread.fp_regs.fpc);              \
+               restore_fp_regs(next->thread.fp_regs.fprs);             \
                restore_access_regs(&next->thread.acrs[0]);             \
                restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
                update_cr_regs(next);                                   \
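test_fp_ctl() gives callers a way to validate a user-supplied floating-point control word before loading it for real: it returns 0 when the value is acceptable and -EINVAL when setting it would fault. A hedged sketch of the check, with the surrounding function left out:

	/* Reject a malformed FP control word before committing it. */
	if (test_fp_ctl(fpc))
		return -EINVAL;
	current->thread.fp_regs.fpc = fpc;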
index 8ad8af915032a32573e06548e90d89ea7563349c..819b94d2272054d318fdd12c31b8718a9e4bb0bd 100644 (file)
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_tod_clock(void)
-{
-       unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-       asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
-       asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
-       return clk;
-}
-
 static inline void get_tod_clock_ext(char *clk)
 {
        asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
 {
        unsigned char clk[16];
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
 
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+       unsigned long long clk;
+
+       asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+       return clk;
+#else
+       return get_tod_clock();
+#endif
+}
+
 static inline cycles_t get_cycles(void)
 {
        return (cycles_t) get_tod_clock() >> 2;
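get_tod_clock_fast() trades the extended-format read for a plain stckf where the machine has it, which is enough for timestamps and deltas. A rough sketch, assuming the standard z/Architecture TOD format in which bit 51 advances once per microsecond (the workload call is hypothetical):

	unsigned long long start, delta_us;

	start = get_tod_clock_fast();
	do_work();				/* hypothetical workload */
	delta_us = (get_tod_clock_fast() - start) >> 12;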
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-       return get_tod_clock_xt() - sched_clock_base_cc;
+       return get_tod_clock() - sched_clock_base_cc;
 }
 
 /**
index 9c33ed4e666f5cd920933f6ad9fa5463d83dd1c1..79330af9a5f85442745110001defbaa2a1964bb8 100644 (file)
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 
 struct uaccess_ops {
        size_t (*copy_from_user)(size_t, const void __user *, void *);
-       size_t (*copy_from_user_small)(size_t, const void __user *, void *);
        size_t (*copy_to_user)(size_t, void __user *, const void *);
-       size_t (*copy_to_user_small)(size_t, void __user *, const void *);
        size_t (*copy_in_user)(size_t, void __user *, const void __user *);
        size_t (*clear_user)(size_t, void __user *);
        size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
 };
 
 extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
 extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
 extern struct uaccess_ops uaccess_pt;
 
 extern int __handle_fault(unsigned long, unsigned long, int);
 
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
-       size = uaccess.copy_to_user_small(size, ptr, x);
+       size = uaccess.copy_to_user(size, ptr, x);
        return size ? -EFAULT : size;
 }
 
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-       size = uaccess.copy_from_user_small(size, ptr, x);
+       size = uaccess.copy_from_user(size, ptr, x);
        return size ? -EFAULT : size;
 }
 
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (__builtin_constant_p(n) && (n <= 256))
-               return uaccess.copy_to_user_small(n, to, from);
-       else
-               return uaccess.copy_to_user(n, to, from);
+       return uaccess.copy_to_user(n, to, from);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (__builtin_constant_p(n) && (n <= 256))
-               return uaccess.copy_from_user_small(n, from, to);
-       else
-               return uaccess.copy_from_user(n, from, to);
+       return uaccess.copy_from_user(n, from, to);
 }
 
 extern void copy_from_user_overflow(void)
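With the *_small variants gone, __copy_to_user() and __copy_from_user() funnel everything through one uaccess op; the calling convention is unchanged, i.e. the copy routines return the number of bytes that could not be copied. A short sketch with a hypothetical structure:

	static long report_to_user(void __user *buf, const struct my_report *r)
	{
		if (copy_to_user(buf, r, sizeof(*r)))
			return -EFAULT;		/* part of the copy faulted */
		return 0;
	}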
index 7a84619e315e804af6d7c6a535c4df7981e082e3..7e0b498a2c2ba95c8ca56537e673b18c4a0065d3 100644 (file)
@@ -199,6 +199,7 @@ typedef union
 typedef struct
 {
        __u32   fpc;
+       __u32   pad;
        freg_t  fprs[NUM_FPRS];              
 } s390_fp_regs;
 
@@ -206,7 +207,6 @@ typedef struct
 #define FPC_FLAGS_MASK          0x00F80000
 #define FPC_DXC_MASK            0x0000FF00
 #define FPC_RM_MASK             0x00000003
-#define FPC_VALID_MASK          0xF8F8FF03
 
 /* this typedef defines how a Program Status Word looks like */
 typedef struct 
@@ -263,7 +263,7 @@ typedef struct
 #define PSW_MASK_EA            0x0000000100000000UL
 #define PSW_MASK_BA            0x0000000080000000UL
 
-#define PSW_MASK_USER          0x0000FF8180000000UL
+#define PSW_MASK_USER          0x0000FF0180000000UL
 
 #define PSW_ADDR_AMODE         0x0000000000000000UL
 #define PSW_ADDR_INSN          0xFFFFFFFFFFFFFFFFUL
index 584787f6ce44d88569b964bf8543a5c74475e030..b30de9c01bbedad00c5e15ff52ffa8d19b06c824 100644 (file)
@@ -49,6 +49,7 @@ typedef struct
 typedef struct
 {
        unsigned int fpc;
+       unsigned int pad;
        double   fprs[__NUM_FPRS];
 } _s390_fp_regs;
 
index 92494494692eca965ee1d315d42a800e08c47605..c286c2e868f03f9683481b82689af19f1a222e2d 100644 (file)
@@ -82,4 +82,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
index 4bb2a46561631ab361c027fdcfc8a1dfd0504cb5..2403303cfed708d3ae73a0b0091cab67c21f6f9f 100644 (file)
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o               += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
-obj-y  := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y  := traps.o time.o process.o base.o early.o setup.o vtime.o
 obj-y  += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y  += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
 obj-y  += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
deleted file mode 100644 (file)
index 102da5e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *    Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
- *    See include/asm/{bitops.h|posix_types.h} for details
- *
- *    Copyright IBM Corp. 1999, 2009
- *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-EXPORT_SYMBOL(_oi_bitmap);
-
-const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
-EXPORT_SYMBOL(_ni_bitmap);
-
-const char _zb_findmap[] = {
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
-EXPORT_SYMBOL(_zb_findmap);
-
-const char _sb_findmap[] = {
-       8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
-EXPORT_SYMBOL(_sb_findmap);
index dd62071624be349ca1913ed4404e826ecd6b673e..3a414c0f93edcd08d69d3a1aa2af645c098327c9 100644 (file)
@@ -146,15 +146,14 @@ static void __init cache_build_info(void)
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (level = 0; level < CACHE_MAX_LEVEL; level++) {
                switch (ct.ci[level].scope) {
-               case CACHE_SCOPE_NOTEXISTS:
-               case CACHE_SCOPE_RESERVED:
-                       return;
                case CACHE_SCOPE_SHARED:
                        private = 0;
                        break;
                case CACHE_SCOPE_PRIVATE:
                        private = 1;
                        break;
+               default:
+                       return;
                }
                if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
                        rc  = cache_add(level, private, CACHE_TYPE_DATA);
index 1f1b8c70ab97ce9e5b9445d5dc020f8a75bfac77..e030d2bdec1b6aa2b9f29288b28c6600710ecfd1 100644 (file)
 
 #include "compat_linux.h"
 
-u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
-                     PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
-                     PSW32_MASK_PSTATE | PSW32_ASC_HOME;
 /* For this source file, we want overflow handling. */
 
 #undef high2lowuid
index 976518c0592aa96a29565bac78d58fa8fcfdd846..1bfda3eca37909988c26564b11398b6b75302c50 100644 (file)
@@ -27,6 +27,7 @@ typedef union
 typedef struct
 {
        unsigned int    fpc;
+       unsigned int    pad;
        freg_t32        fprs[__NUM_FPRS];              
 } _s390_fp_regs32;
 
index 1389b637dae55018e3eab8b18dd3f44e9440e078..5a3ab5c191fdaf7f370a14cc69d52eb9d3052647 100644 (file)
@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
                        break;
                }
        }
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,62 +148,71 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
                        break;
                }
        }
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 {
-       _s390_regs_common32 regs32;
-       int err, i;
+       _sigregs32 user_sregs;
+       int i;
 
-       regs32.psw.mask = psw32_user_bits |
-               ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
-       regs32.psw.addr = (__u32) regs->psw.addr |
+       user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
+       user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
+       user_sregs.regs.psw.mask |= PSW32_USER_BITS;
+       user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
                (__u32)(regs->psw.mask & PSW_MASK_BA);
        for (i = 0; i < NUM_GPRS; i++)
-               regs32.gprs[i] = (__u32) regs->gprs[i];
+               user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
        save_access_regs(current->thread.acrs);
-       memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
-       err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
-       if (err)
-               return err;
-       save_fp_regs(&current->thread.fp_regs);
-       /* s390_fp_regs and _s390_fp_regs32 are the same ! */
-       return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
-                             sizeof(_s390_fp_regs32));
+       memcpy(&user_sregs.regs.acrs, current->thread.acrs,
+              sizeof(user_sregs.regs.acrs));
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
+       memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
+              sizeof(user_sregs.fpregs));
+       if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
+               return -EFAULT;
+       return 0;
 }
 
 static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 {
-       _s390_regs_common32 regs32;
-       int err, i;
+       _sigregs32 user_sregs;
+       int i;
 
        /* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
-       if (err)
-               return err;
+       if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
+               return -EFAULT;
+
+       if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
+               return -EINVAL;
+
+       /* Loading the floating-point-control word can fail. Do that first. */
+       if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+               return -EINVAL;
+
+       /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
-               (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
-               (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+               (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
+               (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
+               (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
        /* Check for invalid user address space control. */
-       if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
-               regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+       if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+               regs->psw.mask = PSW_ASC_PRIMARY |
                        (regs->psw.mask & ~PSW_MASK_ASC);
-       regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+       regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
        for (i = 0; i < NUM_GPRS; i++)
-               regs->gprs[i] = (__u64) regs32.gprs[i];
-       memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
+               regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
+       memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
+              sizeof(current->thread.acrs));
        restore_access_regs(current->thread.acrs);
 
-       err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
-                              sizeof(_s390_fp_regs32));
-       current->thread.fp_regs.fpc &= FPC_VALID_MASK;
-       if (err)
-               return err;
+       memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
+              sizeof(current->thread.fp_regs));
 
-       restore_fp_regs(&current->thread.fp_regs);
+       restore_fp_regs(current->thread.fp_regs.fprs);
        clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
        return 0;
 }
@@ -215,18 +224,18 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
 
        for (i = 0; i < NUM_GPRS; i++)
                gprs_high[i] = regs->gprs[i] >> 32;
-
-       return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high));
+       if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+               return -EFAULT;
+       return 0;
 }
 
 static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
 {
        __u32 gprs_high[NUM_GPRS];
-       int err, i;
+       int i;
 
-       err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high));
-       if (err)
-               return err;
+       if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+               return -EFAULT;
        for (i = 0; i < NUM_GPRS; i++)
                *(__u32 *)&regs->gprs[i] = gprs_high[i];
        return 0;
@@ -348,7 +357,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
        regs->gprs[15] = (__force __u64) frame;
        /* Force 31 bit amode and default user address space control. */
        regs->psw.mask = PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
@@ -415,7 +424,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->gprs[15] = (__force __u64) frame;
        /* Force 31 bit amode and default user address space control. */
        regs->psw.mask = PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
 
index c84f33d51f7b45d92d66b870f9b35c23d5054568..f45b2ab0cb81ae425ce2d72c2de9b6fa4fa241a3 100644 (file)
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+struct dump_save_areas dump_save_areas;
+
+/*
+ * Allocate and add a save area for a CPU
+ */
+struct save_area *dump_save_area_create(int cpu)
+{
+       struct save_area **save_areas, *save_area;
+
+       save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
+       if (!save_area)
+               return NULL;
+       if (cpu + 1 > dump_save_areas.count) {
+               dump_save_areas.count = cpu + 1;
+               save_areas = krealloc(dump_save_areas.areas,
+                                     dump_save_areas.count * sizeof(void *),
+                                     GFP_KERNEL | __GFP_ZERO);
+               if (!save_areas) {
+                       kfree(save_area);
+                       return NULL;
+               }
+               dump_save_areas.areas = save_areas;
+       }
+       dump_save_areas.areas[cpu] = save_area;
+       return save_area;
+}
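A brief sketch of how the new allocator is meant to be driven from the dump setup path; the caller and error handling here are illustrative, not taken from this patch:

	struct save_area *sa;

	sa = dump_save_area_create(cpu);
	if (!sa)
		return -ENOMEM;	/* kmalloc or krealloc failed */
	/* sa is now reachable as dump_save_areas.areas[cpu] */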
 
 /*
  * Return physical address for virtual address
@@ -40,28 +66,25 @@ static inline void *load_real_addr(void *addr)
 }
 
 /*
- * Copy up to one page to vmalloc or real memory
+ * Copy from real memory to virtual or real memory
  */
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
 {
-       size_t size;
+       unsigned long size;
 
-       if (is_vmalloc_addr(buf)) {
-               BUG_ON(csize >= PAGE_SIZE);
-               /* If buf is not page aligned, copy first part */
-               size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
-               if (size) {
-                       if (memcpy_real(load_real_addr(buf), src, size))
-                               return -EFAULT;
-                       buf += size;
-                       src += size;
-               }
-               /* Copy second part */
-               size = csize - size;
-               return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
-       } else {
-               return memcpy_real(buf, src, csize);
-       }
+       if (!count)
+               return 0;
+       if (!is_vmalloc_or_module_addr(dest))
+               return memcpy_real(dest, src, count);
+       do {
+               size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+               if (memcpy_real(load_real_addr(dest), src, size))
+                       return -EFAULT;
+               count -= size;
+               dest += size;
+               src += size;
+       } while (count);
+       return 0;
 }
 
 /*
@@ -114,7 +137,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
                rc = copy_to_user_real((void __force __user *) buf,
                                       (void *) src, csize);
        else
-               rc = copy_page_real(buf, (void *) src, csize);
+               rc = copy_from_realmem(buf, (void *) src, csize);
        return (rc == 0) ? rc : csize;
 }
 
@@ -210,7 +233,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
        if (OLDMEM_BASE) {
                if ((unsigned long) src < OLDMEM_SIZE) {
                        copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-                       rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+                       rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
                        if (rc)
                                return rc;
                }
@@ -223,7 +246,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
                                return rc;
                }
        }
-       return memcpy_real(dest + copied, src + copied, count - copied);
+       return copy_from_realmem(dest + copied, src + copied, count - copied);
 }
 
 /*
@@ -453,8 +476,8 @@ static int get_cpu_cnt(void)
 {
        int i, cpus = 0;
 
-       for (i = 0; zfcpdump_save_areas[i]; i++) {
-               if (zfcpdump_save_areas[i]->pref_reg == 0)
+       for (i = 0; i < dump_save_areas.count; i++) {
+               if (dump_save_areas.areas[i]->pref_reg == 0)
                        continue;
                cpus++;
        }
@@ -525,8 +548,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 
        ptr = nt_prpsinfo(ptr);
 
-       for (i = 0; zfcpdump_save_areas[i]; i++) {
-               sa = zfcpdump_save_areas[i];
+       for (i = 0; i < dump_save_areas.count; i++) {
+               sa = dump_save_areas.areas[i];
                if (sa->pref_reg == 0)
                        continue;
                ptr = fill_cpu_elf_notes(ptr, sa);
index f1279dc2e1bcecae2a12ea246096f1458b2de77f..17d62fe5d7b70554c36b8f85b34da23d86c90407 100644 (file)
@@ -867,7 +867,7 @@ static inline void
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
                        int exception)
 {
-       active->id.stck = get_tod_clock();
+       active->id.stck = get_tod_clock_fast();
        active->id.fields.cpuid = smp_processor_id();
        active->caller = __builtin_return_address(0);
        active->id.fields.exception = exception;
index be87d3e05a5be69265a6100f87afe2fa60d51137..993efe6a887c2c31d4bcd90b02bccbb3c0234ae9 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kdebug.h>
 
 #include <asm/uaccess.h>
+#include <asm/dis.h>
 #include <asm/io.h>
 #include <linux/atomic.h>
 #include <asm/mathemu.h>
 #define ONELONG "%016lx: "
 #endif /* CONFIG_64BIT */
 
-#define OPERAND_GPR    0x1     /* Operand printed as %rx */
-#define OPERAND_FPR    0x2     /* Operand printed as %fx */
-#define OPERAND_AR     0x4     /* Operand printed as %ax */
-#define OPERAND_CR     0x8     /* Operand printed as %cx */
-#define OPERAND_DISP   0x10    /* Operand printed as displacement */
-#define OPERAND_BASE   0x20    /* Operand printed as base register */
-#define OPERAND_INDEX  0x40    /* Operand printed as index register */
-#define OPERAND_PCREL  0x80    /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x100   /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x200   /* Operand printed as length (+1) */
-
 enum {
        UNUSED, /* Indicates the end of the operand list */
        R_8,    /* GPR starting at position 8 */
@@ -155,19 +145,7 @@ enum {
        INSTR_S_00, INSTR_S_RD,
 };
 
-struct operand {
-       int bits;               /* The number of bits in the operand. */
-       int shift;              /* The number of bits to shift. */
-       int flags;              /* One bit syntax flags. */
-};
-
-struct insn {
-       const char name[5];
-       unsigned char opfrag;
-       unsigned char format;
-};
-
-static const struct operand operands[] =
+static const struct s390_operand operands[] =
 {
        [UNUSED]  = { 0, 0, 0 },
        [R_8]    = {  4,  8, OPERAND_GPR },
@@ -479,7 +457,7 @@ static char *long_insn_name[] = {
        [LONG_INSN_PCISTB] = "pcistb",
 };
 
-static struct insn opcode[] = {
+static struct s390_insn opcode[] = {
 #ifdef CONFIG_64BIT
        { "bprp", 0xc5, INSTR_MII_UPI },
        { "bpp", 0xc7, INSTR_SMI_U0RDP },
@@ -668,7 +646,7 @@ static struct insn opcode[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_01[] = {
+static struct s390_insn opcode_01[] = {
 #ifdef CONFIG_64BIT
        { "ptff", 0x04, INSTR_E },
        { "pfpo", 0x0a, INSTR_E },
@@ -684,7 +662,7 @@ static struct insn opcode_01[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_a5[] = {
+static struct s390_insn opcode_a5[] = {
 #ifdef CONFIG_64BIT
        { "iihh", 0x00, INSTR_RI_RU },
        { "iihl", 0x01, INSTR_RI_RU },
@@ -706,7 +684,7 @@ static struct insn opcode_a5[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_a7[] = {
+static struct s390_insn opcode_a7[] = {
 #ifdef CONFIG_64BIT
        { "tmhh", 0x02, INSTR_RI_RU },
        { "tmhl", 0x03, INSTR_RI_RU },
@@ -728,7 +706,7 @@ static struct insn opcode_a7[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_aa[] = {
+static struct s390_insn opcode_aa[] = {
 #ifdef CONFIG_64BIT
        { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
        { "rion", 0x01, INSTR_RI_RI },
@@ -739,7 +717,7 @@ static struct insn opcode_aa[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_b2[] = {
+static struct s390_insn opcode_b2[] = {
 #ifdef CONFIG_64BIT
        { "stckf", 0x7c, INSTR_S_RD },
        { "lpp", 0x80, INSTR_S_RD },
@@ -851,7 +829,7 @@ static struct insn opcode_b2[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_b3[] = {
+static struct s390_insn opcode_b3[] = {
 #ifdef CONFIG_64BIT
        { "maylr", 0x38, INSTR_RRF_F0FF },
        { "mylr", 0x39, INSTR_RRF_F0FF },
@@ -1034,7 +1012,7 @@ static struct insn opcode_b3[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_b9[] = {
+static struct s390_insn opcode_b9[] = {
 #ifdef CONFIG_64BIT
        { "lpgr", 0x00, INSTR_RRE_RR },
        { "lngr", 0x01, INSTR_RRE_RR },
@@ -1167,7 +1145,7 @@ static struct insn opcode_b9[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c0[] = {
+static struct s390_insn opcode_c0[] = {
 #ifdef CONFIG_64BIT
        { "lgfi", 0x01, INSTR_RIL_RI },
        { "xihf", 0x06, INSTR_RIL_RU },
@@ -1187,7 +1165,7 @@ static struct insn opcode_c0[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c2[] = {
+static struct s390_insn opcode_c2[] = {
 #ifdef CONFIG_64BIT
        { "msgfi", 0x00, INSTR_RIL_RI },
        { "msfi", 0x01, INSTR_RIL_RI },
@@ -1205,7 +1183,7 @@ static struct insn opcode_c2[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c4[] = {
+static struct s390_insn opcode_c4[] = {
 #ifdef CONFIG_64BIT
        { "llhrl", 0x02, INSTR_RIL_RP },
        { "lghrl", 0x04, INSTR_RIL_RP },
@@ -1222,7 +1200,7 @@ static struct insn opcode_c4[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c6[] = {
+static struct s390_insn opcode_c6[] = {
 #ifdef CONFIG_64BIT
        { "exrl", 0x00, INSTR_RIL_RP },
        { "pfdrl", 0x02, INSTR_RIL_UP },
@@ -1240,7 +1218,7 @@ static struct insn opcode_c6[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c8[] = {
+static struct s390_insn opcode_c8[] = {
 #ifdef CONFIG_64BIT
        { "mvcos", 0x00, INSTR_SSF_RRDRD },
        { "ectg", 0x01, INSTR_SSF_RRDRD },
@@ -1251,7 +1229,7 @@ static struct insn opcode_c8[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_cc[] = {
+static struct s390_insn opcode_cc[] = {
 #ifdef CONFIG_64BIT
        { "brcth", 0x06, INSTR_RIL_RP },
        { "aih", 0x08, INSTR_RIL_RI },
@@ -1263,7 +1241,7 @@ static struct insn opcode_cc[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_e3[] = {
+static struct s390_insn opcode_e3[] = {
 #ifdef CONFIG_64BIT
        { "ltg", 0x02, INSTR_RXY_RRRD },
        { "lrag", 0x03, INSTR_RXY_RRRD },
@@ -1369,7 +1347,7 @@ static struct insn opcode_e3[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_e5[] = {
+static struct s390_insn opcode_e5[] = {
 #ifdef CONFIG_64BIT
        { "strag", 0x02, INSTR_SSE_RDRD },
        { "mvhhi", 0x44, INSTR_SIL_RDI },
@@ -1391,7 +1369,7 @@ static struct insn opcode_e5[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_eb[] = {
+static struct s390_insn opcode_eb[] = {
 #ifdef CONFIG_64BIT
        { "lmg", 0x04, INSTR_RSY_RRRD },
        { "srag", 0x0a, INSTR_RSY_RRRD },
@@ -1465,7 +1443,7 @@ static struct insn opcode_eb[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_ec[] = {
+static struct s390_insn opcode_ec[] = {
 #ifdef CONFIG_64BIT
        { "brxhg", 0x44, INSTR_RIE_RRP },
        { "brxlg", 0x45, INSTR_RIE_RRP },
@@ -1504,7 +1482,7 @@ static struct insn opcode_ec[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_ed[] = {
+static struct s390_insn opcode_ed[] = {
 #ifdef CONFIG_64BIT
        { "mayl", 0x38, INSTR_RXF_FRRDF },
        { "myl", 0x39, INSTR_RXF_FRRDF },
@@ -1572,7 +1550,7 @@ static struct insn opcode_ed[] = {
 
 /* Extracts an operand value from an instruction.  */
 static unsigned int extract_operand(unsigned char *code,
-                                   const struct operand *operand)
+                                   const struct s390_operand *operand)
 {
        unsigned int val;
        int bits;
@@ -1608,16 +1586,11 @@ static unsigned int extract_operand(unsigned char *code,
        return val;
 }
 
-static inline int insn_length(unsigned char code)
-{
-       return ((((int) code + 64) >> 7) + 1) << 1;
-}
-
-static struct insn *find_insn(unsigned char *code)
+struct s390_insn *find_insn(unsigned char *code)
 {
        unsigned char opfrag = code[1];
        unsigned char opmask;
-       struct insn *table;
+       struct s390_insn *table;
 
        switch (code[0]) {
        case 0x01:
@@ -1706,7 +1679,7 @@ static struct insn *find_insn(unsigned char *code)
  */
 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
 {
-       struct insn *insn;
+       struct s390_insn *insn;
 
        insn = find_insn(instruction);
        if (!insn)
@@ -1722,9 +1695,9 @@ EXPORT_SYMBOL_GPL(insn_to_mnemonic);
 
 static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 {
-       struct insn *insn;
+       struct s390_insn *insn;
        const unsigned char *ops;
-       const struct operand *operand;
+       const struct s390_operand *operand;
        unsigned int value;
        char separator;
        char *ptr;
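
The renamed disassembler tables still rely on the two most significant bits of the first opcode byte encoding the instruction length (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes); the insn_length() helper removed here computes exactly that, and presumably now lives in asm/dis.h, which several hunks below start to include. A stand-alone check of the arithmetic (plain user-space C, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    /* Same arithmetic as the insn_length() helper removed above. */
    static int insn_length(unsigned char code)
    {
            return ((((int) code + 64) >> 7) + 1) << 1;
    }

    int main(void)
    {
            int code;

            for (code = 0; code < 256; code++) {
                    int expected;

                    switch (code >> 6) {            /* two most significant opcode bits */
                    case 0:  expected = 2; break;   /* 00 -> 2-byte instruction */
                    case 3:  expected = 6; break;   /* 11 -> 6-byte instruction */
                    default: expected = 4; break;   /* 01 or 10 -> 4-byte instruction */
                    }
                    assert(insn_length((unsigned char) code) == expected);
            }
            printf("length mapping verified for all 256 opcode bytes\n");
            return 0;
    }
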
index 99e7f6035895e0cceccbc0ae6bf123a871a5d6a3..e6af9406987c9982689e5f500612f14d49c1b0b0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/debug.h>
+#include <asm/dis.h>
 #include <asm/ipl.h>
 
 #ifndef CONFIG_64BIT
index dc8770d7173c83aec5b3475d748eee99d99e60b7..96543ac400a7820ede40f22bf9636753920456b7 100644 (file)
@@ -206,6 +206,7 @@ static noinline __init void clear_bss_section(void)
  */
 static noinline __init void init_kernel_storage_key(void)
 {
+#if PAGE_DEFAULT_KEY
        unsigned long end_pfn, init_pfn;
 
        end_pfn = PFN_UP(__pa(&_end));
@@ -213,6 +214,7 @@ static noinline __init void init_kernel_storage_key(void)
        for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
                page_set_storage_key(init_pfn << PAGE_SHIFT,
                                     PAGE_DEFAULT_KEY, 0);
+#endif
 }
 
 static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
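
Guarding init_kernel_storage_key() with '#if PAGE_DEFAULT_KEY' lets the whole page loop disappear at preprocessing time whenever the default storage key is the constant 0, rather than iterating over every page only to store key 0 again. The same idiom in a stand-alone form (DEMO_KEY is a placeholder, not the real PAGE_DEFAULT_KEY):

    #include <stdio.h>

    #define DEMO_KEY 0      /* placeholder for PAGE_DEFAULT_KEY */

    static void init_keys(void)
    {
    #if DEMO_KEY
            int page;

            for (page = 0; page < 1024; page++)     /* compiled only for a non-zero key */
                    printf("page %d gets key %d\n", page, DEMO_KEY);
    #endif
    }

    int main(void)
    {
            init_keys();    /* an empty function when DEMO_KEY is 0 */
            printf("done\n");
            return 0;
    }
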
index cc30d1fb000c25c8f74a8045b105762ccf32c9b2..0dc2b6d0a1ec8557f7450d5fbd255d2758bf8512 100644 (file)
@@ -266,6 +266,7 @@ sysc_sigpending:
        tm      __TI_flags+3(%r12),_TIF_SYSCALL
        jno     sysc_return
        lm      %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       l       %r10,__TI_sysc_table(%r12)      # 31 bit system call table
        xr      %r8,%r8                 # svc 0 returns -ENOSYS
        clc     __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
        jnl     sysc_nr_ok              # invalid svc number -> do svc 0
index 2b2188b97c6aff464e467b7250823257924e31ab..e5b43c97a8340a807671ace329a4fa87a573709a 100644 (file)
@@ -297,6 +297,7 @@ sysc_sigpending:
        tm      __TI_flags+7(%r12),_TIF_SYSCALL
        jno     sysc_return
        lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
        lghi    %r8,0                   # svc 0 returns -ENOSYS
        llgh    %r1,__PT_INT_CODE+2(%r11)       # load new svc number
        cghi    %r1,NR_syscalls
index 1014ad5f7693eda79c3caa4ad8b7e8b5edb5f3fc..224db03e95182adc3976aecb3b20f0de5d2b0b13 100644 (file)
@@ -151,14 +151,13 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
-       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
-               goto out;
        trace.func = ip;
+       trace.depth = current->curr_ret_stack + 1;
        /* Only trace if the calling function expects to. */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
+       if (!ftrace_graph_entry(&trace))
+               goto out;
+       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
-       }
        parent = (unsigned long) return_to_handler;
 out:
        return parent;
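
prepare_ftrace_return() now asks ftrace_graph_entry() whether the call should be traced before pushing the return address with ftrace_push_return_trace(); the old order pushed first and then had to undo the push (curr_ret_stack--) whenever the entry hook declined. A tiny generic sketch of why checking first removes the undo step (ordinary user-space C, not the ftrace API):

    #include <stdio.h>

    static int depth = -1;                  /* mimics curr_ret_stack */
    static long ret_stack[16];

    static int want_to_trace(long ip)       /* stands in for ftrace_graph_entry() */
    {
            return (ip & 1) == 0;           /* arbitrary filter for the demo */
    }

    static void trace_call(long ip)
    {
            if (!want_to_trace(ip))         /* check first ... */
                    return;
            ret_stack[++depth] = ip;        /* ... push only when accepted, nothing to undo */
    }

    int main(void)
    {
            trace_call(0x1000);             /* accepted */
            trace_call(0x1001);             /* filtered out, stack untouched */
            printf("entries pushed: %d\n", depth + 1);
            return 0;
    }
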
index fd8db63dfc942a211d9dfe95b8c2bff88b90ed8d..429afcc480cb2e86c7e684e81c6f1f1160ee3b05 100644 (file)
@@ -437,7 +437,7 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-       .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+       .long 3, 0xc100efe3, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
        .long 2, 0xc100efe3, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
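
The zEC12 architecture-level-set word changes from 0xf46ce000 to 0xf46ce800, i.e. one more facility bit becomes mandatory. Assuming the leading .long is a word count and the following words list required facilities in MSB0 order (bit 0 = most significant bit of the first word), the added bit number can be computed as below; the sketch only does the arithmetic and does not try to name the facility:

    #include <stdio.h>

    int main(void)
    {
            /* facility words after the leading count, old vs. new zEC12 line */
            unsigned int old_als[3] = { 0xc100efe3u, 0xf46ce000u, 0x00400000u };
            unsigned int new_als[3] = { 0xc100efe3u, 0xf46ce800u, 0x00400000u };
            int i;

            for (i = 0; i < 3; i++) {
                    unsigned int diff = old_als[i] ^ new_als[i];

                    if (diff)   /* count of leading zeros == MSB0 bit inside the word */
                            printf("added facility bit: %d\n",
                                   i * 32 + __builtin_clz(diff));
            }
            return 0;
    }
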
index feb719d3c85160b586dd6af4d936f363936c4419..633ca7504536c10a517667b5f582653f763390e3 100644 (file)
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
        __ctl_clear_bit(0,28);
 
        /* Set new machine check handler */
-       S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+       S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
 
        /* Set new program check handler */
-       S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+       S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
 
index 0ce9fb245034d67bf8f44dd7c9234ef954679a51..59a9c35c4598ae3265c60f69f8f87ca0a1538c2d 100644 (file)
 #include <linux/stop_machine.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-#include <asm/sections.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/hardirq.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/dis.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe);
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -59,6 +60,8 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
 
 static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
+       if (!is_known_insn((unsigned char *)insn))
+               return -EINVAL;
        switch (insn[0] >> 8) {
        case 0x0c:      /* bassm */
        case 0x0b:      /* bsm   */
@@ -67,6 +70,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
        case 0xac:      /* stnsm */
        case 0xad:      /* stosm */
                return -EINVAL;
+       case 0xc6:
+               switch (insn[0] & 0x0f) {
+               case 0x00: /* exrl   */
+                       return -EINVAL;
+               }
        }
        switch (insn[0]) {
        case 0x0101:    /* pr    */
@@ -180,7 +188,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
                break;
        case 0xc6:
                switch (insn[0] & 0x0f) {
-               case 0x00: /* exrl   */
                case 0x02: /* pfdrl  */
                case 0x04: /* cghrl  */
                case 0x05: /* chrl   */
@@ -204,7 +211,7 @@ static void __kprobes copy_instruction(struct kprobe *p)
        s64 disp, new_disp;
        u64 addr, new_addr;
 
-       memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+       memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
        if (!is_insn_relative_long(p->ainsn.insn))
                return;
        /*
@@ -248,7 +255,7 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
        p->ainsn.insn = NULL;
        if (is_kernel_addr(p->addr))
                p->ainsn.insn = get_dmainsn_slot();
-       if (is_module_addr(p->addr))
+       else if (is_module_addr(p->addr))
                p->ainsn.insn = get_insn_slot();
        return p->ainsn.insn ? 0 : -ENOMEM;
 }
@@ -604,7 +611,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
-               int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
+               int ilen = insn_length(p->ainsn.insn[0] >> 8);
                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }
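
copy_instruction() and resume_execution() switch from the open-coded length formula ((opcode >> 14) + 3) & -2 to insn_length(opcode >> 8). Both expressions only look at the two most significant bits of the first halfword, so they agree everywhere; a quick exhaustive check (stand-alone user-space C):

    #include <assert.h>
    #include <stdio.h>

    static int insn_length(unsigned char code)  /* formula from the dis hunks above */
    {
            return ((((int) code + 64) >> 7) + 1) << 1;
    }

    int main(void)
    {
            unsigned int op;

            for (op = 0; op <= 0xffff; op++) {  /* every possible first halfword */
                    int old_way = ((op >> 14) + 3) & -2;
                    int new_way = insn_length(op >> 8);

                    assert(old_way == new_way);
            }
            printf("both length formulas agree for all 65536 halfwords\n");
            return 0;
    }
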
index c5dbb335716d5e2cdc864fc6b189a46af74ba5d3..7ed0d4e2a435452733173767b56af52163334980 100644 (file)
@@ -139,7 +139,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(&frame->childregs, 0, sizeof(struct pt_regs));
-               frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT |
+               frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
                                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
                frame->childregs.psw.addr = PSW_ADDR_AMODE |
                                (unsigned long) kernel_thread_starter;
@@ -165,7 +165,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
         * save fprs to current->thread.fp_regs to merge them with
         * the emulated registers and then copy the result to the child.
         */
-       save_fp_regs(&current->thread.fp_regs);
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
        memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
               sizeof(s390_fp_regs));
        /* Set a new TLS ?  */
@@ -173,7 +174,9 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
                p->thread.acrs[0] = frame->childregs.gprs[6];
 #else /* CONFIG_64BIT */
        /* Save the fpu registers to new thread structure. */
-       save_fp_regs(&p->thread.fp_regs);
+       save_fp_ctl(&p->thread.fp_regs.fpc);
+       save_fp_regs(p->thread.fp_regs.fprs);
+       p->thread.fp_regs.pad = 0;
        /* Set a new TLS ?  */
        if (clone_flags & CLONE_SETTLS) {
                unsigned long tls = frame->childregs.gprs[6];
@@ -205,10 +208,12 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
         * save fprs to current->thread.fp_regs to merge them with
         * the emulated registers and then copy the result to the dump.
         */
-       save_fp_regs(&current->thread.fp_regs);
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
        memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
 #else /* CONFIG_64BIT */
-       save_fp_regs(fpregs);
+       save_fp_ctl(&fpregs->fpc);
+       save_fp_regs(fpregs->fprs);
 #endif /* CONFIG_64BIT */
        return 1;
 }
index 9556905bd3ce42c046052a54aa32ed5b07d9d559..e65c91c591e8b99e4189f31b403a35ad06837f4f 100644 (file)
@@ -198,9 +198,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
                 * psw and gprs are stored on the stack
                 */
                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
-               if (addr == (addr_t) &dummy->regs.psw.mask)
+               if (addr == (addr_t) &dummy->regs.psw.mask) {
                        /* Return a clean psw mask. */
-                       tmp = psw_user_bits | (tmp & PSW_MASK_USER);
+                       tmp &= PSW_MASK_USER | PSW_MASK_RI;
+                       tmp |= PSW_USER_BITS;
+               }
 
        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
                /*
@@ -239,8 +241,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
                if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
-                       tmp &= (unsigned long) FPC_VALID_MASK
-                               << (BITS_PER_LONG - 32);
+                       tmp <<= BITS_PER_LONG - 32;
 
        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
@@ -321,11 +322,15 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * psw and gprs are stored on the stack
                 */
-               if (addr == (addr_t) &dummy->regs.psw.mask &&
-                   ((data & ~PSW_MASK_USER) != psw_user_bits ||
-                    ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
-                       /* Invalid psw mask. */
-                       return -EINVAL;
+               if (addr == (addr_t) &dummy->regs.psw.mask) {
+                       unsigned long mask = PSW_MASK_USER;
+
+                       mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
+                       if ((data & ~mask) != PSW_USER_BITS)
+                               return -EINVAL;
+                       if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+                               return -EINVAL;
+               }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -363,10 +368,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * floating point regs. are stored in the thread structure
                 */
-               if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
-                   (data & ~((unsigned long) FPC_VALID_MASK
-                             << (BITS_PER_LONG - 32))) != 0)
-                       return -EINVAL;
+               if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
+                       if ((unsigned int) data != 0 ||
+                           test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+                               return -EINVAL;
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
 
@@ -557,7 +562,8 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
                        tmp = (__u32)(regs->psw.mask >> 32);
-                       tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
+                       tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
+                       tmp |= PSW32_USER_BITS;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
                        tmp = (__u32) regs->psw.addr |
@@ -654,13 +660,16 @@ static int __poke_user_compat(struct task_struct *child,
                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
+                       __u32 mask = PSW32_MASK_USER;
+
+                       mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+                       if ((tmp & ~mask) != PSW32_USER_BITS)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
-                               (__u64)(tmp & PSW32_MASK_USER) << 32;
+                               (__u64)(tmp & mask) << 32;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
                        regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
@@ -696,8 +705,7 @@ static int __poke_user_compat(struct task_struct *child,
                 * floating point regs. are stored in the thread structure 
                 */
                if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
-                   (tmp & ~FPC_VALID_MASK) != 0)
-                       /* Invalid floating point control. */
+                   test_fp_ctl(tmp))
                        return -EINVAL;
                offset = addr - (addr_t) &dummy32->regs.fp_regs;
                *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
@@ -895,8 +903,10 @@ static int s390_fpregs_get(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, void *kbuf, void __user *ubuf)
 {
-       if (target == current)
-               save_fp_regs(&target->thread.fp_regs);
+       if (target == current) {
+               save_fp_ctl(&target->thread.fp_regs.fpc);
+               save_fp_regs(target->thread.fp_regs.fprs);
+       }
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_regs, 0, -1);
@@ -909,19 +919,21 @@ static int s390_fpregs_set(struct task_struct *target,
 {
        int rc = 0;
 
-       if (target == current)
-               save_fp_regs(&target->thread.fp_regs);
+       if (target == current) {
+               save_fp_ctl(&target->thread.fp_regs.fpc);
+               save_fp_regs(target->thread.fp_regs.fprs);
+       }
 
        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
-               u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
-               rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
+               u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
+               rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
                                        0, offsetof(s390_fp_regs, fprs));
                if (rc)
                        return rc;
-               if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
+               if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
                        return -EINVAL;
-               target->thread.fp_regs.fpc = fpc[0];
+               target->thread.fp_regs.fpc = ufpc[0];
        }
 
        if (rc == 0 && count > 0)
@@ -929,8 +941,10 @@ static int s390_fpregs_set(struct task_struct *target,
                                        target->thread.fp_regs.fprs,
                                        offsetof(s390_fp_regs, fprs), -1);
 
-       if (rc == 0 && target == current)
-               restore_fp_regs(&target->thread.fp_regs);
+       if (rc == 0 && target == current) {
+               restore_fp_ctl(&target->thread.fp_regs.fpc);
+               restore_fp_regs(target->thread.fp_regs.fprs);
+       }
 
        return rc;
 }
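
The __poke_user()/__poke_user_compat() changes turn the PSW mask check into an explicit whitelist: only the PSW_MASK_USER bits (plus PSW_MASK_RI for runtime-instrumentation tasks) may differ from PSW_USER_BITS, and an extended-addressing bit without the basic-addressing bit is rejected. A minimal sketch of that whitelist pattern; all bit values below are placeholders, not the real s390 PSW definitions:

    #include <stdio.h>

    /* Placeholder bit values, NOT the real PSW constants. */
    #define USER_BITS       0x0001UL        /* stands in for PSW_USER_BITS */
    #define MASK_EA         0x0200UL        /* stands in for PSW_MASK_EA   */
    #define MASK_BA         0x0400UL        /* stands in for PSW_MASK_BA   */
    #define MASK_RI         0x0100UL        /* stands in for PSW_MASK_RI   */
    #define MASK_USER       (0x00f0UL | MASK_EA | MASK_BA)

    static int psw_mask_ok(unsigned long data, int is_ri_task)
    {
            unsigned long mask = MASK_USER | (is_ri_task ? MASK_RI : 0);

            if ((data & ~mask) != USER_BITS)        /* only whitelisted bits may vary */
                    return 0;
            if ((data & MASK_EA) && !(data & MASK_BA))
                    return 0;                       /* EA without BA is invalid */
            return 1;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   psw_mask_ok(USER_BITS | 0x0010UL, 0),        /* 1: allowed bit    */
                   psw_mask_ok(USER_BITS | MASK_RI, 0),         /* 0: RI not allowed */
                   psw_mask_ok(USER_BITS | MASK_EA, 0));        /* 0: EA without BA  */
            return 0;
    }
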
index e1c9d1c292fa2ce4afa9a7d777b2d1db08024044..d817cce7e72de862081f57a2ef6b0936c64fcc60 100644 (file)
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
 static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
 {
        cb->buf_limit = 0xfff;
-       if (s390_user_mode == HOME_SPACE_MODE)
-               cb->home_space = 1;
        cb->int_requested = 1;
        cb->pstate = 1;
        cb->pstate_set_buf = 1;
index aeed8a61fa0d4f1b4862a98cab44e29beda63699..ffe1c53264a708352622d0159f437f1e176d2a75 100644 (file)
 #include <asm/sclp.h>
 #include "entry.h"
 
-long psw_kernel_bits   = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
-                         PSW_MASK_EA | PSW_MASK_BA;
-long psw_user_bits     = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
-                         PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
-                         PSW_MASK_PSTATE | PSW_ASC_HOME;
-
 /*
  * User copy operations.
  */
@@ -300,43 +294,14 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
-EXPORT_SYMBOL_GPL(s390_user_mode);
-
-static void __init set_user_mode_primary(void)
-{
-       psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
-       psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
-#ifdef CONFIG_COMPAT
-       psw32_user_bits =
-               (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
-#endif
-       uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
-}
-
 static int __init early_parse_user_mode(char *p)
 {
-       if (p && strcmp(p, "primary") == 0)
-               s390_user_mode = PRIMARY_SPACE_MODE;
-       else if (!p || strcmp(p, "home") == 0)
-               s390_user_mode = HOME_SPACE_MODE;
-       else
-               return 1;
-       return 0;
+       if (!p || strcmp(p, "primary") == 0)
+               return 0;
+       return 1;
 }
 early_param("user_mode", early_parse_user_mode);
 
-static void __init setup_addressing_mode(void)
-{
-       if (s390_user_mode != PRIMARY_SPACE_MODE)
-               return;
-       set_user_mode_primary();
-       if (MACHINE_HAS_MVCOS)
-               pr_info("Address spaces switched, mvcos available\n");
-       else
-               pr_info("Address spaces switched, mvcos not available\n");
-}
-
 void *restart_stack __attribute__((__section__(".data")));
 
 static void __init setup_lowcore(void)
@@ -348,24 +313,24 @@ static void __init setup_lowcore(void)
         */
        BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
        lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
-       lc->restart_psw.mask = psw_kernel_bits;
+       lc->restart_psw.mask = PSW_KERNEL_BITS;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-       lc->external_new_psw.mask = psw_kernel_bits |
+       lc->external_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
-       lc->svc_new_psw.mask = psw_kernel_bits |
+       lc->svc_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
-       lc->program_new_psw.mask = psw_kernel_bits |
+       lc->program_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
-       lc->mcck_new_psw.mask = psw_kernel_bits;
+       lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
-       lc->io_new_psw.mask = psw_kernel_bits |
+       lc->io_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
@@ -1043,10 +1008,7 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;
 
-       if (MACHINE_HAS_MVCOS)
-               memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
-       else
-               memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
+       uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
 
        parse_early_param();
        detect_memory_layout(memory_chunk, memory_end);
@@ -1054,7 +1016,6 @@ void __init setup_arch(char **cmdline_p)
        setup_ipl();
        reserve_oldmem();
        setup_memory_end();
-       setup_addressing_mode();
        reserve_crashkernel();
        setup_memory();
        setup_resources();
index c45becf82e0179e17fcc9b1372b1ba25ef7c2131..fb535874a2464853168c9a9182620acf10e37c74 100644 (file)
@@ -57,40 +57,48 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 
        /* Copy a 'clean' PSW mask to the user to avoid leaking
           information about whether PER is currently on.  */
-       user_sregs.regs.psw.mask = psw_user_bits |
-               (regs->psw.mask & PSW_MASK_USER);
+       user_sregs.regs.psw.mask = PSW_USER_BITS |
+               (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
        user_sregs.regs.psw.addr = regs->psw.addr;
        memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
        memcpy(&user_sregs.regs.acrs, current->thread.acrs,
-              sizeof(sregs->regs.acrs));
+              sizeof(user_sregs.regs.acrs));
        /* 
         * We have to store the fp registers to current->thread.fp_regs
         * to merge them with the emulated registers.
         */
-       save_fp_regs(&current->thread.fp_regs);
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
        memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
-              sizeof(s390_fp_regs));
-       return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
+              sizeof(user_sregs.fpregs));
+       if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
+               return -EFAULT;
+       return 0;
 }
 
-/* Returns positive number on error */
 static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 {
-       int err;
        _sigregs user_sregs;
 
        /* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
-       if (err)
-               return err;
-       /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
+       if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
+               return -EFAULT;
+
+       if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
+               return -EINVAL;
+
+       /* Loading the floating-point-control word can fail. Do that first. */
+       if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+               return -EINVAL;
+
+       /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
-               (user_sregs.regs.psw.mask & PSW_MASK_USER);
+               (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
        /* Check for invalid user address space control. */
-       if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
-               regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+       if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+               regs->psw.mask = PSW_ASC_PRIMARY |
                        (regs->psw.mask & ~PSW_MASK_ASC);
        /* Check for invalid amode */
        if (regs->psw.mask & PSW_MASK_EA)
@@ -98,14 +106,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
        regs->psw.addr = user_sregs.regs.psw.addr;
        memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
        memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
-              sizeof(sregs->regs.acrs));
+              sizeof(current->thread.acrs));
        restore_access_regs(current->thread.acrs);
 
        memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
-              sizeof(s390_fp_regs));
-       current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+              sizeof(current->thread.fp_regs));
 
-       restore_fp_regs(&current->thread.fp_regs);
+       restore_fp_regs(current->thread.fp_regs.fprs);
        clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
        return 0;
 }
@@ -224,7 +231,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        regs->gprs[15] = (unsigned long) frame;
        /* Force default amode and default user address space control. */
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
@@ -295,7 +302,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->gprs[15] = (unsigned long) frame;
        /* Force default amode and default user address space control. */
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
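
restore_sigregs() now copies the whole block into a local buffer, rejects an unexpected PSW_MASK_RI bit, and loads the floating-point-control word first, precisely because that load can fail, before any live register state is touched. A minimal user-space sketch of the same ordering; FPC_VALID_MASK (0xF8F8FF03, kept only by the math-emu code further down) merely serves as a stand-in validity rule here:

    #include <stdio.h>
    #include <string.h>

    #define FPC_VALID_MASK 0xF8F8FF03u      /* stand-in validity rule for the sketch */

    struct sigregs_demo { unsigned int fpc; unsigned long gprs[16]; };

    static int restore(struct sigregs_demo *live, const struct sigregs_demo *user)
    {
            struct sigregs_demo tmp;

            memcpy(&tmp, user, sizeof(tmp));        /* stand-in for __copy_from_user() */
            if (tmp.fpc & ~FPC_VALID_MASK)          /* the step that can fail: do it first */
                    return -1;
            memcpy(live, &tmp, sizeof(*live));      /* only now modify live state */
            return 0;
    }

    int main(void)
    {
            struct sigregs_demo live = { 0 }, bad = { .fpc = 0x04000000u };

            printf("restore from an invalid fpc: %d\n", restore(&live, &bad));
            return 0;
    }
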
index 1a4313a1b60f76e20ac50ff31b5c0d2e0bf95492..739313db71e5e2431f32c59472cdaa89ccbae2ef 100644 (file)
@@ -283,7 +283,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
        struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned long source_cpu = stap();
 
-       __load_psw_mask(psw_kernel_bits);
+       __load_psw_mask(PSW_KERNEL_BITS);
        if (pcpu->address == source_cpu)
                func(data);     /* should not return */
        /* Stop target cpu (if func returns this stops the current cpu). */
@@ -395,7 +395,7 @@ void smp_send_stop(void)
        int cpu;
 
        /* Disable all interrupts/machine checks */
-       __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        trace_hardirqs_off();
 
        debug_set_critical();
@@ -533,9 +533,6 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
 
-struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
-
 static void __init smp_get_save_area(int cpu, u16 address)
 {
        void *lc = pcpu_devices[0].lowcore;
@@ -546,15 +543,9 @@ static void __init smp_get_save_area(int cpu, u16 address)
        if (!OLDMEM_BASE && (address == boot_cpu_address ||
                             ipl_info.type != IPL_TYPE_FCP_DUMP))
                return;
-       if (cpu >= NR_CPUS) {
-               pr_warning("CPU %i exceeds the maximum %i and is excluded "
-                          "from the dump\n", cpu, NR_CPUS - 1);
-               return;
-       }
-       save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
+       save_area = dump_save_area_create(cpu);
        if (!save_area)
                panic("could not allocate memory for save area\n");
-       zfcpdump_save_areas[cpu] = save_area;
 #ifdef CONFIG_CRASH_DUMP
        if (address == boot_cpu_address) {
                /* Copy the registers of the boot cpu. */
@@ -693,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
        S390_lowcore.restart_source = -1UL;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-       __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
index 05d75c413137879a30fded476638b0b9c4a001f5..a84476f2a9bb3ae488eb7a936021a744a822dd31 100644 (file)
@@ -84,8 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
  */
 static void vdso_init_data(struct vdso_data *vd)
 {
-       vd->ectg_available =
-               s390_user_mode != HOME_SPACE_MODE && test_facility(31);
+       vd->ectg_available = test_facility(31);
 }
 
 #ifdef CONFIG_64BIT
@@ -102,7 +101,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
        lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-       if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+       if (!vdso_enabled)
                return 0;
 
        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +146,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
 
-       if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+       if (!vdso_enabled)
                return;
 
        psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +164,7 @@ static void vdso_init_cr5(void)
 {
        unsigned long cr5;
 
-       if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+       if (!vdso_enabled)
                return;
        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
index abcfab55f99b37e5d4f79903be716e67896d5c79..e312c48a1c405741a4e1675002aa9bf16003f821 100644 (file)
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
        trace_hardirqs_on();
 
        /* Wait for external, I/O or machine check interrupt. */
-       psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+       psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        idle->nohz_delay = 0;
 
index 7f35cb33e5102008244b4fd1376954400f2d0009..7f1f7ac5cf7f8a2c3f3966d4fe96fa23af90ea04 100644 (file)
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        }
 
        if ((!rc) && (vcpu->arch.sie_block->ckc <
-               get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+               get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                        (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                goto no_timer;
        }
 
-       now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+       now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        }
 
        if ((vcpu->arch.sie_block->ckc <
-               get_tod_clock() + vcpu->arch.sie_block->epoch))
+               get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);
 
        if (atomic_read(&fi->active)) {
index 776dafe918db30b8c3f4823b8bf11c461bed6f5f..ed8064cb5c4921424d5981b890e6fd9b07f9ed02 100644 (file)
@@ -343,10 +343,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       save_fp_regs(&vcpu->arch.host_fpregs);
+       save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+       save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
-       vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
-       restore_fp_regs(&vcpu->arch.guest_fpregs);
+       restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -356,9 +357,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
-       save_fp_regs(&vcpu->arch.guest_fpregs);
+       save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
-       restore_fp_regs(&vcpu->arch.host_fpregs);
+       restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+       restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
 }
 
@@ -618,9 +621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+       if (test_fp_ctl(fpu->fpc))
+               return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
-       restore_fp_regs(&vcpu->arch.guest_fpregs);
+       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+       restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
 }
 
@@ -876,7 +882,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * copying in vcpu load/put. Lets update our copies before we save
         * it into the save area
         */
-       save_fp_regs(&vcpu->arch.guest_fpregs);
+       save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
 
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
index c2f582bb1cb262e421072a9b1500c961f4e7426a..0c991c6748ab3a4d860c890b7c1b1526808d4ed6 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/tracepoint.h>
 #include <asm/sigp.h>
 #include <asm/debug.h>
+#include <asm/dis.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
index 20b0e97a7df2e204f9a680be4d95c86e0944652d..b068729e50ace9711774adab984f03a8a41e9338 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_pt.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
index 57c87d7d7ede01add784c291f31a82f502ca877e..a9f3d0042d58ba849b8c624f23915d88d03d6f70 100644 (file)
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
        do {
                set_clock_comparator(end);
                vtime_stop_cpu();
-       } while (get_tod_clock() < end);
+       } while (get_tod_clock_fast() < end);
        lockdep_on();
        __ctl_load(cr0, 0, 0);
        __ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
        u64 clock_saved, end;
 
-       end = get_tod_clock() + (usecs << 12);
+       end = get_tod_clock_fast() + (usecs << 12);
        do {
                clock_saved = 0;
                if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
                vtime_stop_cpu();
                if (clock_saved)
                        local_tick_enable(clock_saved);
-       } while (get_tod_clock() < end);
+       } while (get_tod_clock_fast() < end);
 }
 
 /*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
 {
        u64 end;
 
-       end = get_tod_clock() + (usecs << 12);
-       while (get_tod_clock() < end)
+       end = get_tod_clock_fast() + (usecs << 12);
+       while (get_tod_clock_fast() < end)
                cpu_relax();
 }
 
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
 
        nsecs <<= 9;
        do_div(nsecs, 125);
-       end = get_tod_clock() + nsecs;
+       end = get_tod_clock_fast() + nsecs;
        if (nsecs & ~0xfffUL)
                __udelay(nsecs >> 12);
-       while (get_tod_clock() < end)
+       while (get_tod_clock_fast() < end)
                barrier();
 }
 EXPORT_SYMBOL(__ndelay);
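
All delay loops now read the clock with get_tod_clock_fast(), but the unit conversions are unchanged: bit 51 of the s390 TOD clock ticks once per microsecond, so one microsecond equals 4096 clock units ('usecs << 12'), and nanoseconds are converted with '<< 9' followed by a division by 125 (512/125 == 4096/1000). A quick arithmetic check:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long usecs = 250, nsecs = 1000;

            /* 1 microsecond == 4096 TOD clock units */
            unsigned long long tod_from_us = usecs << 12;

            /* same rate for nanoseconds: * 512 / 125 == * 4096 / 1000 */
            unsigned long long tod_from_ns = (nsecs << 9) / 125;

            assert(tod_from_us == 250 * 4096ULL);
            assert(tod_from_ns == 4096ULL);     /* 1000 ns == 1 us == 4096 units */
            printf("250us -> %llu units, 1000ns -> %llu units\n",
                   tod_from_us, tod_from_ns);
            return 0;
    }
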
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644 (file)
index 0000000..620d34d
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG - 1)) {
+               if ((tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+       if (!tmp)               /* Are any bits set? */
+               return result + size;   /* Nope. */
+found:
+       return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_inv);
+
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+                               unsigned long offset)
+{
+       const unsigned long *p = addr + (offset / BITS_PER_LONG);
+       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL >> offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+found_first:
+       tmp &= (~0UL << (BITS_PER_LONG - size));
+       if (!tmp)               /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_inv);
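
The new find.c scans bitmaps in MSB0 numbering, where bit 0 is the most significant bit of the first word, because the hardware fills these bitmaps from that end; per word the conversion is '__fls(tmp) ^ (BITS_PER_LONG - 1)'. A compact user-space sketch of the same scan using a compiler builtin; it is illustrative only and, unlike the kernel version, assumes the size is a multiple of the word width:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int) sizeof(unsigned long))

    /* MSB0 index of the first set bit, or 'size' if none is set. */
    static unsigned long first_bit_inv(const unsigned long *addr, unsigned long size)
    {
            unsigned long i;

            for (i = 0; i < size; i += BITS_PER_LONG) {
                    unsigned long word = addr[i / BITS_PER_LONG];

                    if (word)   /* leading zeros == MSB0 offset inside the word */
                            return i + __builtin_clzl(word);
            }
            return size;
    }

    int main(void)
    {
            /* second word, third bit from its MSB -> MSB0 index BITS_PER_LONG + 2 */
            unsigned long map[2] = { 0, 1UL << (BITS_PER_LONG - 3) };

            printf("first set bit (MSB0): %lu\n",
                   first_bit_inv(map, 2 * BITS_PER_LONG));
            return 0;
    }
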
index 1829742bf4793fae0060dbb4bd31b4e0bfbacc4f..4b7993bf69b96bd42f503e8772f29caedf5dd4b0 100644 (file)
@@ -65,13 +65,6 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
        return size;
 }
 
-static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
-{
-       if (size <= 256)
-               return copy_from_user_std(size, ptr, x);
-       return copy_from_user_mvcos(size, ptr, x);
-}
-
 static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 {
        register unsigned long reg0 asm("0") = 0x810000UL;
@@ -101,14 +94,6 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
        return size;
 }
 
-static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
-                                      const void *x)
-{
-       if (size <= 256)
-               return copy_to_user_std(size, ptr, x);
-       return copy_to_user_mvcos(size, ptr, x);
-}
-
 static size_t copy_in_user_mvcos(size_t size, void __user *to,
                                 const void __user *from)
 {
@@ -201,23 +186,8 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 }
 
 struct uaccess_ops uaccess_mvcos = {
-       .copy_from_user = copy_from_user_mvcos_check,
-       .copy_from_user_small = copy_from_user_std,
-       .copy_to_user = copy_to_user_mvcos_check,
-       .copy_to_user_small = copy_to_user_std,
-       .copy_in_user = copy_in_user_mvcos,
-       .clear_user = clear_user_mvcos,
-       .strnlen_user = strnlen_user_std,
-       .strncpy_from_user = strncpy_from_user_std,
-       .futex_atomic_op = futex_atomic_op_std,
-       .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
-
-struct uaccess_ops uaccess_mvcos_switch = {
        .copy_from_user = copy_from_user_mvcos,
-       .copy_from_user_small = copy_from_user_mvcos,
        .copy_to_user = copy_to_user_mvcos,
-       .copy_to_user_small = copy_to_user_mvcos,
        .copy_in_user = copy_in_user_mvcos,
        .clear_user = clear_user_mvcos,
        .strnlen_user = strnlen_user_mvcos,
index 1694d738b17527aad71850c8fc772e755d26ca54..97e03caf782598a20857ab276ff03df701c4c3f9 100644 (file)
@@ -461,9 +461,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 
 struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
-       .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
-       .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644 (file)
index 4a75d47..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *  Standard user space access functions based on mvcp/mvcs and doing
- *  interesting things in the secondary space mode.
- *
- *    Copyright IBM Corp. 2006
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *              Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI    "ahi"
-#define ALR    "alr"
-#define CLR    "clr"
-#define LHI    "lhi"
-#define SLR    "slr"
-#else
-#define AHI    "aghi"
-#define ALR    "algr"
-#define CLR    "clgr"
-#define LHI    "lghi"
-#define SLR    "slgr"
-#endif
-
-size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
-{
-       unsigned long tmp1, tmp2;
-
-       tmp1 = -256UL;
-       asm volatile(
-               "0: mvcp  0(%0,%2),0(%1),%3\n"
-               "10:jz    8f\n"
-               "1:"ALR"  %0,%3\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "2: mvcp  0(%0,%2),0(%1),%3\n"
-               "11:jnz   1b\n"
-               "   j     8f\n"
-               "3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
-               "  "LHI"  %3,-4096\n"
-               "   nr    %4,%3\n"      /* %4 = (ptr + 255) & -4096 */
-               "  "SLR"  %4,%1\n"
-               "  "CLR"  %0,%4\n"      /* copy crosses next page boundary? */
-               "   jnh   5f\n"
-               "4: mvcp  0(%4,%2),0(%1),%3\n"
-               "12:"SLR"  %0,%4\n"
-               "  "ALR"  %2,%4\n"
-               "5:"LHI"  %4,-1\n"
-               "  "ALR"  %4,%0\n"      /* copy remaining size, subtract 1 */
-               "   bras  %3,7f\n"      /* memset loop */
-               "   xc    0(1,%2),0(%2)\n"
-               "6: xc    0(256,%2),0(%2)\n"
-               "   la    %2,256(%2)\n"
-               "7:"AHI"  %4,-256\n"
-               "   jnm   6b\n"
-               "   ex    %4,0(%3)\n"
-               "   j     9f\n"
-               "8:"SLR"  %0,%0\n"
-               "9: \n"
-               EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
-               EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
-               : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-               : : "cc", "memory");
-       return size;
-}
-
-static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
-                                      void *x)
-{
-       if (size <= 1024)
-               return copy_from_user_std(size, ptr, x);
-       return copy_from_user_pt(size, ptr, x);
-}
-
-size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
-{
-       unsigned long tmp1, tmp2;
-
-       tmp1 = -256UL;
-       asm volatile(
-               "0: mvcs  0(%0,%1),0(%2),%3\n"
-               "7: jz    5f\n"
-               "1:"ALR"  %0,%3\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "2: mvcs  0(%0,%1),0(%2),%3\n"
-               "8: jnz   1b\n"
-               "   j     5f\n"
-               "3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
-               "  "LHI"  %3,-4096\n"
-               "   nr    %4,%3\n"      /* %4 = (ptr + 255) & -4096 */
-               "  "SLR"  %4,%1\n"
-               "  "CLR"  %0,%4\n"      /* copy crosses next page boundary? */
-               "   jnh   6f\n"
-               "4: mvcs  0(%4,%1),0(%2),%3\n"
-               "9:"SLR"  %0,%4\n"
-               "   j     6f\n"
-               "5:"SLR"  %0,%0\n"
-               "6: \n"
-               EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
-               EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
-               : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-               : : "cc", "memory");
-       return size;
-}
-
-static size_t copy_to_user_std_check(size_t size, void __user *ptr,
-                                    const void *x)
-{
-       if (size <= 1024)
-               return copy_to_user_std(size, ptr, x);
-       return copy_to_user_pt(size, ptr, x);
-}
-
-static size_t copy_in_user_std(size_t size, void __user *to,
-                              const void __user *from)
-{
-       unsigned long tmp1;
-
-       asm volatile(
-               "   sacf  256\n"
-               "  "AHI"  %0,-1\n"
-               "   jo    5f\n"
-               "   bras  %3,3f\n"
-               "0:"AHI"  %0,257\n"
-               "1: mvc   0(1,%1),0(%2)\n"
-               "   la    %1,1(%1)\n"
-               "   la    %2,1(%2)\n"
-               "  "AHI"  %0,-1\n"
-               "   jnz   1b\n"
-               "   j     5f\n"
-               "2: mvc   0(256,%1),0(%2)\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "3:"AHI"  %0,-256\n"
-               "   jnm   2b\n"
-               "4: ex    %0,1b-0b(%3)\n"
-               "5: "SLR"  %0,%0\n"
-               "6: sacf  0\n"
-               EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
-               : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
-               : : "cc", "memory");
-       return size;
-}
-
-static size_t clear_user_std(size_t size, void __user *to)
-{
-       unsigned long tmp1, tmp2;
-
-       asm volatile(
-               "   sacf  256\n"
-               "  "AHI"  %0,-1\n"
-               "   jo    5f\n"
-               "   bras  %3,3f\n"
-               "   xc    0(1,%1),0(%1)\n"
-               "0:"AHI"  %0,257\n"
-               "   la    %2,255(%1)\n" /* %2 = ptr + 255 */
-               "   srl   %2,12\n"
-               "   sll   %2,12\n"      /* %2 = (ptr + 255) & -4096 */
-               "  "SLR"  %2,%1\n"
-               "  "CLR"  %0,%2\n"      /* clear crosses next page boundary? */
-               "   jnh   5f\n"
-               "  "AHI"  %2,-1\n"
-               "1: ex    %2,0(%3)\n"
-               "  "AHI"  %2,1\n"
-               "  "SLR"  %0,%2\n"
-               "   j     5f\n"
-               "2: xc    0(256,%1),0(%1)\n"
-               "   la    %1,256(%1)\n"
-               "3:"AHI"  %0,-256\n"
-               "   jnm   2b\n"
-               "4: ex    %0,0(%3)\n"
-               "5: "SLR"  %0,%0\n"
-               "6: sacf  0\n"
-               EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
-               : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
-               : : "cc", "memory");
-       return size;
-}
-
-size_t strnlen_user_std(size_t size, const char __user *src)
-{
-       register unsigned long reg0 asm("0") = 0UL;
-       unsigned long tmp1, tmp2;
-
-       if (unlikely(!size))
-               return 0;
-       asm volatile(
-               "   la    %2,0(%1)\n"
-               "   la    %3,0(%0,%1)\n"
-               "  "SLR"  %0,%0\n"
-               "   sacf  256\n"
-               "0: srst  %3,%2\n"
-               "   jo    0b\n"
-               "   la    %0,1(%3)\n"   /* strnlen_user results includes \0 */
-               "  "SLR"  %0,%1\n"
-               "1: sacf  0\n"
-               EX_TABLE(0b,1b)
-               : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
-               : "d" (reg0) : "cc", "memory");
-       return size;
-}
-
-size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
-{
-       size_t done, len, offset, len_str;
-
-       if (unlikely(!count))
-               return 0;
-       done = 0;
-       do {
-               offset = (size_t)src & ~PAGE_MASK;
-               len = min(count - done, PAGE_SIZE - offset);
-               if (copy_from_user_std(len, src, dst))
-                       return -EFAULT;
-               len_str = strnlen(dst, len);
-               done += len_str;
-               src += len_str;
-               dst += len_str;
-       } while ((len_str == len) && (done < count));
-       return done;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
-       asm volatile(                                                   \
-               "   sacf  256\n"                                        \
-               "0: l     %1,0(%6)\n"                                   \
-               "1:"insn                                                \
-               "2: cs    %1,%2,0(%6)\n"                                \
-               "3: jl    1b\n"                                         \
-               "   lhi   %0,0\n"                                       \
-               "4: sacf  0\n"                                          \
-               EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)         \
-               : "=d" (ret), "=&d" (oldval), "=&d" (newval),           \
-                 "=m" (*uaddr)                                         \
-               : "0" (-EFAULT), "d" (oparg), "a" (uaddr),              \
-                 "m" (*uaddr) : "cc");
-
-int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
-{
-       int oldval = 0, newval, ret;
-
-       switch (op) {
-       case FUTEX_OP_SET:
-               __futex_atomic_op("lr %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_ADD:
-               __futex_atomic_op("lr %2,%1\nar %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_OR:
-               __futex_atomic_op("lr %2,%1\nor %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_ANDN:
-               __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_XOR:
-               __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       default:
-               ret = -ENOSYS;
-       }
-       *old = oldval;
-       return ret;
-}
-
-int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
-                            u32 oldval, u32 newval)
-{
-       int ret;
-
-       asm volatile(
-               "   sacf 256\n"
-               "0: cs   %1,%4,0(%5)\n"
-               "1: la   %0,0\n"
-               "2: sacf 0\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
-               : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
-               : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
-               : "cc", "memory" );
-       *uval = oldval;
-       return ret;
-}
-
-struct uaccess_ops uaccess_std = {
-       .copy_from_user = copy_from_user_std_check,
-       .copy_from_user_small = copy_from_user_std,
-       .copy_to_user = copy_to_user_std_check,
-       .copy_to_user_small = copy_to_user_std,
-       .copy_in_user = copy_in_user_std,
-       .clear_user = clear_user_std,
-       .strnlen_user = strnlen_user_std,
-       .strncpy_from_user = strncpy_from_user_std,
-       .futex_atomic_op = futex_atomic_op_std,
-       .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
index 58bff541fde9c95edcd0db4f49da3aa8629371b9..a6ba0d7243356649e6882b5dc16a85c445ed120e 100644 (file)
@@ -19,6 +19,8 @@
 #include <math-emu/double.h>
 #include <math-emu/quad.h>
 
+#define FPC_VALID_MASK         0xF8F8FF03
+
 /*
  * I miss a macro to round a floating point number to the
  * nearest integer in the same floating point format.
index 9d84a1feefef0e57005f6a31ae50079ebec3a15b..76741306af2a9d5193c4a12c2210d520c26c129f 100644 (file)
@@ -257,8 +257,8 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
        char buf[16], *p;
+       unsigned int len;
        long nr;
-       int len;
 
        if (!*lenp || (*ppos && !write)) {
                *lenp = 0;
@@ -298,7 +298,7 @@ static int cmm_timeout_handler(ctl_table *ctl, int write,  void __user *buffer,
 {
        char buf[64], *p;
        long nr, seconds;
-       int len;
+       unsigned int len;
 
        if (!*lenp || (*ppos && !write)) {
                *lenp = 0;
index fc6679210d83249e9b34c2f44070ab1021852e91..8f29762671cf171dc6a79d64f718e4b02d247cb9 100644 (file)
@@ -115,13 +115,8 @@ static inline int user_space_fault(unsigned long trans_exc_code)
        if (trans_exc_code == 2)
                /* Access via secondary space, set_fs setting decides */
                return current->thread.mm_segment.ar4;
-       if (s390_user_mode == HOME_SPACE_MODE)
-               /* User space if the access has been done via home space. */
-               return trans_exc_code == 3;
        /*
-        * If the user space is not the home space the kernel runs in home
-        * space. Access via secondary space has already been covered,
-        * access via primary space or access register is from user space
+        * Access via primary space or access register is from user space
         * and access via home space is from the kernel.
         */
        return trans_exc_code != 3;
@@ -471,7 +466,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
        int access, fault;
 
        /* Emulate a uaccess fault from kernel mode. */
-       regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+       regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
        if (!irqs_disabled())
                regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
        regs.psw.addr = (unsigned long) __builtin_return_address(0);
index 5d758db27bdced58d929d736363bcafc09c199ab..639fce464008854cf3d23c22f6dcbe4a970add68 100644 (file)
@@ -180,9 +180,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
-       if ((end < start) || (end > TASK_SIZE))
+       if ((end <= start) || (end > TASK_SIZE))
                return 0;
-
+       /*
+        * local_irq_save() doesn't prevent pagetable teardown, but does
+        * prevent the pagetables from being freed on s390.
+        *
+        * So long as we atomically load page table pointers versus teardown,
+        * we can follow the address down to the page and take a ref on it.
+        */
        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
@@ -219,63 +225,22 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long addr, len, end;
-       unsigned long next;
-       pgd_t *pgdp, pgd;
-       int nr = 0;
+       int nr, ret;
 
        start &= PAGE_MASK;
-       addr = start;
-       len = (unsigned long) nr_pages << PAGE_SHIFT;
-       end = start + len;
-       if ((end < start) || (end > TASK_SIZE))
-               goto slow_irqon;
-
-       /*
-        * local_irq_disable() doesn't prevent pagetable teardown, but does
-        * prevent the pagetables from being freed on s390.
-        *
-        * So long as we atomically load page table pointers versus teardown,
-        * we can follow the address down to the the page and take a ref on it.
-        */
-       local_irq_disable();
-       pgdp = pgd_offset(mm, addr);
-       do {
-               pgd = *pgdp;
-               barrier();
-               next = pgd_addr_end(addr, end);
-               if (pgd_none(pgd))
-                       goto slow;
-               if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
-                       goto slow;
-       } while (pgdp++, addr = next, addr != end);
-       local_irq_enable();
-
-       VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
-       return nr;
-
-       {
-               int ret;
-slow:
-               local_irq_enable();
-slow_irqon:
-               /* Try to get the remaining pages with get_user_pages */
-               start += nr << PAGE_SHIFT;
-               pages += nr;
-
-               down_read(&mm->mmap_sem);
-               ret = get_user_pages(current, mm, start,
-                       (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
-               up_read(&mm->mmap_sem);
-
-               /* Have to be a bit careful with return values */
-               if (nr > 0) {
-                       if (ret < 0)
-                               ret = nr;
-                       else
-                               ret += nr;
-               }
-
-               return ret;
-       }
+       nr = __get_user_pages_fast(start, nr_pages, write, pages);
+       if (nr == nr_pages)
+               return nr;
+
+       /* Try to get the remaining pages with get_user_pages */
+       start += nr << PAGE_SHIFT;
+       pages += nr;
+       down_read(&mm->mmap_sem);
+       ret = get_user_pages(current, mm, start,
+                            nr_pages - nr, write, 0, pages, NULL);
+       up_read(&mm->mmap_sem);
+       /* Have to be a bit careful with return values */
+       if (nr > 0)
+               ret = (ret < 0) ? nr : ret + nr;
+       return ret;
 }
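
The rewritten get_user_pages_fast() above is now just the fast path plus a slow-path fallback for whatever the fast path could not pin. A minimal sketch of that pattern, with hypothetical fast_pin()/slow_pin() helpers standing in for __get_user_pages_fast() and get_user_pages() (not additional kernel code):

/* Sketch only: fast_pin() and slow_pin() are hypothetical stand-ins for
 * the real __get_user_pages_fast() / get_user_pages() calls in the hunk. */
static int pin_user_range(unsigned long start, int nr_pages, int write,
                          struct page **pages)
{
        int nr, ret;

        nr = fast_pin(start, nr_pages, write, pages);   /* IRQ-off walk, never sleeps */
        if (nr == nr_pages)
                return nr;                              /* everything pinned fast */

        /* Hand the remainder to the sleeping slow path, keeping the
         * pages the fast path already pinned. */
        ret = slow_pin(start + ((unsigned long)nr << PAGE_SHIFT),
                       nr_pages - nr, write, pages + nr);

        /* If the slow path failed but the fast path pinned something,
         * report the partial count instead of the error. */
        if (nr > 0)
                ret = (ret < 0) ? nr : ret + nr;
        return ret;
}
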
index 990397420e6bcf8262b92806b5f5b57bff273373..8400f494623f4591a5de3e5ea6442a08fe767ed4 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 
+#if PAGE_DEFAULT_KEY
 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
 {
        asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -16,7 +17,7 @@ static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
        return addr;
 }
 
-void storage_key_init_range(unsigned long start, unsigned long end)
+void __storage_key_init_range(unsigned long start, unsigned long end)
 {
        unsigned long boundary, size;
 
@@ -36,6 +37,7 @@ void storage_key_init_range(unsigned long start, unsigned long end)
                start += PAGE_SIZE;
        }
 }
+#endif
 
 static pte_t *walk_page_table(unsigned long addr)
 {
index de8cbc30dcd1be13cb34dd0fc0e7ad5a8a68372e..94f37a9fb1e543d2abc1efc318aa80212ae6db13 100644 (file)
@@ -1157,10 +1157,6 @@ int s390_enable_sie(void)
        struct mm_struct *mm = tsk->mm;
        struct mmu_gather tlb;
 
-       /* Do we have switched amode? If no, we cannot do sie */
-       if (s390_user_mode == HOME_SPACE_MODE)
-               return -EINVAL;
-
        /* Do we have pgstes? if yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;
index 709239285869caa29fde3db90ea31da213cb5cb2..16871da3737164d986412ff40cc8fb95c2c7119b 100644 (file)
@@ -12,8 +12,8 @@
 #include <linux/random.h>
 #include <linux/init.h>
 #include <asm/cacheflush.h>
-#include <asm/processor.h>
 #include <asm/facility.h>
+#include <asm/dis.h>
 
 /*
  * Conventions:
@@ -156,8 +156,8 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                EMIT6(0xeb8ff058, 0x0024);
                /* lgr %r14,%r15 */
                EMIT4(0xb90400ef);
-               /* ahi %r15,<offset> */
-               EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
+               /* aghi %r15,<offset> */
+               EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
                /* stg %r14,152(%r15) */
                EMIT6(0xe3e0f098, 0x0024);
        } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
        struct bpf_binary_header *header = (void *)addr;
 
        if (fp->bpf_func == sk_run_filter)
-               return;
+               goto free_filter;
        set_memory_rw(addr, header->pages);
        module_free(NULL, header);
+free_filter:
+       kfree(fp);
 }
index f17a8343e3609d7d644b1d5896fd49bf4fca9e88..0c9a17780e4b9be96288a75a356444761f0085d5 100644 (file)
@@ -120,26 +120,17 @@ EXPORT_SYMBOL_GPL(pci_proc_domain);
 static int zpci_set_airq(struct zpci_dev *zdev)
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
-       struct zpci_fib *fib;
-       int rc;
-
-       fib = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!fib)
-               return -ENOMEM;
+       struct zpci_fib fib = {0};
 
-       fib->isc = PCI_ISC;
-       fib->sum = 1;           /* enable summary notifications */
-       fib->noi = airq_iv_end(zdev->aibv);
-       fib->aibv = (unsigned long) zdev->aibv->vector;
-       fib->aibvo = 0;         /* each zdev has its own interrupt vector */
-       fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
-       fib->aisbo = zdev->aisb & 63;
+       fib.isc = PCI_ISC;
+       fib.sum = 1;            /* enable summary notifications */
+       fib.noi = airq_iv_end(zdev->aibv);
+       fib.aibv = (unsigned long) zdev->aibv->vector;
+       fib.aibvo = 0;          /* each zdev has its own interrupt vector */
+       fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
+       fib.aisbo = zdev->aisb & 63;
 
-       rc = zpci_mod_fc(req, fib);
-       pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
-
-       free_page((unsigned long) fib);
-       return rc;
+       return zpci_mod_fc(req, &fib);
 }
 
 struct mod_pci_args {
@@ -152,22 +143,14 @@ struct mod_pci_args {
 static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
-       struct zpci_fib *fib;
-       int rc;
-
-       /* The FIB must be available even if it's not used */
-       fib = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!fib)
-               return -ENOMEM;
+       struct zpci_fib fib = {0};
 
-       fib->pba = args->base;
-       fib->pal = args->limit;
-       fib->iota = args->iota;
-       fib->fmb_addr = args->fmb_addr;
+       fib.pba = args->base;
+       fib.pal = args->limit;
+       fib.iota = args->iota;
+       fib.fmb_addr = args->fmb_addr;
 
-       rc = zpci_mod_fc(req, fib);
-       free_page((unsigned long) fib);
-       return rc;
+       return zpci_mod_fc(req, &fib);
 }
 
 /* Modify PCI: Register I/O address translation parameters */
@@ -424,7 +407,6 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
        struct msi_msg msg;
        int rc;
 
-       pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
        if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
                return -EINVAL;
        msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
@@ -489,7 +471,6 @@ out_msi:
 out_si:
        airq_iv_free_bit(zpci_aisb_iv, aisb);
 out:
-       dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
        return rc;
 }
 
@@ -499,14 +480,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
        struct msi_desc *msi;
        int rc;
 
-       pr_info("%s: on pdev: %p\n", __func__, pdev);
-
        /* Disable adapter interrupts */
        rc = zpci_clear_airq(zdev);
-       if (rc) {
-               dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
+       if (rc)
                return;
-       }
 
        /* Release MSI interrupts */
        list_for_each_entry(msi, &pdev->msi_list, list) {
@@ -625,8 +602,11 @@ static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned lo
        r->name = name;
 
        rc = request_resource(&iomem_resource, r);
-       if (rc)
-               pr_debug("request resource %pR failed\n", r);
+       if (rc) {
+               kfree(r->name);
+               kfree(r);
+               return ERR_PTR(-ENOMEM);
+       }
        return r;
 }
 
@@ -708,6 +688,47 @@ void pcibios_disable_device(struct pci_dev *pdev)
        zdev->pdev = NULL;
 }
 
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+static int zpci_restore(struct device *dev)
+{
+       struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+       int ret = 0;
+
+       if (zdev->state != ZPCI_FN_STATE_ONLINE)
+               goto out;
+
+       ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+       if (ret)
+               goto out;
+
+       zpci_map_resources(zdev);
+       zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
+                          zdev->start_dma + zdev->iommu_size - 1,
+                          (u64) zdev->dma_table);
+
+out:
+       return ret;
+}
+
+static int zpci_freeze(struct device *dev)
+{
+       struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+
+       if (zdev->state != ZPCI_FN_STATE_ONLINE)
+               return 0;
+
+       zpci_unregister_ioat(zdev, 0);
+       return clp_disable_fh(zdev);
+}
+
+struct dev_pm_ops pcibios_pm_ops = {
+       .thaw_noirq = zpci_restore,
+       .freeze_noirq = zpci_freeze,
+       .restore_noirq = zpci_restore,
+       .poweroff_noirq = zpci_freeze,
+};
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+
 static int zpci_scan_bus(struct zpci_dev *zdev)
 {
        struct resource *res;
@@ -781,7 +802,6 @@ int zpci_enable_device(struct zpci_dev *zdev)
        rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
        if (rc)
                goto out;
-       pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
 
        rc = zpci_dma_init_device(zdev);
        if (rc)
@@ -901,10 +921,6 @@ static int __init pci_base_init(void)
            || !test_facility(71) || !test_facility(72))
                return 0;
 
-       pr_info("Probing PCI hardware: PCI:%d  SID:%d  AEN:%d\n",
-               test_facility(69), test_facility(70),
-               test_facility(71));
-
        rc = zpci_debug_init();
        if (rc)
                goto out;
index 475563c3d1e40d401417c6503946f1933457980d..84147984224a996fca36b72d3aff574df229f1c8 100644 (file)
 #include <asm/pci_debug.h>
 #include <asm/pci_clp.h>
 
+static inline void zpci_err_clp(unsigned int rsp, int rc)
+{
+       struct {
+               unsigned int rsp;
+               int rc;
+       } __packed data = {rsp, rc};
+
+       zpci_err_hex(&data, sizeof(data));
+}
+
 /*
  * Call Logical Processor
  * Retry logic is handled by the caller.
@@ -54,7 +64,6 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
        zdev->msi_addr = response->msia;
        zdev->fmb_update = response->mui;
 
-       pr_debug("Supported number of MSI vectors: %u\n", response->noi);
        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
@@ -84,8 +93,8 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
-               pr_err("Query PCI FNGRP failed with response: %x  cc: %d\n",
-                       rrb->response.hdr.rsp, rc);
+               zpci_err("Q PCI FGRP:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
@@ -131,8 +140,8 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
                if (rrb->response.pfgid)
                        rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
-               pr_err("Query PCI failed with response: %x  cc: %d\n",
-                        rrb->response.hdr.rsp, rc);
+               zpci_err("Q PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
 out:
@@ -206,8 +215,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                *fh = rrb->response.fh;
        else {
-               zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
-                        rrb->response.hdr.rsp);
+               zpci_err("Set PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
@@ -262,8 +271,8 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
                /* Get PCI function handle list */
                rc = clp_instr(rrb);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
-                       pr_err("List PCI failed with response: 0x%x  cc: %d\n",
-                               rrb->response.hdr.rsp, rc);
+                       zpci_err("List PCI FN:\n");
+                       zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }
@@ -273,17 +282,11 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
 
                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;
-               pr_info("Detected number of PCI functions: %u\n", entries);
 
-               /* Store the returned resume token as input for the next call */
                resume_token = rrb->response.resume_token;
-
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i]);
        } while (resume_token);
-
-       pr_debug("Maximum number of supported PCI functions: %u\n",
-               rrb->response.max_fn);
 out:
        return rc;
 }
index 7e5573acb06375791ef82582d400ede99f8fe069..9b83d080902dfb6d406c1fb026c95414d7494a53 100644 (file)
@@ -145,10 +145,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                return -EINVAL;
 
        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-       if (!zdev->dma_table) {
-               dev_err(&zdev->pdev->dev, "Missing DMA table\n");
+       if (!zdev->dma_table)
                goto no_refresh;
-       }
 
        for (i = 0; i < nr_pages; i++) {
                dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
@@ -280,11 +278,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
        size = nr_pages * PAGE_SIZE;
 
        dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-       if (dma_addr + size > zdev->end_dma) {
-               dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
-                        dma_addr, size, zdev->end_dma);
+       if (dma_addr + size > zdev->end_dma)
                goto out_free;
-       }
 
        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;
@@ -297,7 +292,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 out_free:
        dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
-       dev_err(dev, "Failed to map addr: %lx\n", pa);
+       zpci_err("map error:\n");
+       zpci_err_hex(&pa, sizeof(pa));
        return DMA_ERROR_CODE;
 }
 
@@ -312,8 +308,10 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-                            ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
-               dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
+                            ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+               zpci_err("unmap error:\n");
+               zpci_err_hex(&dma_addr, sizeof(dma_addr));
+       }
 
        atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
        iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
index 0aecaf9548458e4a88dd450edd65f78d3b1d0449..278e671ec9ac22b977c0598aaa31552d3e32b567 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <asm/pci_debug.h>
 
 /* Content Code Description for PCI Function Error */
 struct zpci_ccdf_err {
@@ -41,25 +42,15 @@ struct zpci_ccdf_avail {
        u16 pec;                        /* PCI event code */
 } __packed;
 
-static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
-{
-       struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-
-       zpci_err("SEI error CCD:\n");
-       zpci_err_hex(ccdf, sizeof(*ccdf));
-       dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
-}
-
 static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
 {
        struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+       struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
 
-       pr_err("%s%s: availability event: fh: 0x%x  fid: 0x%x  event code: 0x%x  reason:",
-               (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
-               (zdev) ? dev_name(&zdev->pdev->dev) : "?",
-               ccdf->fh, ccdf->fid, ccdf->pec);
-       print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
-                      16, 1, ccdf, sizeof(*ccdf), false);
+       pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
+               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+       zpci_err("avail CCDF:\n");
+       zpci_err_hex(ccdf, sizeof(*ccdf));
 
        switch (ccdf->pec) {
        case 0x0301:
@@ -79,14 +70,16 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
 void zpci_event_error(void *data)
 {
        struct zpci_ccdf_err *ccdf = data;
-       struct zpci_dev *zdev;
+       struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+       zpci_err("error CCDF:\n");
+       zpci_err_hex(ccdf, sizeof(*ccdf));
 
-       zpci_event_log_err(ccdf);
-       zdev = get_zdev_by_fid(ccdf->fid);
-       if (!zdev) {
-               pr_err("Error event for unknown fid: %x", ccdf->fid);
+       if (!zdev)
                return;
-       }
+
+       pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
+              pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
 }
 
 void zpci_event_availability(void *data)
index a1be70db75fec95d9cfa087008edf86a6baf7280..305f7ee1f382e60739464c6d0f3de98855d638ea 100644 (file)
@@ -2,6 +2,7 @@ menu "Machine selection"
 
 config SCORE
        def_bool y
+       select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
        select GENERIC_ATOMIC64
@@ -110,3 +111,6 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+config NO_IOMEM
+       def_bool y
index 974aefe861233b8615ed61253a6f7a1d4b21085c..9e3e060290e0895529c864d7e6892e0c47708f43 100644 (file)
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
 #
 KBUILD_AFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(cflags-y)
-KBUILD_AFLAGS_MODULE += -mlong-calls
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_AFLAGS_MODULE +=
+KBUILD_CFLAGS_MODULE +=
 LDFLAGS += --oformat elf32-littlescore
 LDFLAGS_vmlinux        += -G0 -static -nostdlib
 
index f909ac3144a45a895751d038fbfea7495be59898..961bd64015a817b50452499d992ce23f14291247 100644 (file)
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                __wsum sum)
 {
        __asm__ __volatile__(
-               ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
-               ".set\tnoat\n\t"
-               "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
-               "sltu\t$1, %0, %5\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %6\t\t\t# csum\n\t"
-               "sltu\t$1, %0, %6\n\t"
-               "lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 4(%2)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 8(%2)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 12(%2)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 0(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 4(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 8(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 12(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "addu\t%0, $1\t\t\t# Add final carry\n\t"
-               ".set\tnoat\n\t"
-               ".set\tnoreorder"
+               ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
+               "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
+               "cmp.c\t%5, %0\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %6\t\t\t# csum\n\t"
+               "cmp.c\t%6, %0\n\t"
+               "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "1:lw\t%1, [%2, 4]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%2,8]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%2, 12]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0,%1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 0]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 4]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 8]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 12]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:\n\t"
+               ".set\toptimize"
                : "=r" (sum), "=r" (proto)
                : "r" (saddr), "r" (daddr),
                  "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
index fbbfd7132e3b30b6d338fec1f9ea8f7572a76d7d..574c8827abe2381f2db65dfaf3a8f889cf52987b 100644 (file)
@@ -5,5 +5,4 @@
 
 #define virt_to_bus    virt_to_phys
 #define bus_to_virt    phys_to_virt
-
 #endif /* _ASM_SCORE_IO_H */
index 059a61b7071b2ab4d8e5ada5e9d1aa732b435206..716b3fd1d86397ea57389a106516eb12073dde43 100644 (file)
@@ -2,7 +2,7 @@
 #define _ASM_SCORE_PGALLOC_H
 
 #include <linux/mm.h>
-
+#include <linux/highmem.h>
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
        pte_t *pte)
 {
index 7234ed09b7b7ef5e815b749d8f364921a04b6500..befb87d30a89a0d9d21a774fb8d542963224a6ff 100644 (file)
@@ -264,7 +264,7 @@ resume_kernel:
        disable_irq
        lw      r8, [r28, TI_PRE_COUNT]
        cmpz.c  r8
-       bne     r8, restore_all
+       bne     restore_all
 need_resched:
        lw      r8, [r28, TI_FLAGS]
        andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
        sw      r9, [r0, PT_EPC]
 
        cmpi.c  r27, __NR_syscalls      # check syscall number
-       bgeu    illegal_syscall
+       bcs     illegal_syscall
 
        slli    r8, r27, 2              # get syscall routine
        la      r11, sys_call_table
index f4c6d02421d3168cd8b17e87906490b214e7de96..a1519ad3d49d68e0e3202ecadb5b6e1b027a7df3 100644 (file)
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.reg0 = (unsigned long) childregs;
        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
-               p->thread->reg12 = usp;
-               p->thread->reg13 = arg;
+               p->thread.reg12 = usp;
+               p->thread.reg13 = arg;
                p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
        } else {
                *childregs = *current_pt_regs();
index 224f4bc9925ece7f38c85a145cf615aa78082569..f56d7f8b6f64b77a314ebb00c1e79dfe3a6f572c 100644 (file)
@@ -1,5 +1,6 @@
 config SUPERH
        def_bool y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select EXPERT
        select CLKDEV_LOOKUP
        select HAVE_IDE if HAS_IOPORT
index ec9ad593c3da743bacbd5875a577709a94e3333b..01a38696137e9952c9e07f91669685b3b349b50d 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/kdebug.h>
 #include <linux/types.h>
+#include <cpu/ubc.h>
 
 struct arch_hw_breakpoint {
        char            *name; /* Contains name of the symbol to set bkpt */
@@ -15,17 +16,6 @@ struct arch_hw_breakpoint {
        u16             type;
 };
 
-enum {
-       SH_BREAKPOINT_READ      = (1 << 1),
-       SH_BREAKPOINT_WRITE     = (1 << 2),
-       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
-
-       SH_BREAKPOINT_LEN_1     = (1 << 12),
-       SH_BREAKPOINT_LEN_2     = (1 << 13),
-       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
-       SH_BREAKPOINT_LEN_8     = (1 << 14),
-};
-
 struct sh_ubc {
        const char      *name;
        unsigned int    num_events;
diff --git a/arch/sh/include/cpu-common/cpu/ubc.h b/arch/sh/include/cpu-common/cpu/ubc.h
new file mode 100644 (file)
index 0000000..b604619
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+       SH_BREAKPOINT_READ      = (1 << 1),
+       SH_BREAKPOINT_WRITE     = (1 << 2),
+       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+       SH_BREAKPOINT_LEN_1     = (1 << 12),
+       SH_BREAKPOINT_LEN_2     = (1 << 13),
+       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+       SH_BREAKPOINT_LEN_8     = (1 << 14),
+};
+
+#define UBC_64BIT      1
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
new file mode 100644 (file)
index 0000000..3371f90
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+       SH_BREAKPOINT_READ      = (1 << 2),
+       SH_BREAKPOINT_WRITE     = (1 << 3),
+       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+       SH_BREAKPOINT_LEN_1     = (1 << 0),
+       SH_BREAKPOINT_LEN_2     = (1 << 1),
+       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+};
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
index 990195d9845607bfcca4a8b3cfcd05c41df582bc..92f0da4c86a7533e18269e6f5d439130e1a71815 100644 (file)
@@ -22,3 +22,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7264)   := pinmux-sh7264.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7269)    := pinmux-sh7269.o
 
 obj-$(CONFIG_GPIOLIB)                  += $(pinmux-y)
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += ubc.o
diff --git a/arch/sh/kernel/cpu/sh2a/ubc.c b/arch/sh/kernel/cpu/sh2a/ubc.c
new file mode 100644 (file)
index 0000000..ef95a9b
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/ubc.c
+ *
+ * On-chip UBC support for SH-2A CPUs.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_BAR(idx)   (0xfffc0400 + (0x10 * idx))
+#define UBC_BAMR(idx)  (0xfffc0404 + (0x10 * idx))
+#define UBC_BBR(idx)   (0xfffc04A0 + (0x10 * idx))
+#define UBC_BDR(idx)   (0xfffc0408 + (0x10 * idx))
+#define UBC_BDMR(idx)  (0xfffc040C + (0x10 * idx))
+
+#define UBC_BRCR       0xfffc04C0
+
+/* BBR */
+#define UBC_BBR_UBID   (1 << 13)     /* User Break Interrupt Disable */
+#define UBC_BBR_DBE    (1 << 12)     /* Data Break Enable */
+#define UBC_BBR_CD_C   (1 << 6)      /* C Bus Cycle */
+#define UBC_BBR_CD_I   (2 << 6)      /* I Bus Cycle */
+#define UBC_BBR_ID_I   (1 << 4)      /* Break Condition is instruction fetch cycle */
+#define UBC_BBR_ID_D   (2 << 4)      /* Break Condition is data access cycle */
+#define UBC_BBR_ID_ID  (3 << 4)      /* Break Condition is instruction fetch or data access cycle */
+
+#define UBC_CRR_BIE    (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE     (1 << 0)
+
+static struct sh_ubc sh2a_ubc;
+
+static void sh2a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+       __raw_writel(UBC_BBR_DBE | UBC_BBR_CD_C | UBC_BBR_ID_ID |
+                    info->len | info->type, UBC_BBR(idx));
+       __raw_writel(info->address, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+       __raw_writel(UBC_BBR_UBID, UBC_BBR(idx));
+       __raw_writel(0, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_enable_all(unsigned long mask)
+{
+       int i;
+
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               if (mask & (1 << i))
+                       __raw_writel(__raw_readl(UBC_BBR(i)) & ~UBC_BBR_UBID,
+                                    UBC_BBR(i));
+}
+
+static void sh2a_ubc_disable_all(void)
+{
+       int i;
+       
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               __raw_writel(__raw_readl(UBC_BBR(i)) | UBC_BBR_UBID,
+                            UBC_BBR(i));
+}
+
+static unsigned long sh2a_ubc_active_mask(void)
+{
+       unsigned long active = 0;
+       int i;
+
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               if (!(__raw_readl(UBC_BBR(i)) & UBC_BBR_UBID))
+                       active |= (1 << i);
+
+       return active;
+}
+
+static unsigned long sh2a_ubc_triggered_mask(void)
+{
+       unsigned int ret, mask;
+       
+       mask = 0;
+       ret = __raw_readl(UBC_BRCR);
+       if ((ret & (1 << 15)) || (ret & (1 << 13))) {
+               mask |= (1 << 0); /* Match condition for channel 0 */
+       } else 
+               mask &= ~(1 << 0);
+       
+       if ((ret & (1 << 14)) || (ret & (1 << 12))) {
+               mask |= (1 << 1); /* Match condition for channel 1 */
+       } else 
+               mask &= ~(1 << 1);
+
+       return mask;
+}
+
+static void sh2a_ubc_clear_triggered_mask(unsigned long mask)
+{
+       if (mask & (1 << 0)) /* Channel 0 satisfied break condition */
+               __raw_writel(__raw_readl(UBC_BRCR) &
+                            ~((1 << 15) | (1 << 13)), UBC_BRCR);
+       
+       if (mask & (1 << 1)) /* Channel 1 satisfied break condition */
+               __raw_writel(__raw_readl(UBC_BRCR) &
+                            ~((1 << 14) | (1 << 12)), UBC_BRCR);
+}
+
+static struct sh_ubc sh2a_ubc = {
+       .name                   = "SH-2A",
+       .num_events             = 2,
+       .trap_nr                = 0x1e0,
+       .enable                 = sh2a_ubc_enable,
+       .disable                = sh2a_ubc_disable,
+       .enable_all             = sh2a_ubc_enable_all,
+       .disable_all            = sh2a_ubc_disable_all,
+       .active_mask            = sh2a_ubc_active_mask,
+       .triggered_mask         = sh2a_ubc_triggered_mask,
+       .clear_triggered_mask   = sh2a_ubc_clear_triggered_mask,
+};
+
+static int __init sh2a_ubc_init(void)
+{
+       struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+       int i;
+
+       /*
+        * The UBC MSTP bit is optional, as not all platforms will have
+        * it. Just ignore it if we can't find it.
+        */
+       if (IS_ERR(ubc_iclk))
+               ubc_iclk = NULL;
+
+       clk_enable(ubc_iclk);
+
+       for (i = 0; i < sh2a_ubc.num_events; i++) {
+               __raw_writel(0, UBC_BAMR(i));
+               __raw_writel(0, UBC_BBR(i));
+       }
+
+       clk_disable(ubc_iclk);
+
+       sh2a_ubc.clk = ubc_iclk;
+
+       return register_sh_ubc(&sh2a_ubc);
+}
+arch_initcall(sh2a_ubc_init);
index f9173766ec4be2393e4207efe7092741f35fc271..ac4922ad3c148d3fc0883afe9ccb8a50d078fc76 100644 (file)
@@ -113,9 +113,11 @@ static int get_hbp_len(u16 hbp_len)
        case SH_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
+#endif
        }
        return len_in_bytes;
 }
@@ -149,9 +151,11 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        case SH_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
+#endif
        default:
                return -EINVAL;
        }
@@ -190,9 +194,11 @@ static int arch_build_bp_info(struct perf_event *bp)
        case HW_BREAKPOINT_LEN_4:
                info->len = SH_BREAKPOINT_LEN_4;
                break;
+#ifdef UBC_64BIT
        case HW_BREAKPOINT_LEN_8:
                info->len = SH_BREAKPOINT_LEN_8;
                break;
+#endif
        default:
                return -EINVAL;
        }
@@ -240,9 +246,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        case SH_BREAKPOINT_LEN_4:
                align = 3;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                align = 7;
                break;
+#endif
        default:
                return ret;
        }
index 2137ad6674388a8e920fba2d7b9fba0cf88512e6..258464973bcb02a040e533b9fd8a1e78212684ca 100644 (file)
@@ -12,6 +12,7 @@ config 64BIT
 config SPARC
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
        select OF
        select OF_PROMTREE
        select HAVE_IDE
@@ -506,12 +507,17 @@ config SUN_OPENPROMFS
          Only choose N if you know in advance that you will not need to modify
          OpenPROM settings on the running system.
 
-# Makefile helper
+# Makefile helpers
 config SPARC64_PCI
        bool
        default y
        depends on SPARC64 && PCI
 
+config SPARC64_PCI_MSI
+       bool
+       default y
+       depends on SPARC64_PCI && PCI_MSI
+
 endmenu
 
 menu "Executable file formats"
index e204f902e6c9f3088ef11ea8dd571c80ba48b7f4..7c90c50c200d307465c5f814c2b4256f1a01b016 100644 (file)
@@ -254,7 +254,7 @@ static int sun_fd_request_irq(void)
                once = 1;
 
                error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
-                                   IRQF_DISABLED, "floppy", NULL);
+                                   0, "floppy", NULL);
 
                return ((error == 0) ? 0 : -1);
        }
index 5080d16a832ffec0c813b30cf546340e5047a7fd..ec2e2e2aba7d8f15419aa12bc6f790993f8c54d4 100644 (file)
@@ -9,7 +9,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-               asm goto("1:\n\t"
+               asm_volatile_goto("1:\n\t"
                         "nop\n\t"
                         "nop\n\t"
                         ".pushsection __jump_table,  \"aw\"\n\t"
index 4e1d66c3ce71b0b89bc2acb86643b3cebd445a06..0f21e9a5ca18879f5d919aa0fa05532204244eba 100644 (file)
@@ -72,6 +72,8 @@
 
 #define SO_BUSY_POLL           0x0030
 
+#define SO_MAX_PACING_RATE     0x0031
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index d432fb20358e1785c3f6d6d2f0784580497a76ae..d15cc1794b0ec4890d3f6f717680b3673e76a0a9 100644 (file)
@@ -1,3 +1,4 @@
+
 #
 # Makefile for the linux kernel.
 #
@@ -99,7 +100,7 @@ obj-$(CONFIG_STACKTRACE)     += stacktrace.o
 obj-$(CONFIG_SPARC64_PCI)    += pci.o pci_common.o psycho_common.o
 obj-$(CONFIG_SPARC64_PCI)    += pci_psycho.o pci_sabre.o pci_schizo.o
 obj-$(CONFIG_SPARC64_PCI)    += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
-obj-$(CONFIG_PCI_MSI)        += pci_msi.o
+obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o
 
 obj-$(CONFIG_COMPAT)         += sys32.o sys_sparc32.o signal32.o
 
index 62d6b153ffa2e82d895a4e81b51b2abe9490aee8..dff60abbea012f30a8b55221cc8d7a3a60a762ea 100644 (file)
@@ -849,9 +849,8 @@ void ldom_reboot(const char *boot_command)
        if (boot_command && strlen(boot_command)) {
                unsigned long len;
 
-               strcpy(full_boot_str, "boot ");
-               strlcpy(full_boot_str + strlen("boot "), boot_command,
-                       sizeof(full_boot_str + strlen("boot ")));
+               snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
+                        boot_command);
                len = strlen(full_boot_str);
 
                if (reboot_data_supported) {
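
The removed strlcpy() bound is a classic sizeof-on-a-decayed-array bug: in sizeof(full_boot_str + strlen("boot ")) the array has already decayed to a pointer, so the copy was bounded by sizeof(char *) rather than the buffer size and long boot commands were silently truncated. A small illustration with a hypothetical buffer (assumption; not the actual ldom_reboot() code):

#include <stdio.h>
#include <string.h>

/* Illustration only: hypothetical buffer, not the sparc ds.c code. */
static void build_boot_string(const char *boot_command)
{
        char buf[128];
        size_t real_bound  = sizeof(buf);                    /* 128 */
        size_t bogus_bound = sizeof(buf + strlen("boot "));  /* sizeof(char *), 8 on sparc64 */

        /* The replacement hunk bounds the whole copy by the array size
         * in a single call: */
        snprintf(buf, sizeof(buf), "boot %s", boot_command);

        (void)real_bound;
        (void)bogus_bound;
}
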
index 54df554b82d98a684a3cbd7710ea10998e0d1722..e01d75d40329f010a1cc3c41020e100c6488e334 100644 (file)
@@ -1249,12 +1249,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
        snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
        snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
 
-       err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED,
+       err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
                          lp->rx_irq_name, lp);
        if (err)
                return err;
 
-       err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED,
+       err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
                          lp->tx_irq_name, lp);
        if (err) {
                free_irq(lp->cfg.rx_irq, lp);
index 9c7be59e6f5ad3ad360facd10c6a2c35fd3bde75..218b6b23c378f888ef6ab0accf7ee4882e2911e2 100644 (file)
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index d385eaadece7a68fe603eeb625b3caedb3c4c345..70979846076332bf7771d8517afdcc3415518b17 100644 (file)
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
        return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+                                       long long n)
 {
        return cmpxchg64(&v->counter, o, n);
 }
index 0d0395b1b1529d454f772ebb61e240d7fb41f52a..1ad4a1f7d42b8aa47eadbbb4cdcf2e50dc630fd0 100644 (file)
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-       u64 __aligned(8) counter;
+       long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
        /*
         * Requires an atomic op to read both 32-bit parts consistently.
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
-       return _atomic64_xchg_add((u64 *)&v->counter, 0);
+       return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
        _atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+                                       long long u)
 {
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
        _atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-                                     int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+                                       long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+                                       long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+                                       int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
index 4001d5eab4bb7f1fb59e6e62c924bd71b4b10e2f..0ccda3c425be0d3b19a27c13b68ad6b6eaa37525 100644 (file)
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n)                                                   \
        ({                                                              \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                if (sizeof(*(ptr)) != 4)                                \
                        __cmpxchg_called_with_bad_pointer();            \
                smp_mb();                                               \
-               (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+               (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,     \
+                                               (int)n);                \
        })
 
 #define xchg64(ptr, n)                                                 \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                if (sizeof(*(ptr)) != 8)                                \
                        __xchg_called_with_bad_pointer();               \
                smp_mb();                                               \
-               (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+               (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),      \
+                                               (long long)(n));        \
        })
 
 #define cmpxchg64(ptr, o, n)                                           \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                if (sizeof(*(ptr)) != 8)                                \
                        __cmpxchg_called_with_bad_pointer();            \
                smp_mb();                                               \
-               (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+               (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr,     \
+                                       (long long)o, (long long)n);    \
        })
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
                        __x = (typeof(__x))(unsigned long)              \
-                               __insn_exch4((ptr), (u32)(unsigned long)(n)); \
+                               __insn_exch4((ptr),                     \
+                                       (u32)(unsigned long)(n));       \
                        break;                                          \
                case 8:                                                 \
-                       __x = (typeof(__x))                     \
+                       __x = (typeof(__x))                             \
                                __insn_exch((ptr), (unsigned long)(n)); \
                        break;                                          \
                default:                                                \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
                        __x = (typeof(__x))(unsigned long)              \
-                               __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+                               __insn_cmpexch4((ptr),                  \
+                                       (u32)(unsigned long)(n));       \
                        break;                                          \
                case 8:                                                 \
-                       __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+                       __x = (typeof(__x))__insn_cmpexch((ptr),        \
+                                               (long long)(n));        \
                        break;                                          \
                default:                                                \
                        __cmpxchg_called_with_bad_pointer();            \
index 63294f5a8efbca7820c891232c7f79303213e653..4f7ae39fa2022c537770bd10e1cbb0e231249b53 100644 (file)
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+       unsigned long tp;
+       register unsigned long *sp asm("sp");
+       asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+       return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
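
The comment in the CONFIG_PREEMPT branch above describes a general inline-asm trick: give the asm a dummy memory input so that barrier()'s "memory" clobber orders it, without marking the asm volatile and losing the compiler's ability to cache the result between ordinary reads. A host-side sketch of the same idea using x86-64 rdtsc (purely illustrative; the instruction and the hazard variable are assumptions, not tile code):

/* Non-volatile asm with a dummy "m" input: the template never uses the
 * operand, it only makes the statement depend on memory, so a barrier()
 * forces a re-read while the result may still be cached in between. */
static inline unsigned long read_cycles_low(void)
{
        static unsigned long hazard;    /* dummy hazard location, never really read */
        unsigned long lo;

        asm("rdtsc" : "=a" (lo) : "m" (hazard) : "rdx");
        return lo;
}
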
 
index df27a1fd94a310a759612a5c6706b1011a94f787..531f4c365351119eeb249904cc01fd60307931b4 100644 (file)
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
                0,
                "udn",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
-               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+               __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
                NULL
        },
 #ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
                1,  /* disabled pending hypervisor support */
                "idn",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
-               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+               __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
                NULL
        },
        {  /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
                0,
                "ipi",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
-               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+               __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
                NULL
        },
 #endif
index 088d5c141e681084ce030165f303d15f4b69876f..2cbe6d5dd6b04db3ea071fb12c3dbb1866dc818e 100644 (file)
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
        }
        bzt     r28, 1f
        bnz     r29, 1f
+       /* Disable interrupts explicitly for preemption. */
+       IRQ_DISABLE(r20,r21)
+       TRACE_IRQS_OFF
        jal     preempt_schedule_irq
        FEEDBACK_REENTER(interrupt_return)
 1:
index ec755d3f373467ebe271dd1743cc6f56b6f9d7da..b8fc497f24370c0d7c17536664cb19c40923e5bf 100644 (file)
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
        }
        beqzt   r28, 1f
        bnez    r29, 1f
+       /* Disable interrupts explicitly for preemption. */
+       IRQ_DISABLE(r20,r21)
+       TRACE_IRQS_OFF
        jal     preempt_schedule_irq
        FEEDBACK_REENTER(interrupt_return)
 1:
index 362284af3afd31ab39081447b6f1295a866c0ab9..c93977a62116dfecd12a74376e307c89c7875b91 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
 #include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
        }
 
        if (vma->vm_file) {
-               char *s;
                p = d_path(&vma->vm_file->f_path, buf, bufsize);
                if (IS_ERR(p))
                        p = "?";
-               s = strrchr(p, '/');
-               if (s)
-                       p = s+1;
+               name = kbasename(p);
        } else {
-               p = "anon";
+               name = "anon";
        }
 
        /* Generate a string description of the vma info. */
-       namelen = strlen(p);
+       namelen = strlen(name);
        remaining = (bufsize - 1) - namelen;
-       memmove(buf, p, namelen);
+       memmove(buf, name, namelen);
        snprintf(buf + namelen, remaining, "[%lx+%lx] ",
                 vma->vm_start, vma->vm_end - vma->vm_start);
 }
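
For reference, kbasename() used above simply returns the path component after the last '/'. A stand-alone sketch of the same helper (user-space, not the kernel's copy) behaves like this:

#include <stdio.h>
#include <string.h>

/* same idea as kbasename(): text after the final '/' */
static const char *kbasename(const char *path)
{
        const char *tail = strrchr(path, '/');
        return tail ? tail + 1 : path;
}

int main(void)
{
        printf("%s\n", kbasename("/usr/lib/libfoo.so"));   /* libfoo.so */
        printf("%s\n", kbasename("anon"));                  /* anon */
        return 0;
}
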
index 759efa337be88ebaf72cd1047151a2489a7745cb..c89b211fd9e7c093ed26e932432b329974003a04 100644 (file)
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
        return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
        return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
        /*
         * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
        return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
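
The u64 -> long long switch only changes the declared types; the exchange semantics are unchanged. As a rough user-space analogue (a GCC builtin standing in for the arch helper), a 64-bit cmpxchg returns whatever value was actually observed:

#include <stdio.h>

/* sketch of cmpxchg semantics on a 64-bit value using a GCC builtin */
static long long cmpxchg64(long long *v, long long old, long long new)
{
        __atomic_compare_exchange_n(v, &old, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;     /* the previously observed value, as cmpxchg returns */
}

int main(void)
{
        long long v = 5;
        printf("%lld\n", cmpxchg64(&v, 5, 9));  /* prints 5, v becomes 9 */
        printf("%lld\n", cmpxchg64(&v, 5, 7));  /* prints 9, v unchanged */
        printf("%lld\n", v);                    /* prints 9 */
        return 0;
}
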
index 82cdd8906f3d6ff70e3317a78ae00c08a0bed39e..a7ba27b2752be0923644eba19035ba3a97a6c792 100644 (file)
@@ -1,5 +1,6 @@
 config UNICORE32
        def_bool y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_MEMBLOCK
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_DMA_ATTRS
index ee2fb9d37745887eb16255cbd30adacbdcc1ecae..e0836de76f3c90dcb57ec0e7bbfb293cb7d0ca99 100644 (file)
@@ -22,6 +22,7 @@ config X86_64
 config X86
        def_bool y
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_AOUT if X86_32
        select HAVE_UNSTABLE_SCHED_CLOCK
        select ARCH_SUPPORTS_NUMA_BALANCING
@@ -254,10 +255,6 @@ config ARCH_HWEIGHT_CFLAGS
        default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
        default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
 
-config ARCH_CPU_PROBE_RELEASE
-       def_bool y
-       depends on HOTPLUG_CPU
-
 config ARCH_SUPPORTS_UPROBES
        def_bool y
 
@@ -860,7 +857,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
-       depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+       depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
        ---help---
          A local APIC (Advanced Programmable Interrupt Controller) is an
          integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +882,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
        def_bool y
-       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
 
 config X86_IO_APIC
        def_bool y
-       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
 
 config X86_VISWS_APIC
        def_bool y
@@ -1033,6 +1030,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
        tristate "CPU microcode loading support"
+       depends on CPU_SUP_AMD || CPU_SUP_INTEL
        select FW_LOADER
        ---help---
 
index b1977bad5435e5342af7e548700f08c1a04ff580..c8c1e700c26ed3426f0086c594a645bcc49261d9 100644 (file)
@@ -26,6 +26,7 @@
 #include <acpi/pdc_intel.h>
 
 #include <asm/numa.h>
+#include <asm/fixmap.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
index d3f5c63078d812e2f6220cfa5ad322aae920827c..89270b4318db8b97ac467717b38863fa549435ad 100644 (file)
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                 * Catch too early usage of this before alternatives
                 * have run.
                 */
-               asm goto("1: jmp %l[t_warn]\n"
+               asm_volatile_goto("1: jmp %l[t_warn]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #endif
 
-               asm goto("1: jmp %l[t_no]\n"
+               asm_volatile_goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
  * have. Thus, we force the jump to the widest, 4-byte, signed relative
  * offset even though the last would often fit in less bytes.
  */
-               asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+               asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         " .long 1b - .\n"              /* src offset */
index 64507f35800ce23e9c867e1c80b5dc3d0a835ec1..6a2cefb4395a4228cce550ef8b231f3e7158d9d1 100644 (file)
@@ -18,7 +18,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:"
+       asm_volatile_goto("1:"
                ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                ".pushsection __jump_table,  \"aw\" \n\t"
                _ASM_ALIGN "\n\t"
index 626cf70082d7c8229b14a529625652f5646fcf09..3142a94c7b4bf90314838fa8beb67c4e7ed0b363 100644 (file)
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
 #define default_get_smp_config x86_init_uint_noop
 #endif
 
-void generic_processor_info(int apicid, int version);
+int generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
index cb7502852acb0921e4e0c7132962c681f43b51ec..e139b13f2a33a3572d91e4f7297952e1bf8a1ffc 100644 (file)
@@ -218,10 +218,14 @@ void msrs_free(struct msr *msrs);
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 #else  /*  CONFIG_SMP  */
@@ -235,6 +239,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
        wrmsr(msr_no, l, h);
        return 0;
 }
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       rdmsrl(msr_no, *q);
+       return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       wrmsrl(msr_no, q);
+       return 0;
+}
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                struct msr *msrs)
 {
@@ -254,6 +268,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
        return wrmsr_safe(msr_no, l, h);
 }
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       return wrmsrl_safe(msr_no, q);
+}
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
        return rdmsr_safe_regs(regs);
index e7e6751648edf775ad54a42a5c05b3e6d3c1cff5..07537a44216ec9b2eed302183af7c57d8949a5a0 100644 (file)
@@ -20,7 +20,7 @@
 static inline void __mutex_fastpath_lock(atomic_t *v,
                                         void (*fail_fn)(atomic_t *))
 {
-       asm volatile goto(LOCK_PREFIX "   decl %0\n"
+       asm_volatile_goto(LOCK_PREFIX "   decl %0\n"
                          "   jns %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void __mutex_fastpath_unlock(atomic_t *v,
                                           void (*fail_fn)(atomic_t *))
 {
-       asm volatile goto(LOCK_PREFIX "   incl %0\n"
+       asm_volatile_goto(LOCK_PREFIX "   incl %0\n"
                          "   jg %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
index 6aef9fbc09b7a48ec82c13e04b1f8c761dc34661..b913915e8e631f9c9ec2996fde3862bd33e2edce 100644 (file)
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
        return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
        unsigned long pfn;
-       int ret = 0;
+       int ret;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
 
-       if (unlikely(mfn >= machine_to_phys_nr)) {
-               pfn = ~0;
-               goto try_override;
-       }
-       pfn = 0;
+       if (unlikely(mfn >= machine_to_phys_nr))
+               return ~0;
+
        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-       /* ret might be < 0 if there are no entries in the m2p for mfn */
        if (ret < 0)
-               pfn = ~0;
-       else if (get_phys_to_machine(pfn) != mfn)
+               return ~0;
+
+       return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+       unsigned long pfn;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return mfn;
+
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) != mfn) {
                /*
                 * If this appears to be a foreign mfn (because the pfn
                 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
                 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
                 */
                pfn = m2p_find_override_pfn(mfn, ~0);
+       }
 
        /* 
         * pfn is ~0 if there are no entries in the m2p for mfn or if the
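
The restructuring above separates the plain machine-to-physical lookup (mfn_to_pfn_no_overrides) from the foreign-page override handling. A toy model of that split, with small arrays standing in for the real m2p/p2m tables and the override list (all values made up for illustration):

#include <stdio.h>

#define INVALID (~0UL)
#define NR 4

/* toy tables: machine->phys, phys->machine, and a per-mfn override list */
static unsigned long m2p[NR]      = { 2, INVALID, 0, 1 };
static unsigned long p2m[NR]      = { 2, 3, 0, INVALID };
static unsigned long override[NR] = { INVALID, 7, INVALID, INVALID };

static unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
        return mfn < NR ? m2p[mfn] : INVALID;
}

static unsigned long mfn_to_pfn(unsigned long mfn)
{
        unsigned long pfn = mfn_to_pfn_no_overrides(mfn);

        /* if the pfn does not map back to mfn, it may be a foreign page:
         * fall back to the override table */
        if (pfn >= NR || p2m[pfn] != mfn)
                pfn = mfn < NR ? override[mfn] : INVALID;
        return pfn;
}

int main(void)
{
        printf("%lu\n", mfn_to_pfn(0));   /* 2: consistent m2p/p2m entry */
        printf("%lu\n", mfn_to_pfn(1));   /* 7: resolved via the override */
        return 0;
}
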
index bb0465090ae53eb3246b5022d187dd4517bd39d0..940ed3fd889a743732891945b191f34cddc8e217 100644 (file)
 #define MSR_PP1_ENERGY_STATUS          0x00000641
 #define MSR_PP1_POLICY                 0x00000642
 
+#define MSR_CORE_C1_RES                        0x00000660
+
 #define MSR_AMD64_MC0_MASK             0xc0010044
 
 #define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
index 40c76604199f76e9263f94c10c249f2eee8e40dc..6c0b43bd024bb5890230ca45c61b3de082b611ef 100644 (file)
@@ -189,24 +189,31 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
        return 0;
 }
 
-static void acpi_register_lapic(int id, u8 enabled)
+/**
+ * acpi_register_lapic - register a local apic and generate a logical cpu number
+ * @id: local apic id to register
+ * @enabled: this cpu is enabled or not
+ *
+ * Returns the logical cpu number which maps to the local apic
+ */
+static int acpi_register_lapic(int id, u8 enabled)
 {
        unsigned int ver = 0;
 
        if (id >= MAX_LOCAL_APIC) {
                printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
-               return;
+               return -EINVAL;
        }
 
        if (!enabled) {
                ++disabled_cpus;
-               return;
+               return -EINVAL;
        }
 
        if (boot_cpu_physical_apicid != -1U)
                ver = apic_version[boot_cpu_physical_apicid];
 
-       generic_processor_info(id, ver);
+       return generic_processor_info(id, ver);
 }
 
 static int __init
@@ -614,84 +621,27 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 #endif
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj;
-       struct acpi_madt_local_apic *lapic;
-       cpumask_var_t tmp_map, new_map;
-       u8 physid;
        int cpu;
-       int retval = -ENOMEM;
-
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-               return -EINVAL;
-
-       if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
-
-       obj = buffer.pointer;
-       if (obj->type != ACPI_TYPE_BUFFER ||
-           obj->buffer.length < sizeof(*lapic)) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
 
-       lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
-       if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
-           !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       physid = lapic->id;
-
-       kfree(buffer.pointer);
-       buffer.length = ACPI_ALLOCATE_BUFFER;
-       buffer.pointer = NULL;
-       lapic = NULL;
-
-       if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
-               goto out;
-
-       if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
-               goto free_tmp_map;
-
-       cpumask_copy(tmp_map, cpu_present_mask);
-       acpi_register_lapic(physid, ACPI_MADT_ENABLED);
-
-       /*
-        * If acpi_register_lapic successfully generates a new logical cpu
-        * number, then the following will get us exactly what was mapped
-        */
-       cpumask_andnot(new_map, cpu_present_mask, tmp_map);
-       if (cpumask_empty(new_map)) {
-               printk ("Unable to map lapic to logical cpu number\n");
-               retval = -EINVAL;
-               goto free_new_map;
+       cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+       if (cpu < 0) {
+               pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+               return cpu;
        }
 
        acpi_processor_set_pdc(handle);
-
-       cpu = cpumask_first(new_map);
        acpi_map_cpu2node(handle, cpu, physid);
 
        *pcpu = cpu;
-       retval = 0;
-
-free_new_map:
-       free_cpumask_var(new_map);
-free_tmp_map:
-       free_cpumask_var(tmp_map);
-out:
-       return retval;
+       return 0;
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       return _acpi_map_lsapic(handle, pcpu);
+       return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
@@ -745,7 +695,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
 #ifdef CONFIG_HPET_TIMER
 #include <asm/hpet.h>
 
-static struct __initdata resource *hpet_res;
+static struct resource *hpet_res __initdata;
 
 static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
index a7eb82d9b0120d02147d8eb1620a7da1d708c282..ed165d657380062387d08da7bf2f607fff192490 100644 (file)
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
        apic_write(APIC_LVT1, value);
 }
 
-void generic_processor_info(int apicid, int version)
+int generic_processor_info(int apicid, int version)
 {
        int cpu, max = nr_cpu_ids;
        bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2127,7 +2127,7 @@ void generic_processor_info(int apicid, int version)
                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
                disabled_cpus++;
-               return;
+               return -ENODEV;
        }
 
        if (num_processors >= nr_cpu_ids) {
@@ -2138,7 +2138,7 @@ void generic_processor_info(int apicid, int version)
                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
                disabled_cpus++;
-               return;
+               return -EINVAL;
        }
 
        num_processors++;
@@ -2183,6 +2183,8 @@ void generic_processor_info(int apicid, int version)
 #endif
        set_cpu_possible(cpu, true);
        set_cpu_present(cpu, true);
+
+       return cpu;
 }
 
 int hard_smp_processor_id(void)
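
With this change generic_processor_info() follows the common "negative errno or valid index" return convention, which is what lets acpi_register_lapic() and _acpi_map_lsapic() above simply propagate the result. A minimal sketch of the pattern (hypothetical names, not the real registration code):

#include <errno.h>
#include <stdio.h>

#define MAX_CPUS 4

static int apicid_of[MAX_CPUS];
static int num_cpus;

/* return the logical cpu number on success, or a negative errno on failure */
static int register_cpu(int apicid)
{
        if (apicid < 0)
                return -EINVAL;
        if (num_cpus >= MAX_CPUS)
                return -ENODEV;
        apicid_of[num_cpus] = apicid;
        return num_cpus++;
}

int main(void)
{
        int cpu = register_cpu(7);

        if (cpu < 0)
                fprintf(stderr, "register failed: %d\n", cpu);
        else
                printf("apicid 7 -> logical cpu %d\n", cpu);
        return 0;
}
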
index 1191ac1c9d2598e64d13a1419b4a7a383933ce03..a419814cea575f9e2cf183772a9e99c59e097a11 100644 (file)
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
                break;
        case UV3_HUB_PART_NUMBER:
        case UV3_HUB_PART_NUMBER_X:
-               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
                break;
        }
 
index 8355c84b9729df767945c5f4c8a5f7c5db15fab8..9d8449158cf989af3009c6606b7a878c827f5d32 100644 (file)
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
                err = amd_pmu_init();
                break;
        default:
-               return 0;
+               err = -ENOTSUPP;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software events only.\n");
@@ -1883,26 +1883,21 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
-       userpg->cap_usr_time = 0;
-       userpg->cap_usr_time_zero = 0;
-       userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+       userpg->cap_user_time = 0;
+       userpg->cap_user_time_zero = 0;
+       userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
        userpg->pmc_width = x86_pmu.cntval_bits;
 
-       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-               return;
-
-       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+       if (!sched_clock_stable)
                return;
 
-       userpg->cap_usr_time = 1;
+       userpg->cap_user_time = 1;
        userpg->time_mult = this_cpu_read(cyc2ns);
        userpg->time_shift = CYC2NS_SCALE_FACTOR;
        userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
-       if (sched_clock_stable && !check_tsc_disabled()) {
-               userpg->cap_usr_time_zero = 1;
-               userpg->time_zero = this_cpu_read(cyc2ns_offset);
-       }
+       userpg->cap_user_time_zero = 1;
+       userpg->time_zero = this_cpu_read(cyc2ns_offset);
 }
 
 /*
index 9db76c31b3c311bf1f78633f87bef6853cb3db6b..f31a1655d1ff5bd602239e211b14bdd28d95a79a 100644 (file)
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 55: /* Atom 22nm "Silvermont" */
+       case 77: /* Avoton "Silvermont" */
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
index 8ed44589b0e486eda5a45efa74f3f4d3836a730f..4118f9f683151e99402740f1882b205a261a1465 100644 (file)
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
        box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
 {
        struct intel_uncore_box *box;
        int i, size;
 
        size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 
-       box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+       box = kzalloc_node(size, GFP_KERNEL, node);
        if (!box)
                return NULL;
 
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;
 
-       fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+       fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;
 
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        }
 
        type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-       box = uncore_alloc_box(type, 0);
+       box = uncore_alloc_box(type, NUMA_NO_NODE);
        if (!box)
                return -ENOMEM;
 
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
                        if (pmu->func_id < 0)
                                pmu->func_id = j;
 
-                       box = uncore_alloc_box(type, cpu);
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                return -ENOMEM;
 
index ee11b7dfbfbb6676eb94a28360bce88b286ceac1..26d5a55a273610b8d49da975a006f156adb4e482 100644 (file)
@@ -42,15 +42,27 @@ static void __jump_label_transform(struct jump_entry *entry,
                                   int init)
 {
        union jump_code_union code;
+       const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
        const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
        if (type == JUMP_LABEL_ENABLE) {
-               /*
-                * We are enabling this jump label. If it is not a nop
-                * then something must have gone wrong.
-                */
-               if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
-                       bug_at((void *)entry->code, __LINE__);
+               if (init) {
+                       /*
+                        * Jump label is enabled for the first time.
+                        * So we expect a default_nop...
+                        */
+                       if (unlikely(memcmp((void *)entry->code, default_nop, 5)
+                                    != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               } else {
+                       /*
+                        * ...otherwise we expect an ideal_nop; anything else
+                        * means something went horribly wrong.
+                        */
+                       if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
+                                    != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               }
 
                code.jump = 0xe9;
                code.offset = entry->target -
@@ -63,7 +75,6 @@ static void __jump_label_transform(struct jump_entry *entry,
                 * are converting the default nop to the ideal nop.
                 */
                if (init) {
-                       const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
                        if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
                                bug_at((void *)entry->code, __LINE__);
                } else {
index 697b93af02ddbc0fb4461b5d853810fa250cd0a1..a0e2a8a80c94129bb3d3d47bf975ef68f98f500e 100644 (file)
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;
 
-       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+       pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+
+static __init int kvm_spinlock_init_jump(void)
+{
+       if (!kvm_para_available())
+               return 0;
+       if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+               return 0;
 
        static_key_slow_inc(&paravirt_ticketlocks_enabled);
+       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
 
-       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-       pv_lock_ops.unlock_kick = kvm_unlock_kick;
+       return 0;
 }
+early_initcall(kvm_spinlock_init_jump);
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
index 7123b5df479d872def8ff437fcd407c5c4d5ca50..af99f71aeb7f159a6ba7f1da0596ae2843d67e47 100644 (file)
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
                c->microcode = rev;
+               uci->cpu_sig.rev = rev;
                return 0;
        }
 
index 563ed91e6faa3a2adac62ddeb8caff7e5ad50f24..7e920bff99a34b260e1ea338c3fc7dee44ba0e67 100644 (file)
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
                },
        },
+       {       /* Handle problems with rebooting on the Latitude E5410. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Latitude E5410",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+               },
+       },
        {       /* Handle problems with rebooting on the Latitude E5420. */
                .callback = set_pci_reboot,
                .ident = "Dell Latitude E5420",
@@ -352,12 +360,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
        },
        {       /* Handle problems with rebooting on the Precision M6600. */
                .callback = set_pci_reboot,
-               .ident = "Dell OptiPlex 990",
+               .ident = "Dell Precision M6600",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
                },
        },
+       {       /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+               .callback = set_pci_reboot,
+               .ident = "Dell PowerEdge C6100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+               },
+       },
+       {       /* Some C6100 machines were shipped with vendor being 'Dell'. */
+               .callback = set_pci_reboot,
+               .ident = "Dell PowerEdge C6100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+               },
+       },
        { }
 };
 
index 6cacab671f9b76a35aaf49d41431b12e788e5bbe..e73b3f53310c7663b2c37a3cffd57c0cad974a3c 100644 (file)
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock(void)
-{
-       mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock(void)
-{
-       mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#endif
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
index 22513e96b0125c54510722afab5f7abab8f73405..86179d4098938d50e13f25b6e75cfe8a198b7b80 100644 (file)
@@ -72,14 +72,14 @@ __init int create_simplefb(const struct screen_info *si,
         * the part that is occupied by the framebuffer */
        len = mode->height * mode->stride;
        len = PAGE_ALIGN(len);
-       if (len > si->lfb_size << 16) {
+       if (len > (u64)si->lfb_size << 16) {
                printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
                return -EINVAL;
        }
 
        /* setup IORESOURCE_MEM as framebuffer memory */
        memset(&res, 0, sizeof(res));
-       res.flags = IORESOURCE_MEM;
+       res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        res.name = simplefb_resname;
        res.start = si->lfb_base;
        res.end = si->lfb_base + len - 1;
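
The (u64) cast above matters because lfb_size counts 64 KiB units in a 32-bit field, so shifting it left by 16 in 32-bit arithmetic can wrap before the comparison. A small demonstration of the difference (values chosen only to show the wraparound):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lfb_size = 0x10000;                 /* 64 Ki units of 64 KiB = 4 GiB */
        uint64_t wrapped = lfb_size << 16;           /* 32-bit shift wraps to 0 */
        uint64_t correct = (uint64_t)lfb_size << 16; /* widen first, then shift */

        printf("wrapped=%llu correct=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        return 0;
}
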
index 6e60b5fe224481d01ca8662c6edd01432da08518..649b010da00ba5a0c191a4144f2458c08f0500ab 100644 (file)
@@ -65,29 +65,32 @@ int __ref _debug_hotplug_cpu(int cpu, int action)
        if (!cpu_is_hotpluggable(cpu))
                return -EINVAL;
 
-       cpu_hotplug_driver_lock();
+       lock_device_hotplug();
 
        switch (action) {
        case 0:
                ret = cpu_down(cpu);
                if (!ret) {
                        pr_info("CPU %u is now offline\n", cpu);
+                       dev->offline = true;
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                } else
                        pr_debug("Can't offline CPU%d.\n", cpu);
                break;
        case 1:
                ret = cpu_up(cpu);
-               if (!ret)
+               if (!ret) {
+                       dev->offline = false;
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-               else
+               } else {
                        pr_debug("Can't online CPU%d.\n", cpu);
+               }
                break;
        default:
                ret = -EINVAL;
        }
 
-       cpu_hotplug_driver_unlock();
+       unlock_device_hotplug();
 
        return ret;
 }
index a1216de9ffda3b8ed6c38565c66e1e5c7c87c313..2b2fce1b200900b1af42865f946d5faa25fdc56a 100644 (file)
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_dirty))
                return;
 
        if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-               vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
-               vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
-               vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
-               vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+               vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+               vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+               vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+               vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
        }
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
        if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-               vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-               vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-               vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-               vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+               mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+               mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+               mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+               mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
        }
 
        __set_bit(VCPU_EXREG_PDPTR,
@@ -5345,7 +5349,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * There are errata that may cause this bit to not be set:
         * AAK134, BY25.
         */
-       if (exit_qualification & INTR_INFO_UNBLOCK_NMI)
+       if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                       cpu_has_virtual_nmis() &&
+                       (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -7775,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
                vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
                vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
-               __clear_bit(VCPU_EXREG_PDPTR,
-                               (unsigned long *)&vcpu->arch.regs_avail);
-               __clear_bit(VCPU_EXREG_PDPTR,
-                               (unsigned long *)&vcpu->arch.regs_dirty);
        }
 
        kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
index a6b1b86d225358f05e58a649fdadedc4eafdb87e..518532e6a3faa213eb0833c6781c9ceba34dea86 100644 (file)
@@ -47,6 +47,21 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 }
 EXPORT_SYMBOL(rdmsr_on_cpu);
 
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+       *q = rv.reg.q;
+
+       return err;
+}
+EXPORT_SYMBOL(rdmsrl_on_cpu);
+
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
        int err;
@@ -63,6 +78,22 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 EXPORT_SYMBOL(wrmsr_on_cpu);
 
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       rv.reg.q = q;
+
+       err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+       return err;
+}
+EXPORT_SYMBOL(wrmsrl_on_cpu);
+
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
                            struct msr *msrs,
                            void (*msr_func) (void *info))
@@ -159,6 +190,37 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
 
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       rv.reg.q = q;
+
+       err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+       return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+       *q = rv.reg.q;
+
+       return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+
 /*
  * These variants are significantly slower, but allows control over
  * the entire 32-bit GPR set.
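
These helpers exist because an MSR can only be accessed on the CPU it belongs to, hence the smp_call_function_single() round trip. From user space the analogous operation goes through the msr character device; a rough sketch of a rdmsrl_on_cpu()-style read (requires root and the msr driver loaded) could look like:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* read a 64-bit MSR for one CPU via /dev/cpu/N/msr; the MSR number is used
 * as the file offset.  Returns 0 on success, -1 on failure. */
static int rdmsrl_on_cpu_user(int cpu, uint32_t msr, uint64_t *val)
{
        char path[64];
        int fd;
        ssize_t n;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        n = pread(fd, val, sizeof(*val), msr);
        close(fd);
        return n == (ssize_t)sizeof(*val) ? 0 : -1;
}

int main(void)
{
        uint64_t tsc;

        if (rdmsrl_on_cpu_user(0, 0x10 /* IA32_TIME_STAMP_COUNTER */, &tsc) == 0)
                printf("cpu0 TSC = %llu\n", (unsigned long long)tsc);
        else
                perror("rdmsr");
        return 0;
}
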
index 79c216aa0e2baaac3a65a43972161922489c4978..516593e1ce33b92175195c8de2983258b723493c 100644 (file)
@@ -772,13 +772,21 @@ out:
        return;
 }
 
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+       struct sk_filter *fp = container_of(work, struct sk_filter, work);
+       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+       struct bpf_binary_header *header = (void *)addr;
+
+       set_memory_rw(addr, header->pages);
+       module_free(NULL, header);
+       kfree(fp);
+}
+
 void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter) {
-               unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-               struct bpf_binary_header *header = (void *)addr;
-
-               set_memory_rw(addr, header->pages);
-               module_free(NULL, header);
+               INIT_WORK(&fp->work, bpf_jit_free_deferred);
+               schedule_work(&fp->work);
        }
 }
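
The point of this change is that bpf_jit_free() can be called from contexts where set_memory_rw() and module_free() must not run, so the actual teardown is handed to a workqueue. A loose user-space analogue of the "defer the expensive free to a worker" pattern (pthreads standing in for INIT_WORK/schedule_work):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct filter {
        void *prog;     /* stands in for the JITed image */
};

/* runs in the worker's context, like bpf_jit_free_deferred() */
static void *free_deferred(void *arg)
{
        struct filter *fp = arg;

        free(fp->prog);
        free(fp);
        return NULL;
}

int main(void)
{
        pthread_t worker;
        struct filter *fp = malloc(sizeof(*fp));

        fp->prog = malloc(128);
        pthread_create(&worker, NULL, free_deferred, fp);   /* "schedule_work" */
        pthread_join(worker, NULL);
        puts("filter freed in worker context");
        return 0;
}
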
index f5809fa2753e69246f8d361d844c212e03d139e9..b046e070e08868f72e62b5ce804ffedd1689cc03 100644 (file)
@@ -231,7 +231,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh
        offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
 
        if ((offset) && (where == offset))
-               value = value & 0xfffffffc;
+               value = value & ~PCI_EXP_LNKCTL_ASPMC;
 
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                                devfn, where, size, value);
@@ -252,7 +252,7 @@ static struct pci_ops quirk_pcie_aspm_ops = {
  */
 static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
 {
-       int cap_base, i;
+       int i;
        struct pci_bus  *pbus;
        struct pci_dev *dev;
 
@@ -278,7 +278,7 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
                for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
                        quirk_aspm_offset[i] = 0;
 
-               pbus->ops = pbus->parent->ops;
+               pci_bus_set_ops(pbus, pbus->parent->ops);
        } else {
                /*
                 * If devices are attached to the root port at power-up or
@@ -286,13 +286,15 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
                 * each root port to save the register offsets and replace the
                 * bus ops.
                 */
-               list_for_each_entry(dev, &pbus->devices, bus_list) {
+               list_for_each_entry(dev, &pbus->devices, bus_list)
                        /* There are 0 to 8 devices attached to this bus */
-                       cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
-                       quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
-               }
-               pbus->ops = &quirk_pcie_aspm_ops;
+                       quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
+                               dev->pcie_cap + PCI_EXP_LNKCTL;
+
+               pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
+               dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
        }
+
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_MCH_PA,     pcie_rootport_aspm_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_MCH_PA1,    pcie_rootport_aspm_quirk);
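
Two things change in this quirk: the magic mask and offset are replaced by the symbolic PCI_EXP_LNKCTL / PCI_EXP_LNKCTL_ASPMC definitions, and the bus ops are swapped with pci_bus_set_ops() instead of direct assignment. The mask part is plain bit clearing; for illustration (constants as defined in the uapi pci_regs.h, register value made up):

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCTL        0x10     /* Link Control offset within the PCIe cap */
#define PCI_EXP_LNKCTL_ASPMC  0x0003   /* ASPM Control field */

int main(void)
{
        uint32_t lnkctl = 0x0143;                         /* example register value */
        uint32_t forced = lnkctl & ~PCI_EXP_LNKCTL_ASPMC; /* ASPM forced off */

        printf("0x%04x -> 0x%04x (ASPM bits cleared)\n", lnkctl, forced);
        return 0;
}
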
index 5596c7bdd327b1af38138a3d32be36be3e21cb17..082e88129712b4eb9e2027852c890a02ff31a1c7 100644 (file)
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
        if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
                return -ENODEV;
 
-       if (start > end || !addr)
+       if (start > end)
                return -EINVAL;
 
        mutex_lock(&pci_mmcfg_lock);
@@ -716,6 +716,11 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
                return -EEXIST;
        }
 
+       if (!addr) {
+               mutex_unlock(&pci_mmcfg_lock);
+               return -EINVAL;
+       }
+
        rc = -EBUSY;
        cfg = pci_mmconfig_alloc(seg, start, end, addr);
        if (cfg == NULL) {
index 90f6ed127096566ab06c0a9e6f25a355ccdfc048..c7e22ab29a5a2eb6ce3dd75f0fd3e0b26be2d61b 100644 (file)
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_CODE &&
-                   md->type != EFI_BOOT_SERVICES_DATA)
-                       continue;
+               if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+                       if (md->type != EFI_BOOT_SERVICES_CODE &&
+                           md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+                               continue;
+               }
 
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
index fef7d0ba7e3a93d8e44ed09cec92db708759cbcf..649a12befba92f399895b947cc779e0269ac750d 100644 (file)
@@ -40,16 +40,9 @@ static bool                          lid_wake_on_close;
  */
 static int set_lid_wake_behavior(bool wake_on_close)
 {
-       struct acpi_object_list arg_list;
-       union acpi_object arg;
        acpi_status status;
 
-       arg_list.count          = 1;
-       arg_list.pointer        = &arg;
-       arg.type                = ACPI_TYPE_INTEGER;
-       arg.integer.value       = wake_on_close;
-
-       status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL);
+       status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
        if (ACPI_FAILURE(status)) {
                pr_warning(PFX "failed to set lid behavior\n");
                return 1;
index 8b901e8d782dadf00091ac88a6fc859c54c4d1de..a61c7d5811beac47e2549cd6fd44118bc5bf7b28 100644 (file)
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
         * frontend pages while they are being shared with the backend,
         * because mfn_to_pfn (that ends up being called by GUPF) will
         * return the backend pfn rather than the frontend pfn. */
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == mfn)
                set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
        return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
         * the original pfn causes mfn_to_pfn(mfn) to return the frontend
         * pfn again. */
        mfn &= ~FOREIGN_FRAME_BIT;
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
                        m2p_find_override(mfn) == NULL)
                set_phys_to_machine(pfn, mfn);
 
index d1e4777b4e75352fea33ca6a3e3d43a679e19c6d..31d04758b76f6a2da41c309e368768d4881066d0 100644 (file)
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
                   old memory can be recycled */
                make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+               /*
+                * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+                * expects __USER_DS
+                */
+               loadsegment(ds, __USER_DS);
+               loadsegment(es, __USER_DS);
+#endif
+
                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
index 253f63fceea104978c6e5d1f54ba81e1e3581b20..be6b8607895748304c860c61603d24a29bacfa34 100644 (file)
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split into two init functions because we
+ * use both paravirt patching and jump label patching, and all of this
+ * has to be done before the SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
                return;
        }
 
-       static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * The jump_label init code needs to happen _after_ the jump labels are
+ * enabled and before SMP is started, hence we use a pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+       if (!xen_pvspin)
+               return 0;
+
+       static_key_slow_inc(&paravirt_ticketlocks_enabled);
+       return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
        xen_pvspin = false;
index c114483010c13b70caf62b3edea8b46947ca17e8..7db5c22faa68a1803cad4708d9047ad8396b637e 100644 (file)
@@ -87,4 +87,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _XTENSA_SOCKET_H */
index 7f38e40fee0819a74ec6b3d10c759c07d3b04bb4..2429515c05c2ed1b6f457a60def817c33c178651 100644 (file)
@@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING
 
        See Documentation/cgroups/blkio-controller.txt for more information.
 
-config CMDLINE_PARSER
+config BLK_CMDLINE_PARSER
        bool "Block device command line partition parser"
        default n
        ---help---
-       Parsing command line, get the partitions information.
+       Enabling this option allows you to specify the partition layout from
+       the kernel boot args.  This is typically of use for embedded devices
+       which don't otherwise have any standardized method for listing the
+       partitions on a block device.
+
+       See Documentation/block/cmdline-partition.txt for more information.
 
 menu "Partition Types"
 
index 4fa4be544ece94c05d0956a0c6f488c1f7a65161..671a83d063a5ba290c93efc08457eba6217ea80d 100644 (file)
@@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ)     += cfq-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)     += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)        += blk-integrity.o
-obj-$(CONFIG_CMDLINE_PARSER)   += cmdline-parser.o
+obj-$(CONFIG_BLK_CMDLINE_PARSER)       += cmdline-parser.o
index e90c7c164c83b8dc58f36393bb5748c0110e6184..4e491d9b529255476939b8608d91718c40140059 100644 (file)
@@ -235,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        blkg->online = true;
        spin_unlock(&blkcg->lock);
 
-       if (!ret)
+       if (!ret) {
+               if (blkcg == &blkcg_root) {
+                       q->root_blkg = blkg;
+                       q->root_rl.blkg = blkg;
+               }
                return blkg;
+       }
 
        /* @blkg failed fully initialized, use the usual release path */
        blkg_put(blkg);
@@ -334,6 +339,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);
 
+       /*
+        * If the root blkg is being destroyed, just clear the pointer since
+        * root_rl does not take a reference on the root blkg.
+        */
+       if (blkcg == &blkcg_root) {
+               blkg->q->root_blkg = NULL;
+               blkg->q->root_rl.blkg = NULL;
+       }
+
        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
@@ -360,13 +374,6 @@ static void blkg_destroy_all(struct request_queue *q)
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
-
-       /*
-        * root blkg is destroyed.  Just clear the pointer since
-        * root_rl does not take reference on root blkg.
-        */
-       q->root_blkg = NULL;
-       q->root_rl.blkg = NULL;
 }
 
 /*
@@ -970,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
-       q->root_blkg = blkg;
-       q->root_rl.blkg = blkg;
 
        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;
index c04505358342e132a0a3164cd154d12d52a7ad98..0a00e4ecf87cae37a3d310c8dfd32869ac24c161 100644 (file)
@@ -1549,11 +1549,9 @@ get_rq:
        if (plug) {
                /*
                 * If this is the first request added after a plug, fire
-                * of a plug trace. If others have been added before, check
-                * if we have multiple devices in this plug. If so, make a
-                * note to sort the list before dispatch.
+                * off a plug trace.
                 */
-               if (list_empty(&plug->list))
+               if (!request_count)
                        trace_block_plug(q);
                else {
                        if (request_count >= BLK_MAX_REQUEST_COUNT) {
index e7062139612914b95917405fc3da52e4498f066d..ae4f27d7944e9a662ddbb51464bd18e10f4707a4 100644 (file)
@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        spin_lock_irq(q->queue_lock);
 
        if (unlikely(blk_queue_dying(q))) {
+               rq->cmd_flags |= REQ_QUIET;
                rq->errors = -ENXIO;
-               if (rq->end_io)
-                       rq->end_io(rq, rq->errors);
+               __blk_end_request_all(rq, rq->errors);
                spin_unlock_irq(q->queue_lock);
                return;
        }
index c50ecf0ea3b17c652db8c134905de38e56713851..026c1517505f2aaab4780a15735850104abbf7eb 100644 (file)
@@ -195,17 +195,17 @@ EXPORT_SYMBOL(blk_queue_make_request);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @dma_mask: the maximum address the device can handle
+ * @max_addr: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_mask.
+ *    buffers for doing I/O to pages residing above @max_addr.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 {
-       unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+       unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;
 
        q->bounce_gfp = GFP_NOIO;
index dabb9d02cf9a509655f67ae169ac0c3a706d8f8e..434944cbd761884f0f6d9ffdfe96adc00afd1e5c 100644 (file)
@@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 
        if (samples) {
                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
-               do_div(v, samples);
+               v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
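
do_div() only accepts a 32-bit divisor, which silently truncates a large sample count; that is why the division above switches to div64_u64(). In plain user-space C the full 64-by-64 division is just '/', but the effect of truncating the divisor to 32 bits is easy to see:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t sum     = 10000000000ULL;   /* avg_queue_size_sum */
        uint64_t samples = 5000000000ULL;    /* does not fit in 32 bits */

        uint64_t right = sum / samples;             /* full 64/64 division: 2 */
        uint64_t wrong = sum / (uint32_t)samples;   /* do_div-style truncation: 14 */

        printf("right=%llu wrong=%llu\n",
               (unsigned long long)right, (unsigned long long)wrong);
        return 0;
}
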
@@ -4358,7 +4358,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
        if (!eq)
                return -ENOMEM;
 
-       cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+       cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
        if (!cfqd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
index 20614a33236220d0e79d99a2cfaf99822327bc57..9ef66406c625412b79296d6e4f9186611b95b6f6 100644 (file)
@@ -346,7 +346,7 @@ static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
        if (!eq)
                return -ENOMEM;
 
-       dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+       dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
index 668394d185885bc3fcc878697878a4cb07be899e..2bcbd8cc14d4b23780a3cfc262857a87f7c5d67e 100644 (file)
@@ -155,7 +155,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 {
        struct elevator_queue *eq;
 
-       eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+       eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                goto err;
 
index dadf42b454a383299231fa46abee1631aab53196..791f419431322882a915f88995df7b72552d5507 100644 (file)
@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 {
        struct gendisk *disk;
 
-       disk = kmalloc_node(sizeof(struct gendisk),
-                               GFP_KERNEL | __GFP_ZERO, node_id);
+       disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
        if (disk) {
                if (!init_part_stats(&disk->part0)) {
                        kfree(disk);
index 87a32086535d5228b513af3935d29765b763b2ec..9b29a996c3114a3d09c41abdcaaa0f3f19abe3ff 100644 (file)
@@ -263,7 +263,7 @@ config SYSV68_PARTITION
 
 config CMDLINE_PARTITION
        bool "Command line partition support" if PARTITION_ADVANCED
-       select CMDLINE_PARSER
+       select BLK_CMDLINE_PARSER
        help
-         Say Y here if you would read the partitions table from bootargs.
+         Say Y here if you want to read the partition table from bootargs.
          The format for the command line is just like mtdparts.
index 56cf4ffad51ed903128ac079d9845acafbd639a4..5141b563adf1b9a41c6b6054a8d2dcf5618da4d6 100644 (file)
@@ -2,15 +2,15 @@
  * Copyright (C) 2013 HUAWEI
  * Author: Cai Zhiyong <caizhiyong@huawei.com>
  *
- * Read block device partition table from command line.
- * The partition used for fixed block device (eMMC) embedded device.
- * It is no MBR, save storage space. Bootloader can be easily accessed
+ * Read block device partition table from the command line.
+ * Typically used for fixed block (eMMC) embedded devices.
+ * It has no MBR, so saves storage space. Bootloader can be easily accessed
  * by absolute address of data on the block device.
  * Users can easily change the partition.
  *
  * The format for the command line is just like mtdparts.
  *
- * Verbose config please reference "Documentation/block/cmdline-partition.txt"
+ * For further information, see "Documentation/block/cmdline-partition.txt"
  *
  */
 
index 1eb09ee5311b414e448b28eb26a72f7ff6bbaa1c..a8287b49d0621d1778295ad0516c8ccbf22ed0fa 100644 (file)
@@ -222,11 +222,16 @@ check_hybrid:
         * the disk size.
         *
         * Hybrid MBRs do not necessarily comply with this.
+        *
+        * Treat a bad value here as a warning only, so that dd'ing an
+        * image from a smaller disk to a larger disk keeps working.
         */
        if (ret == GPT_MBR_PROTECTIVE) {
                sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
                if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
-                       ret = 0;
+                       pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
+                                sz, min_t(uint32_t,
+                                          total_sectors - 1, 0xFFFFFFFF));
        }
 done:
        return ret;
index 69ce573f1224560b4f5c7532e21053b27c651da4..71f337aefa3905feaca892b7ac3b49b4bcb411e3 100644 (file)
@@ -776,6 +776,22 @@ config CRYPTO_AES_ARM
 
          See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_ARM_BS
+       tristate "Bit sliced AES using NEON instructions"
+       depends on ARM && KERNEL_MODE_NEON
+       select CRYPTO_ALGAPI
+       select CRYPTO_AES_ARM
+       select CRYPTO_ABLK_HELPER
+       help
+         Use a faster and more secure NEON-based implementation of AES in
+         CBC, CTR and XTS modes.
+
+         Bit sliced AES gives around a 45% speedup on Cortex-A15 for CTR mode
+         and for XTS mode encryption; CBC and XTS mode decryption speedup is
+         around 25%. (CBC encryption speed is not affected by this driver.)
+         This implementation does not rely on any lookup tables, so it is
+         believed to be invulnerable to cache timing attacks.
+
 config CRYPTO_ANUBIS
        tristate "Anubis cipher algorithm"
        select CRYPTO_ALGAPI
index 7be34248b450896cfc056d1709513df4f172f79a..39ea4791a3c977ad7eda47498b9f7d7741b438e2 100644 (file)
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                }
                device->device_issue_pending(chan);
        } else {
-               if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+               if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for depend_tx\n",
                              __func__);
                tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
                 * we are referring to the correct operation
                 */
                BUG_ON(async_tx_test_ack(*tx));
-               if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+               if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for transaction\n",
                              __func__);
                async_tx_ack(*tx);
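
Both hunks above are part of the tree-wide dmaengine rename of DMA_SUCCESS to DMA_COMPLETE; only the enum value changes, the calling pattern stays the same. A hedged, stand-alone sketch of that pattern (the dmaengine symbols are the real ones, the wrapper function is illustrative):

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    /* Illustrative only: wait for a submitted descriptor and report an
     * error unless the engine signals DMA_COMPLETE. */
    static int wait_for_dependency(struct dma_async_tx_descriptor *tx)
    {
            return dma_wait_for_async_tx(tx) == DMA_COMPLETE ? 0 : -EIO;
    }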
index aa43b911ccef582c78e70a914106ba454de2c05b..969e9871785ca5ae47e0153ef96c15c460daca18 100644 (file)
@@ -166,4 +166,6 @@ source "drivers/reset/Kconfig"
 
 source "drivers/fmc/Kconfig"
 
+source "drivers/powercap/Kconfig"
+
 endmenu
index ab93de8297f1338fc2440967ce8934a7ff6bc043..34c1d554f69b489fc523a811e9f1fc76b5a5cd4e 100644 (file)
@@ -152,3 +152,4 @@ obj-$(CONFIG_VME_BUS)               += vme/
 obj-$(CONFIG_IPACK_BUS)                += ipack/
 obj-$(CONFIG_NTB)              += ntb/
 obj-$(CONFIG_FMC)              += fmc/
+obj-$(CONFIG_POWERCAP)         += powercap/
index 22327e6a7236fc367313ec5acf45016d354863d4..589da059ce3939c9a2139907daefbf00a5a9b0e2 100644 (file)
@@ -24,7 +24,7 @@ menuconfig ACPI
          are configured, ACPI is used.
 
          The project home page for the Linux ACPI subsystem is here:
-         <http://www.lesswatts.org/projects/acpi/>
+         <https://01.org/linux-acpi>
 
          Linux support for ACPI is based on Intel Corporation's ACPI
          Component Architecture (ACPI CA).  For more information on the
@@ -56,23 +56,6 @@ config ACPI_PROCFS
 
          Say N to delete /proc/acpi/ files that have moved to /sys/
 
-config ACPI_PROCFS_POWER
-       bool "Deprecated power /proc/acpi directories"
-       depends on PROC_FS
-       help
-         For backwards compatibility, this option allows
-          deprecated power /proc/acpi/ directories to exist, even when
-          they have been replaced by functions in /sys.
-          The deprecated directories (and their replacements) include:
-         /proc/acpi/battery/* (/sys/class/power_supply/*)
-         /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
-         This option has no effect on /proc/acpi/ directories
-         and functions, which do not yet exist in /sys
-         This option, together with the proc directories, will be
-         deleted in 2.6.39.
-
-         Say N to delete power /proc/acpi/ directories that have moved to /sys/
-
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
@@ -123,9 +106,9 @@ config ACPI_BUTTON
        default y
        help
          This driver handles events on the power, sleep, and lid buttons.
-         A daemon reads /proc/acpi/event and perform user-defined actions
-         such as shutting down the system.  This is necessary for
-         software-controlled poweroff.
+         A daemon reads events from input devices or via netlink and
+         performs user-defined actions such as shutting down the system.
+         This is necessary for software-controlled poweroff.
 
          To compile this driver as a module, choose M here:
          the module will be called button.
@@ -175,9 +158,10 @@ config ACPI_PROCESSOR
 
          To compile this driver as a module, choose M here:
          the module will be called processor.
+
 config ACPI_IPMI
        tristate "IPMI"
-       depends on IPMI_SI && IPMI_HANDLER
+       depends on IPMI_SI
        default n
        help
          This driver enables the ACPI to access the BMC controller. And it
index cdaf68b58b006fb833f104f2d5e28baa4ecaf991..a55fc06db4ae705b47aa8ca9f2b33d456057c536 100644 (file)
@@ -47,7 +47,6 @@ acpi-y                                += sysfs.o
 acpi-$(CONFIG_X86)             += acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)                += debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)       += numa.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
index f37beaa32750106dffdf9fd7aeac764f1f0094f2..b9f0d5f4bba51cecb6ffe8fda126762850818263 100644 (file)
 #include <linux/types.h>
 #include <linux/dmi.h>
 #include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
+#include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
@@ -55,75 +52,30 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
-#endif
-
-static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id ac_device_ids[] = {
-       {"ACPI0003", 0},
-       {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_ac_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-
 static int ac_sleep_before_get_state_ms;
 
-static struct acpi_driver acpi_ac_driver = {
-       .name = "ac",
-       .class = ACPI_AC_CLASS,
-       .ids = ac_device_ids,
-       .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
-       .ops = {
-               .add = acpi_ac_add,
-               .remove = acpi_ac_remove,
-               .notify = acpi_ac_notify,
-               },
-       .drv.pm = &acpi_ac_pm,
-};
-
 struct acpi_ac {
        struct power_supply charger;
-       struct acpi_device * device;
+       struct acpi_device *adev;
+       struct platform_device *pdev;
        unsigned long long state;
 };
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static const struct file_operations acpi_ac_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_ac_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-#endif
-
 /* --------------------------------------------------------------------------
                                AC Adapter Management
    -------------------------------------------------------------------------- */
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-       acpi_status status = AE_OK;
-
-
-       if (!ac)
-               return -EINVAL;
+       acpi_status status;
 
-       status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+       status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+                                      &ac->state);
        if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Error reading AC Adapter state"));
                ac->state = ACPI_AC_STATUS_UNKNOWN;
                return -ENODEV;
        }
@@ -160,91 +112,13 @@ static enum power_supply_property ac_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
 };
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_ac *ac = seq->private;
-
-
-       if (!ac)
-               return 0;
-
-       if (acpi_ac_get_state(ac)) {
-               seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
-               return 0;
-       }
-
-       seq_puts(seq, "state:                   ");
-       switch (ac->state) {
-       case ACPI_AC_STATUS_OFFLINE:
-               seq_puts(seq, "off-line\n");
-               break;
-       case ACPI_AC_STATUS_ONLINE:
-               seq_puts(seq, "on-line\n");
-               break;
-       default:
-               seq_puts(seq, "unknown\n");
-               break;
-       }
-
-       return 0;
-}
-
-static int acpi_ac_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
-}
-
-static int acpi_ac_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-
-       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
-                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_ac_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       /* 'state' [R] */
-       entry = proc_create_data(ACPI_AC_FILE_STATE,
-                                S_IRUGO, acpi_device_dir(device),
-                                &acpi_ac_fops, acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-       return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_device *device)
-{
-
-       if (acpi_device_dir(device)) {
-               remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
-
-               remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-#endif
-
 /* --------------------------------------------------------------------------
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
 {
-       struct acpi_ac *ac = acpi_driver_data(device);
-
+       struct acpi_ac *ac = data;
 
        if (!ac)
                return;
@@ -267,10 +141,10 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
                        msleep(ac_sleep_before_get_state_ms);
 
                acpi_ac_get_state(ac);
-               acpi_bus_generate_netlink_event(device->pnp.device_class,
-                                                 dev_name(&device->dev), event,
-                                                 (u32) ac->state);
-               acpi_notifier_call_chain(device, event, (u32) ac->state);
+               acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+                                               dev_name(&ac->pdev->dev),
+                                               event, (u32) ac->state);
+               acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
        }
 
@@ -295,53 +169,55 @@ static struct dmi_system_id ac_dmi_table[] = {
        {},
 };
 
-static int acpi_ac_add(struct acpi_device *device)
+static int acpi_ac_probe(struct platform_device *pdev)
 {
        int result = 0;
        struct acpi_ac *ac = NULL;
+       struct acpi_device *adev;
 
-
-       if (!device)
+       if (!pdev)
                return -EINVAL;
 
+       result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+       if (result)
+               return -ENODEV;
+
        ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;
 
-       ac->device = device;
-       strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
-       strcpy(acpi_device_class(device), ACPI_AC_CLASS);
-       device->driver_data = ac;
+       strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+       strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+       ac->adev = adev;
+       ac->pdev = pdev;
+       platform_set_drvdata(pdev, ac);
 
        result = acpi_ac_get_state(ac);
        if (result)
                goto end;
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       result = acpi_ac_add_fs(device);
-#endif
-       if (result)
-               goto end;
-       ac->charger.name = acpi_device_bid(device);
+       ac->charger.name = acpi_device_bid(adev);
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       result = power_supply_register(&ac->device->dev, &ac->charger);
+       result = power_supply_register(&pdev->dev, &ac->charger);
        if (result)
                goto end;
 
+       result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+                       ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+       if (result) {
+               power_supply_unregister(&ac->charger);
+               goto end;
+       }
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-              acpi_device_name(device), acpi_device_bid(device),
+              acpi_device_name(adev), acpi_device_bid(adev),
               ac->state ? "on-line" : "off-line");
 
-      end:
-       if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_ac_remove_fs(device);
-#endif
+end:
+       if (result)
                kfree(ac);
-       }
 
        dmi_check_system(ac_dmi_table);
        return result;
@@ -356,7 +232,7 @@ static int acpi_ac_resume(struct device *dev)
        if (!dev)
                return -EINVAL;
 
-       ac = acpi_driver_data(to_acpi_device(dev));
+       ac = platform_get_drvdata(to_platform_device(dev));
        if (!ac)
                return -EINVAL;
 
@@ -368,28 +244,44 @@ static int acpi_ac_resume(struct device *dev)
        return 0;
 }
 #endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct acpi_device *device)
+static int acpi_ac_remove(struct platform_device *pdev)
 {
-       struct acpi_ac *ac = NULL;
-
+       struct acpi_ac *ac;
 
-       if (!device || !acpi_driver_data(device))
+       if (!pdev)
                return -EINVAL;
 
-       ac = acpi_driver_data(device);
+       acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+                       ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
 
+       ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_ac_remove_fs(device);
-#endif
 
        kfree(ac);
 
        return 0;
 }
 
+static const struct acpi_device_id acpi_ac_match[] = {
+       { "ACPI0003", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
+
+static struct platform_driver acpi_ac_driver = {
+       .probe          = acpi_ac_probe,
+       .remove         = acpi_ac_remove,
+       .driver         = {
+               .name   = "acpi-ac",
+               .owner  = THIS_MODULE,
+               .pm     = &acpi_ac_pm_ops,
+               .acpi_match_table = ACPI_PTR(acpi_ac_match),
+       },
+};
+
 static int __init acpi_ac_init(void)
 {
        int result;
@@ -397,34 +289,16 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_ac_dir = acpi_lock_ac_dir();
-       if (!acpi_ac_dir)
+       result = platform_driver_register(&acpi_ac_driver);
+       if (result < 0)
                return -ENODEV;
-#endif
-
-       result = acpi_bus_register_driver(&acpi_ac_driver);
-       if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-               return -ENODEV;
-       }
 
        return 0;
 }
 
 static void __exit acpi_ac_exit(void)
 {
-
-       acpi_bus_unregister_driver(&acpi_ac_driver);
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-
-       return;
+       platform_driver_unregister(&acpi_ac_driver);
 }
-
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
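
The ac.c rework above replaces the legacy struct acpi_driver with an ACPI-enumerated platform driver plus an explicitly installed notify handler. A stripped-down sketch of that pattern follows; the my_* names are hypothetical, only the kernel APIs (platform_driver, ACPI_HANDLE(), acpi_install_notify_handler()) are real, and error handling is trimmed:

    #include <linux/acpi.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static void my_notify(acpi_handle handle, u32 event, void *data)
    {
            /* react to ACPI device notifications here */
    }

    static int my_probe(struct platform_device *pdev)
    {
            acpi_status status;

            /* listen for notifications on the device's ACPI companion */
            status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
                                                 ACPI_DEVICE_NOTIFY,
                                                 my_notify, pdev);
            return ACPI_FAILURE(status) ? -ENODEV : 0;
    }

    static int my_remove(struct platform_device *pdev)
    {
            acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
                                       ACPI_DEVICE_NOTIFY, my_notify);
            return 0;
    }

    static const struct acpi_device_id my_ids[] = {
            { "ACPI0003", 0 },      /* same ID the AC driver binds to */
            { }
    };

    static struct platform_driver my_driver = {
            .probe  = my_probe,
            .remove = my_remove,
            .driver = {
                    .name             = "my-acpi-platform-drv",
                    .owner            = THIS_MODULE,
                    .acpi_match_table = ACPI_PTR(my_ids),
            },
    };
    module_platform_driver(my_driver);
    MODULE_LICENSE("GPL");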
index f40acef80269fa9e6a38435b1c328a7c80552b34..ac0f52f6df2b862052a3de423ac039a6bd2eefe8 100644 (file)
@@ -1,8 +1,9 @@
 /*
  *  acpi_ipmi.c - ACPI IPMI opregion
  *
- *  Copyright (C) 2010 Intel Corporation
- *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *  Copyright (C) 2010, 2013 Intel Corporation
+ *    Author: Zhao Yakui <yakui.zhao@intel.com>
+ *            Lv Zheng <lv.zheng@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
 #include <linux/ipmi.h>
-#include <linux/device.h>
-#include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
 MODULE_LICENSE("GPL");
 
-#define IPMI_FLAGS_HANDLER_INSTALL     0
-
 #define ACPI_IPMI_OK                   0
 #define ACPI_IPMI_TIMEOUT              0x10
 #define ACPI_IPMI_UNKNOWN              0x07
 /* the IPMI timeout is 5s */
-#define IPMI_TIMEOUT                   (5 * HZ)
+#define IPMI_TIMEOUT                   (5000)
+#define ACPI_IPMI_MAX_MSG_LENGTH       64
 
 struct acpi_ipmi_device {
        /* the device list attached to driver_data.ipmi_devices */
        struct list_head head;
+
        /* the IPMI request message list */
        struct list_head tx_msg_list;
-       struct mutex    tx_msg_lock;
+
+       spinlock_t tx_msg_lock;
        acpi_handle handle;
-       struct pnp_dev *pnp_dev;
-       ipmi_user_t     user_interface;
+       struct device *dev;
+       ipmi_user_t user_interface;
        int ipmi_ifnum; /* IPMI interface number */
        long curr_msgid;
-       unsigned long flags;
-       struct ipmi_smi_info smi_data;
+       bool dead;
+       struct kref kref;
 };
 
 struct ipmi_driver_data {
-       struct list_head        ipmi_devices;
-       struct ipmi_smi_watcher bmc_events;
-       struct ipmi_user_hndl   ipmi_hndlrs;
-       struct mutex            ipmi_lock;
+       struct list_head ipmi_devices;
+       struct ipmi_smi_watcher bmc_events;
+       struct ipmi_user_hndl ipmi_hndlrs;
+       struct mutex ipmi_lock;
+
+       /*
+        * NOTE: IPMI System Interface Selection
+        * There is no system interface specified by the IPMI operation
+        * region access.  We try to select one system interface with ACPI
+        * handle set.  IPMI messages passed from the ACPI code are sent
+        * to this selected global IPMI system interface.
+        */
+       struct acpi_ipmi_device *selected_smi;
 };
 
 struct acpi_ipmi_msg {
        struct list_head head;
+
        /*
         * General speaking the addr type should be SI_ADDR_TYPE. And
         * the addr channel should be BMC.
@@ -85,30 +85,31 @@ struct acpi_ipmi_msg {
         */
        struct ipmi_addr addr;
        long tx_msgid;
+
        /* it is used to track whether the IPMI message is finished */
        struct completion tx_complete;
+
        struct kernel_ipmi_msg tx_message;
-       int     msg_done;
-       /* tx data . And copy it from ACPI object buffer */
-       u8      tx_data[64];
-       int     tx_len;
-       u8      rx_data[64];
-       int     rx_len;
+       int msg_done;
+
+       /* tx/rx data, copied from/to the ACPI object buffer */
+       u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
+       u8 rx_len;
+
        struct acpi_ipmi_device *device;
+       struct kref kref;
 };
 
 /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
 struct acpi_ipmi_buffer {
        u8 status;
        u8 length;
-       u8 data[64];
+       u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
 };
 
 static void ipmi_register_bmc(int iface, struct device *dev);
 static void ipmi_bmc_gone(int iface);
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
 
 static struct ipmi_driver_data driver_data = {
        .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -120,50 +121,174 @@ static struct ipmi_driver_data driver_data = {
        .ipmi_hndlrs = {
                .ipmi_recv_hndl = ipmi_msg_handler,
        },
+       .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
 };
 
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_device *
+ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
 {
+       struct acpi_ipmi_device *ipmi_device;
+       int err;
+       ipmi_user_t user;
+
+       ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+       if (!ipmi_device)
+               return NULL;
+
+       kref_init(&ipmi_device->kref);
+       INIT_LIST_HEAD(&ipmi_device->head);
+       INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+       spin_lock_init(&ipmi_device->tx_msg_lock);
+       ipmi_device->handle = handle;
+       ipmi_device->dev = get_device(dev);
+       ipmi_device->ipmi_ifnum = iface;
+
+       err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+                              ipmi_device, &user);
+       if (err) {
+               put_device(dev);
+               kfree(ipmi_device);
+               return NULL;
+       }
+       ipmi_device->user_interface = user;
+
+       return ipmi_device;
+}
+
+static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
+{
+       ipmi_destroy_user(ipmi_device->user_interface);
+       put_device(ipmi_device->dev);
+       kfree(ipmi_device);
+}
+
+static void ipmi_dev_release_kref(struct kref *kref)
+{
+       struct acpi_ipmi_device *ipmi =
+               container_of(kref, struct acpi_ipmi_device, kref);
+
+       ipmi_dev_release(ipmi);
+}
+
+static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
+{
+       list_del(&ipmi_device->head);
+       if (driver_data.selected_smi == ipmi_device)
+               driver_data.selected_smi = NULL;
+
+       /*
+        * Always setting dead flag after deleting from the list or
+        * list_for_each_entry() codes must get changed.
+        */
+       ipmi_device->dead = true;
+}
+
+static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
+{
+       struct acpi_ipmi_device *ipmi_device = NULL;
+
+       mutex_lock(&driver_data.ipmi_lock);
+       if (driver_data.selected_smi) {
+               ipmi_device = driver_data.selected_smi;
+               kref_get(&ipmi_device->kref);
+       }
+       mutex_unlock(&driver_data.ipmi_lock);
+
+       return ipmi_device;
+}
+
+static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
+{
+       kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
+}
+
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
+{
+       struct acpi_ipmi_device *ipmi;
        struct acpi_ipmi_msg *ipmi_msg;
-       struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+       ipmi = acpi_ipmi_dev_get();
+       if (!ipmi)
+               return NULL;
 
        ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
-       if (!ipmi_msg)  {
-               dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+       if (!ipmi_msg) {
+               acpi_ipmi_dev_put(ipmi);
                return NULL;
        }
+
+       kref_init(&ipmi_msg->kref);
        init_completion(&ipmi_msg->tx_complete);
        INIT_LIST_HEAD(&ipmi_msg->head);
        ipmi_msg->device = ipmi;
+       ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
+
        return ipmi_msg;
 }
 
-#define                IPMI_OP_RGN_NETFN(offset)       ((offset >> 8) & 0xff)
-#define                IPMI_OP_RGN_CMD(offset)         (offset & 0xff)
-static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
-                               acpi_physical_address address,
-                               acpi_integer *value)
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+       acpi_ipmi_dev_put(tx_msg->device);
+       kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+       struct acpi_ipmi_msg *tx_msg =
+               container_of(kref, struct acpi_ipmi_msg, kref);
+
+       ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+       kref_get(&tx_msg->kref);
+
+       return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+       kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
+#define IPMI_OP_RGN_NETFN(offset)      ((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset)                (offset & 0xff)
+static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
+                                   acpi_physical_address address,
+                                   acpi_integer *value)
 {
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
+       unsigned long flags;
 
        msg = &tx_msg->tx_message;
+
        /*
         * IPMI network function and command are encoded in the address
         * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
         */
        msg->netfn = IPMI_OP_RGN_NETFN(address);
        msg->cmd = IPMI_OP_RGN_CMD(address);
-       msg->data = tx_msg->tx_data;
+       msg->data = tx_msg->data;
+
        /*
         * value is the parameter passed by the IPMI opregion space handler.
         * It points to the IPMI request message buffer
         */
        buffer = (struct acpi_ipmi_buffer *)value;
+
        /* copy the tx message data */
+       if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
+               dev_WARN_ONCE(tx_msg->device->dev, true,
+                             "Unexpected request (msg len %d).\n",
+                             buffer->length);
+               return -EINVAL;
+       }
        msg->data_len = buffer->length;
-       memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+       memcpy(tx_msg->data, buffer->data, msg->data_len);
+
        /*
         * now the default type is SYSTEM_INTERFACE and channel type is BMC.
         * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
@@ -177,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
        /* Get the msgid */
        device = tx_msg->device;
-       mutex_lock(&device->tx_msg_lock);
+
+       spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
-       mutex_unlock(&device->tx_msg_lock);
+       spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+
+       return 0;
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
-               acpi_integer *value, int rem_time)
+                                     acpi_integer *value)
 {
        struct acpi_ipmi_buffer *buffer;
 
@@ -193,109 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
         * IPMI message returned by IPMI command.
         */
        buffer = (struct acpi_ipmi_buffer *)value;
-       if (!rem_time && !msg->msg_done) {
-               buffer->status = ACPI_IPMI_TIMEOUT;
-               return;
-       }
+
        /*
-        * If the flag of msg_done is not set or the recv length is zero, it
-        * means that the IPMI command is not executed correctly.
-        * The status code will be ACPI_IPMI_UNKNOWN.
+        * If the flag of msg_done is not set, it means that the IPMI command is
+        * not executed correctly.
         */
-       if (!msg->msg_done || !msg->rx_len) {
-               buffer->status = ACPI_IPMI_UNKNOWN;
+       buffer->status = msg->msg_done;
+       if (msg->msg_done != ACPI_IPMI_OK)
                return;
-       }
+
        /*
         * If the IPMI response message is obtained correctly, the status code
         * will be ACPI_IPMI_OK
         */
-       buffer->status = ACPI_IPMI_OK;
        buffer->length = msg->rx_len;
-       memcpy(buffer->data, msg->rx_data, msg->rx_len);
+       memcpy(buffer->data, msg->data, msg->rx_len);
 }
 
 static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 {
-       struct acpi_ipmi_msg *tx_msg, *temp;
-       int count = HZ / 10;
-       struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+       struct acpi_ipmi_msg *tx_msg;
+       unsigned long flags;
+
+       /*
+        * NOTE: On-going ipmi_recv_msg
+        * ipmi_msg_handler() may still be invoked by ipmi_si after
+        * flushing.  But it is safe to do a fast flushing on module_exit()
+        * without waiting for all ipmi_recv_msg(s) to complete from
+        * ipmi_msg_handler() as it is ensured by ipmi_si that all
+        * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
+        */
+       spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+       while (!list_empty(&ipmi->tx_msg_list)) {
+               tx_msg = list_first_entry(&ipmi->tx_msg_list,
+                                         struct acpi_ipmi_msg,
+                                         head);
+               list_del(&tx_msg->head);
+               spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 
-       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
                /* wake up the sleep thread on the Tx msg */
                complete(&tx_msg->tx_complete);
+               acpi_ipmi_msg_put(tx_msg);
+               spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        }
+       spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
 
-       /* wait for about 100ms to flush the tx message list */
-       while (count--) {
-               if (list_empty(&ipmi->tx_msg_list))
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+                              struct acpi_ipmi_msg *msg)
+{
+       struct acpi_ipmi_msg *tx_msg, *temp;
+       bool msg_found = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+               if (msg == tx_msg) {
+                       msg_found = true;
+                       list_del(&tx_msg->head);
                        break;
-               schedule_timeout(1);
+               }
        }
-       if (!list_empty(&ipmi->tx_msg_list))
-               dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+       spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+       if (msg_found)
+               acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
-       int msg_found = 0;
-       struct acpi_ipmi_msg *tx_msg;
-       struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+       bool msg_found = false;
+       struct acpi_ipmi_msg *tx_msg, *temp;
+       struct device *dev = ipmi_device->dev;
+       unsigned long flags;
 
        if (msg->user != ipmi_device->user_interface) {
-               dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
-                       "returned user %p, expected user %p\n",
-                       msg->user, ipmi_device->user_interface);
-               ipmi_free_recv_msg(msg);
-               return;
+               dev_warn(dev,
+                        "Unexpected response is returned. returned user %p, expected user %p\n",
+                        msg->user, ipmi_device->user_interface);
+               goto out_msg;
        }
-       mutex_lock(&ipmi_device->tx_msg_lock);
-       list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+       list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
-                       msg_found = 1;
+                       msg_found = true;
+                       list_del(&tx_msg->head);
                        break;
                }
        }
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 
-       mutex_unlock(&ipmi_device->tx_msg_lock);
        if (!msg_found) {
-               dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
-                       "returned.\n", msg->msgid);
-               ipmi_free_recv_msg(msg);
-               return;
+               dev_warn(dev,
+                        "Unexpected response (msg id %ld) is returned.\n",
+                        msg->msgid);
+               goto out_msg;
+       }
+
+       /* copy the response data to Rx_data buffer */
+       if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
+               dev_WARN_ONCE(dev, true,
+                             "Unexpected response (msg len %d).\n",
+                             msg->msg.data_len);
+               goto out_comp;
        }
 
-       if (msg->msg.data_len) {
-               /* copy the response data to Rx_data buffer */
-               memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
-               tx_msg->rx_len = msg->msg.data_len;
-               tx_msg->msg_done = 1;
+       /* response msg is an error msg */
+       msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+       if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
+           msg->msg.data_len == 1) {
+               if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
+                       dev_WARN_ONCE(dev, true,
+                                     "Unexpected response (timeout).\n");
+                       tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
+               }
+               goto out_comp;
        }
+
+       tx_msg->rx_len = msg->msg.data_len;
+       memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
+       tx_msg->msg_done = ACPI_IPMI_OK;
+
+out_comp:
        complete(&tx_msg->tx_complete);
+       acpi_ipmi_msg_put(tx_msg);
+out_msg:
        ipmi_free_recv_msg(msg);
-};
+}
 
 static void ipmi_register_bmc(int iface, struct device *dev)
 {
        struct acpi_ipmi_device *ipmi_device, *temp;
-       struct pnp_dev *pnp_dev;
-       ipmi_user_t             user;
        int err;
        struct ipmi_smi_info smi_data;
        acpi_handle handle;
 
        err = ipmi_get_smi_info(iface, &smi_data);
-
        if (err)
                return;
 
-       if (smi_data.addr_src != SI_ACPI) {
-               put_device(smi_data.dev);
-               return;
-       }
-
+       if (smi_data.addr_src != SI_ACPI)
+               goto err_ref;
        handle = smi_data.addr_info.acpi_info.acpi_handle;
+       if (!handle)
+               goto err_ref;
+
+       ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
+       if (!ipmi_device) {
+               dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
+               goto err_ref;
+       }
 
        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -304,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
                 * to the device list, don't add it again.
                 */
                if (temp->handle == handle)
-                       goto out;
-       }
-
-       ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
-
-       if (!ipmi_device)
-               goto out;
-
-       pnp_dev = to_pnp_dev(smi_data.dev);
-       ipmi_device->handle = handle;
-       ipmi_device->pnp_dev = pnp_dev;
-
-       err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
-                                       ipmi_device, &user);
-       if (err) {
-               dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
-               kfree(ipmi_device);
-               goto out;
+                       goto err_lock;
        }
-       acpi_add_ipmi_device(ipmi_device);
-       ipmi_device->user_interface = user;
-       ipmi_device->ipmi_ifnum = iface;
+       if (!driver_data.selected_smi)
+               driver_data.selected_smi = ipmi_device;
+       list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
        mutex_unlock(&driver_data.ipmi_lock);
-       memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+
+       put_device(smi_data.dev);
        return;
 
-out:
+err_lock:
        mutex_unlock(&driver_data.ipmi_lock);
+       ipmi_dev_release(ipmi_device);
+err_ref:
        put_device(smi_data.dev);
        return;
 }
@@ -339,23 +502,29 @@ out:
 static void ipmi_bmc_gone(int iface)
 {
        struct acpi_ipmi_device *ipmi_device, *temp;
+       bool dev_found = false;
 
        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry_safe(ipmi_device, temp,
-                               &driver_data.ipmi_devices, head) {
-               if (ipmi_device->ipmi_ifnum != iface)
-                       continue;
-
-               acpi_remove_ipmi_device(ipmi_device);
-               put_device(ipmi_device->smi_data.dev);
-               kfree(ipmi_device);
-               break;
+                                &driver_data.ipmi_devices, head) {
+               if (ipmi_device->ipmi_ifnum != iface) {
+                       dev_found = true;
+                       __ipmi_dev_kill(ipmi_device);
+                       break;
+               }
        }
+       if (!driver_data.selected_smi)
+               driver_data.selected_smi = list_first_entry_or_null(
+                                       &driver_data.ipmi_devices,
+                                       struct acpi_ipmi_device, head);
        mutex_unlock(&driver_data.ipmi_lock);
+
+       if (dev_found) {
+               ipmi_flush_tx_msg(ipmi_device);
+               acpi_ipmi_dev_put(ipmi_device);
+       }
 }
-/* --------------------------------------------------------------------------
- *                     Address Space Management
- * -------------------------------------------------------------------------- */
+
 /*
  * This is the IPMI opregion space handler.
  * @function: indicates the read/write. In fact as the IPMI message is driven
@@ -368,16 +537,17 @@ static void ipmi_bmc_gone(int iface)
  *          the response IPMI message returned by IPMI command.
  * @handler_context: IPMI device context.
  */
-
 static acpi_status
 acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
-                     u32 bits, acpi_integer *value,
-                     void *handler_context, void *region_context)
+                       u32 bits, acpi_integer *value,
+                       void *handler_context, void *region_context)
 {
        struct acpi_ipmi_msg *tx_msg;
-       struct acpi_ipmi_device *ipmi_device = handler_context;
-       int err, rem_time;
+       struct acpi_ipmi_device *ipmi_device;
+       int err;
        acpi_status status;
+       unsigned long flags;
+
        /*
         * IPMI opregion message.
         * IPMI message is firstly written to the BMC and system software
@@ -387,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        if ((function & ACPI_IO_MASK) == ACPI_READ)
                return AE_TYPE;
 
-       if (!ipmi_device->user_interface)
+       tx_msg = ipmi_msg_alloc();
+       if (!tx_msg)
                return AE_NOT_EXIST;
+       ipmi_device = tx_msg->device;
 
-       tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
-       if (!tx_msg)
-               return AE_NO_MEMORY;
+       if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
+               ipmi_msg_release(tx_msg);
+               return AE_TYPE;
+       }
 
-       acpi_format_ipmi_msg(tx_msg, address, value);
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       acpi_ipmi_msg_get(tx_msg);
+       mutex_lock(&driver_data.ipmi_lock);
+       /* Do not add a tx_msg that can not be flushed. */
+       if (ipmi_device->dead) {
+               mutex_unlock(&driver_data.ipmi_lock);
+               ipmi_msg_release(tx_msg);
+               return AE_NOT_EXIST;
+       }
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+       mutex_unlock(&driver_data.ipmi_lock);
+
        err = ipmi_request_settime(ipmi_device->user_interface,
-                                       &tx_msg->addr,
-                                       tx_msg->tx_msgid,
-                                       &tx_msg->tx_message,
-                                       NULL, 0, 0, 0);
+                                  &tx_msg->addr,
+                                  tx_msg->tx_msgid,
+                                  &tx_msg->tx_message,
+                                  NULL, 0, 0, IPMI_TIMEOUT);
        if (err) {
                status = AE_ERROR;
-               goto end_label;
+               goto out_msg;
        }
-       rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
-                                       IPMI_TIMEOUT);
-       acpi_format_ipmi_response(tx_msg, value, rem_time);
+       wait_for_completion(&tx_msg->tx_complete);
+
+       acpi_format_ipmi_response(tx_msg, value);
        status = AE_OK;
 
-end_label:
-       mutex_lock(&ipmi_device->tx_msg_lock);
-       list_del(&tx_msg->head);
-       mutex_unlock(&ipmi_device->tx_msg_lock);
-       kfree(tx_msg);
+out_msg:
+       ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+       acpi_ipmi_msg_put(tx_msg);
        return status;
 }
 
-static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
-{
-       if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
-               return;
-
-       acpi_remove_address_space_handler(ipmi->handle,
-                               ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
-
-       clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-}
-
-static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+static int __init acpi_ipmi_init(void)
 {
+       int result;
        acpi_status status;
 
-       if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+       if (acpi_disabled)
                return 0;
 
-       status = acpi_install_address_space_handler(ipmi->handle,
+       status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_IPMI,
                                                    &acpi_ipmi_space_handler,
-                                                   NULL, ipmi);
+                                                   NULL, NULL);
        if (ACPI_FAILURE(status)) {
-               struct pnp_dev *pnp_dev = ipmi->pnp_dev;
-               dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
-                       "handle\n");
+               pr_warn("Can't register IPMI opregion space handle\n");
                return -EINVAL;
        }
-       set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-       return 0;
-}
-
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-
-       INIT_LIST_HEAD(&ipmi_device->head);
-
-       mutex_init(&ipmi_device->tx_msg_lock);
-       INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
-       ipmi_install_space_handler(ipmi_device);
-
-       list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
-}
-
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-       /*
-        * If the IPMI user interface is created, it should be
-        * destroyed.
-        */
-       if (ipmi_device->user_interface) {
-               ipmi_destroy_user(ipmi_device->user_interface);
-               ipmi_device->user_interface = NULL;
-       }
-       /* flush the Tx_msg list */
-       if (!list_empty(&ipmi_device->tx_msg_list))
-               ipmi_flush_tx_msg(ipmi_device);
-
-       list_del(&ipmi_device->head);
-       ipmi_remove_space_handler(ipmi_device);
-}
-
-static int __init acpi_ipmi_init(void)
-{
-       int result = 0;
-
-       if (acpi_disabled)
-               return result;
-
-       mutex_init(&driver_data.ipmi_lock);
-
        result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+       if (result)
+               pr_err("Can't register IPMI system interface watcher\n");
 
        return result;
 }
 
 static void __exit acpi_ipmi_exit(void)
 {
-       struct acpi_ipmi_device *ipmi_device, *temp;
+       struct acpi_ipmi_device *ipmi_device;
 
        if (acpi_disabled)
                return;
@@ -512,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
         * handler and free it.
         */
        mutex_lock(&driver_data.ipmi_lock);
-       list_for_each_entry_safe(ipmi_device, temp,
-                               &driver_data.ipmi_devices, head) {
-               acpi_remove_ipmi_device(ipmi_device);
-               put_device(ipmi_device->smi_data.dev);
-               kfree(ipmi_device);
+       while (!list_empty(&driver_data.ipmi_devices)) {
+               ipmi_device = list_first_entry(&driver_data.ipmi_devices,
+                                              struct acpi_ipmi_device,
+                                              head);
+               __ipmi_dev_kill(ipmi_device);
+               mutex_unlock(&driver_data.ipmi_lock);
+
+               ipmi_flush_tx_msg(ipmi_device);
+               acpi_ipmi_dev_put(ipmi_device);
+
+               mutex_lock(&driver_data.ipmi_lock);
        }
        mutex_unlock(&driver_data.ipmi_lock);
+       acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+                                         ACPI_ADR_SPACE_IPMI,
+                                         &acpi_ipmi_space_handler);
 }
 
 module_init(acpi_ipmi_init);
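
One detail of the rewritten opregion handler worth spelling out: the IPMI network function and command are packed into the opregion address and decoded by the IPMI_OP_RGN_NETFN()/IPMI_OP_RGN_CMD() macros shown above. A small worked example (the offset value is made up for illustration):

    #define IPMI_OP_RGN_NETFN(offset)       (((offset) >> 8) & 0xff)
    #define IPMI_OP_RGN_CMD(offset)         ((offset) & 0xff)

    /* An opregion access at offset 0x0622 decodes to netfn 0x06 (upper
     * byte) and command 0x22 (lower byte); the handler then builds a
     * kernel_ipmi_msg from these plus the buffer payload. */
    static void ipmi_addr_example(void)
    {
            unsigned long address = 0x0622;
            unsigned int netfn = IPMI_OP_RGN_NETFN(address);    /* 0x06 */
            unsigned int cmd   = IPMI_OP_RGN_CMD(address);      /* 0x22 */

            (void)netfn;
            (void)cmd;
    }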
index fb78bb9ad8f65888817fc6f7a3ecb2563b975134..d3961014aad7ff9d77bcb296ec2f851f96d64b5b 100644 (file)
@@ -30,6 +30,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 /* Offsets relative to LPSS_PRIVATE_OFFSET */
 #define LPSS_GENERAL                   0x08
 #define LPSS_GENERAL_LTR_MODE_SW       BIT(2)
+#define LPSS_GENERAL_UART_RTS_OVRD     BIT(3)
 #define LPSS_SW_LTR                    0x10
 #define LPSS_AUTO_LTR                  0x14
 #define LPSS_TX_INT                    0x20
@@ -68,11 +69,16 @@ struct lpss_private_data {
 
 static void lpss_uart_setup(struct lpss_private_data *pdata)
 {
-       unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+       unsigned int offset;
        u32 reg;
 
-       reg = readl(pdata->mmio_base + tx_int_offset);
-       writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset);
+       offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+       reg = readl(pdata->mmio_base + offset);
+       writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+       offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+       reg = readl(pdata->mmio_base + offset);
+       writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
index 999adb5499c7270626466a7d74870531c3e3ae28..551dad712ffec451364a08c01487a1987c10d7db 100644 (file)
@@ -152,8 +152,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
        unsigned long long current_status;
 
        /* Get device present/absent information from the _STA */
-       if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
-                                              NULL, &current_status)))
+       if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+                                              METHOD_NAME__STA, NULL,
+                                              &current_status)))
                return -ENODEV;
        /*
         * Check for device status. Device should be
@@ -281,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
                if (!info->enabled)
                        continue;
 
-               if (nid < 0)
+               if (nid == NUMA_NO_NODE)
                        nid = memory_add_physaddr_to_nid(info->start_addr);
 
                acpi_unbind_memory_blocks(info, handle);
index 1bde12708f9e112c708bb71ae8d6162af5e367a9..8a4cfc7e71f0f83cc7a8644e75eab627c8e3c52d 100644 (file)
@@ -29,6 +29,13 @@ ACPI_MODULE_NAME("platform");
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
        { "PNP0D40" },
+       { "ACPI0003" },
+       { "VPC2004" },
+       { "BCM4752" },
+
+       /* Intel Smart Sound Technology */
+       { "INT33C8" },
+       { "80860F28" },
 
        { }
 };
index f29e06efa47976eba16e3b4e449a8b5a9235909f..3c1d6b0c09a4b7d1eeeb9347a7c743d1c7487988 100644 (file)
@@ -140,15 +140,11 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
        return 0;
 }
 
-static int acpi_processor_errata(struct acpi_processor *pr)
+static int acpi_processor_errata(void)
 {
        int result = 0;
        struct pci_dev *dev = NULL;
 
-
-       if (!pr)
-               return -EINVAL;
-
        /*
         * PIIX4
         */
@@ -181,7 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        cpu_maps_update_begin();
        cpu_hotplug_begin();
 
-       ret = acpi_map_lsapic(pr->handle, &pr->id);
+       ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
        if (ret)
                goto out;
 
@@ -219,11 +215,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
        int cpu_index, device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
+       unsigned long long value;
 
-       if (num_online_cpus() > 1)
-               errata.smp = TRUE;
-
-       acpi_processor_errata(pr);
+       acpi_processor_errata();
 
        /*
         * Check to see if we have bus mastering arbitration control.  This
@@ -247,18 +241,12 @@ static int acpi_processor_get_info(struct acpi_device *device)
                        return -ENODEV;
                }
 
-               /*
-                * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
-                *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
-                *      arch/xxx/acpi.c
-                */
                pr->acpi_id = object.processor.proc_id;
        } else {
                /*
                 * Declared with "Device" statement; match _UID.
                 * Note that we don't handle string _UIDs yet.
                 */
-               unsigned long long value;
                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
                                                NULL, &value);
                if (ACPI_FAILURE(status)) {
@@ -270,7 +258,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
                device_declaration = 1;
                pr->acpi_id = value;
        }
-       cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
+       pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
+                                       pr->acpi_id);
+       cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
 
        /* Handle UP system running SMP kernel, with no LAPIC in MADT */
        if (!cpu0_initialized && (cpu_index == -1) &&
@@ -332,9 +322,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
         * ensure we get the right value in the "physical id" field
         * of /proc/cpuinfo
         */
-       status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+       status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
        if (ACPI_SUCCESS(status))
-               arch_fix_phys_package_id(pr->id, object.integer.value);
+               arch_fix_phys_package_id(pr->id, value);
 
        return 0;
 }
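
The final hunk above swaps acpi_evaluate_object() for acpi_evaluate_integer(), which hides the output-buffer handling when only a single integer result is expected. A minimal sketch of that call pattern ("handle" is whatever ACPI handle the caller already owns; the printout is illustrative):

    #include <linux/acpi.h>

    static void show_sun(acpi_handle handle)
    {
            unsigned long long sun;
            acpi_status status;

            /* _SUN returns the slot user number as a plain integer */
            status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
            if (ACPI_SUCCESS(status))
                    pr_info("_SUN evaluates to %llu\n", sun);
    }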
index 9feba08c29fe08b772e976fdefc7531300eb4d2d..27c36a5251b56dd560b04333a773ddd875360fd3 100644 (file)
@@ -113,11 +113,12 @@ void acpi_db_display_handlers(void);
 ACPI_HW_DEPENDENT_RETURN_VOID(void
                              acpi_db_generate_gpe(char *gpe_arg,
                                                   char *block_arg))
+ ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
 
 /*
  * dbconvert - miscellaneous conversion routines
  */
- acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
 
 acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
 
@@ -154,6 +155,8 @@ void acpi_db_set_scope(char *name);
 
 void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
 
+void acpi_db_dump_namespace_paths(void);
+
 void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
 
 acpi_status acpi_db_find_name_in_namespace(char *name_arg);
index ab0e9771038127c83365fc3b1d72de841612fb99..3ae5fd02ae649f46677b362af9aa4cd39c4e6e3b 100644 (file)
@@ -242,11 +242,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
  */
 u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
 
-u32 acpi_ev_install_sci_handler(void);
+u32 acpi_ev_sci_dispatch(void);
 
-acpi_status acpi_ev_remove_sci_handler(void);
+u32 acpi_ev_install_sci_handler(void);
 
-u32 acpi_ev_initialize_SCI(u32 program_SCI);
+acpi_status acpi_ev_remove_all_sci_handlers(void);
 
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
 #endif                         /* __ACEVENTS_H__  */
index 90e846f985fa30caa109c8de336ac8e677a29bee..0fba431f4fcb90fd1bad8ec85b870486a33b70d6 100644 (file)
@@ -269,6 +269,7 @@ ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
 ACPI_EXTERN void *acpi_gbl_table_handler_context;
 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
 ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
+ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
 
 /* Owner ID support */
 
@@ -445,13 +446,6 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
 ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
 ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
 ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
 ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
 ACPI_EXTERN char *acpi_gbl_db_buffer;
 ACPI_EXTERN char *acpi_gbl_db_filename;
@@ -459,6 +453,16 @@ ACPI_EXTERN u32 acpi_gbl_db_debug_level;
 ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
 ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
 
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+
 /*
  * Statistic globals
  */
index 0ed00669cd217a0754087ed61bc89465e31804f3..be9e30ee60488b7de09a61ea1dd198fe84ebf43b 100644 (file)
@@ -398,6 +398,14 @@ struct acpi_simple_repair_info {
  *
  ****************************************************************************/
 
+/* Dispatch info for each host-installed SCI handler */
+
+struct acpi_sci_handler_info {
+       struct acpi_sci_handler_info *next;
+       acpi_sci_handler address;       /* Address of handler */
+       void *context;          /* Context to be passed to handler */
+};
+
 /* Dispatch info for each GPE -- either a method or handler, cannot be both */
 
 struct acpi_gpe_handler_info {
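
The new acpi_sci_handler_info node above is the per-handler record behind ACPICA's support for multiple host-installed SCI handlers. A hedged sketch of what such a handler looks like; the handler signature follows the acpi_sci_handler field type, and the registration entry point named in the comment is an assumption rather than a quoted API:

    #include <acpi/acpi.h>

    /* Returning ACPI_INTERRUPT_HANDLED tells the core the SCI was consumed. */
    static u32 my_sci_handler(void *context)
    {
            /* inspect/clear platform-specific SCI status here */
            return ACPI_INTERRUPT_HANDLED;
    }

    /* Assumed registration (one list node per install):
     *   acpi_install_sci_handler(my_sci_handler, my_context);
     */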
index 40b04bd5579e36abec92165270c44f576e8fddc4..e6138ac4a16054038275fc81f2f616cd70c26d5d 100644 (file)
@@ -213,6 +213,12 @@ acpi_ns_dump_objects(acpi_object_type type,
                     u8 display_type,
                     u32 max_depth,
                     acpi_owner_id owner_id, acpi_handle start_handle);
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+                         u8 display_type,
+                         u32 max_depth,
+                         acpi_owner_id owner_id, acpi_handle start_handle);
 #endif                         /* ACPI_FUTURE_USAGE */
 
 /*
index b24dbb80fab8f681dc3089cd8a29c700f5cb57c7..d52339090b604ba1569f599a03a45e6712e54ee9 100644 (file)
@@ -196,7 +196,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
  *
  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
  *
- * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
+ * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
  *
  * RETURN:      A GPE interrupt block
  *
index 1b111ef74903771f8f9a8f690aef85b752f65c61..a5687540e9a66e5ba20bfeb9772ad5351b20bc61 100644 (file)
@@ -264,13 +264,6 @@ void acpi_ev_terminate(void)
 
                status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
 
-               /* Remove SCI handler */
-
-               status = acpi_ev_remove_sci_handler();
-               if (ACPI_FAILURE(status)) {
-                       ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
-               }
-
                status = acpi_ev_remove_global_lock_handler();
                if (ACPI_FAILURE(status)) {
                        ACPI_ERROR((AE_INFO,
@@ -280,6 +273,13 @@ void acpi_ev_terminate(void)
                acpi_gbl_events_initialized = FALSE;
        }
 
+       /* Remove SCI handlers */
+
+       status = acpi_ev_remove_all_sci_handlers();
+       if (ACPI_FAILURE(status)) {
+               ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+       }
+
        /* Deallocate all handler objects installed within GPE info structs */
 
        status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
index cea14d6fc76c976db580144f93d28d890ff3938f..6293d6bb6fe1e2a0348f90505ebbaa36f2f3f3b2 100644 (file)
@@ -217,16 +217,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
                        region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
 
-                       if (region_obj2->extra.region_context) {
-
-                               /* The handler for this region was already installed */
-
-                               ACPI_FREE(region_context);
-                       } else {
-                               /*
-                                * Save the returned context for use in all accesses to
-                                * this particular region
-                                */
+                       /*
+                        * Save the returned context for use in all accesses to
+                        * the handler for this particular region
+                        */
+                       if (!(region_obj2->extra.region_context)) {
                                region_obj2->extra.region_context =
                                    region_context;
                        }
@@ -402,6 +397,14 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
                                                 handler_obj->address_space.
                                                 context, region_context);
 
+                               /*
+                                * region_context should have been released by the deactivate
+                                * operation. We don't need access to it anymore here.
+                                */
+                               if (region_context) {
+                                       *region_context = NULL;
+                               }
+
                                /* Init routine may fail, Just ignore errors */
 
                                if (ACPI_FAILURE(status)) {
index b905acf7aacdc1d90f9189b9660df28eb6777f08..94d9ebddf5755031a34b654664b0be6471330662 100644 (file)
@@ -52,6 +52,50 @@ ACPI_MODULE_NAME("evsci")
 /* Local prototypes */
 static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_sci_dispatch
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_sci_dispatch(void)
+{
+       struct acpi_sci_handler_info *sci_handler;
+       acpi_cpu_flags flags;
+       u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+
+       ACPI_FUNCTION_NAME(ev_sci_dispatch);
+
+       /* Are there any host-installed SCI handlers? */
+
+       if (!acpi_gbl_sci_handler_list) {
+               return (int_status);
+       }
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Invoke all host-installed SCI handlers */
+
+       sci_handler = acpi_gbl_sci_handler_list;
+       while (sci_handler) {
+
+               /* Invoke the installed handler (at interrupt level) */
+
+               int_status |= sci_handler->address(sci_handler->context);
+
+               sci_handler = sci_handler->next;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return (int_status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_sci_xrupt_handler
@@ -89,6 +133,10 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
         */
        interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
 
+       /* Invoke all host-installed SCI handlers */
+
+       interrupt_handled |= acpi_ev_sci_dispatch();
+
        return_UINT32(interrupt_handled);
 }
 
@@ -112,14 +160,13 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
        ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
 
        /*
-        * We are guaranteed by the ACPI CA initialization/shutdown code that
+        * We are guaranteed by the ACPICA initialization/shutdown code that
         * if this interrupt handler is installed, ACPI is enabled.
         */
 
        /* GPEs: Check for and dispatch any GPEs that have occurred */
 
        interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
        return_UINT32(interrupt_handled);
 }
 
@@ -150,15 +197,15 @@ u32 acpi_ev_install_sci_handler(void)
 
 /******************************************************************************
  *
- * FUNCTION:    acpi_ev_remove_sci_handler
+ * FUNCTION:    acpi_ev_remove_all_sci_handlers
  *
  * PARAMETERS:  none
  *
- * RETURN:      E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN:      AE_OK if handler uninstalled, AE_ERROR if handler was not
  *              installed to begin with
  *
  * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- *              taken.
+ *              taken. Remove all host-installed SCI handlers.
  *
  * Note:  It doesn't seem important to disable all events or set the event
  *        enable registers to their original values. The OS should disable
@@ -167,11 +214,13 @@ u32 acpi_ev_install_sci_handler(void)
  *
  ******************************************************************************/
 
-acpi_status acpi_ev_remove_sci_handler(void)
+acpi_status acpi_ev_remove_all_sci_handlers(void)
 {
+       struct acpi_sci_handler_info *sci_handler;
+       acpi_cpu_flags flags;
        acpi_status status;
 
-       ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+       ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);
 
        /* Just let the OS remove the handler and disable the level */
 
@@ -179,6 +228,21 @@ acpi_status acpi_ev_remove_sci_handler(void)
            acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
                                             acpi_ev_sci_xrupt_handler);
 
+       if (!acpi_gbl_sci_handler_list) {
+               return (status);
+       }
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Free all host-installed SCI handlers */
+
+       while (acpi_gbl_sci_handler_list) {
+               sci_handler = acpi_gbl_sci_handler_list;
+               acpi_gbl_sci_handler_list = sci_handler->next;
+               ACPI_FREE(sci_handler);
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
 }
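The new acpi_ev_sci_dispatch() above walks the acpi_gbl_sci_handler_list singly linked list under the GPE lock and ORs each handler's return value into the interrupt status, so a single handled SCI is enough to report the interrupt as handled. A minimal standalone sketch of that dispatch pattern follows; the names here are illustrative only, not the ACPICA symbols.

/* Illustrative types only; not the ACPICA definitions. */
#define INTERRUPT_NOT_HANDLED 0u

struct sci_handler {
        struct sci_handler *next;
        unsigned int (*address)(void *context);
        void *context;
};

/* Walk the handler list and OR each handler's return into the status. */
static unsigned int sci_dispatch(const struct sci_handler *head)
{
        unsigned int status = INTERRUPT_NOT_HANDLED;
        const struct sci_handler *h;

        /* ACPICA additionally holds acpi_gbl_gpe_lock around this walk. */
        for (h = head; h; h = h->next)
                status |= h->address(h->context);

        return status;
}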
 
index ca5fba99c33bc82a302f8eb246ca41224a2cbada..6f56146a6f88a55a7d0ebebaa4be0bbd84126bd0 100644 (file)
@@ -383,6 +383,144 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
 #endif                         /*  ACPI_FUTURE_USAGE  */
 
 #if (!ACPI_REDUCED_HARDWARE)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_sci_handler
+ *
+ * PARAMETERS:  address             - Address of the handler
+ *              context             - Value passed to the handler on each SCI
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context)
+{
+       struct acpi_sci_handler_info *new_sci_handler;
+       struct acpi_sci_handler_info *sci_handler;
+       acpi_cpu_flags flags;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_install_sci_handler);
+
+       if (!address) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /* Allocate and init a handler object */
+
+       new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info));
+       if (!new_sci_handler) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
+       new_sci_handler->address = address;
+       new_sci_handler->context = context;
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               goto exit;
+       }
+
+       /* Lock list during installation */
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+       sci_handler = acpi_gbl_sci_handler_list;
+
+       /* Ensure handler does not already exist */
+
+       while (sci_handler) {
+               if (address == sci_handler->address) {
+                       status = AE_ALREADY_EXISTS;
+                       goto unlock_and_exit;
+               }
+
+               sci_handler = sci_handler->next;
+       }
+
+       /* Install the new handler into the global list (at head) */
+
+       new_sci_handler->next = acpi_gbl_sci_handler_list;
+       acpi_gbl_sci_handler_list = new_sci_handler;
+
+      unlock_and_exit:
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+      exit:
+       if (ACPI_FAILURE(status)) {
+               ACPI_FREE(new_sci_handler);
+       }
+       return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_sci_handler
+ *
+ * PARAMETERS:  address             - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_remove_sci_handler(acpi_sci_handler address)
+{
+       struct acpi_sci_handler_info *prev_sci_handler;
+       struct acpi_sci_handler_info *next_sci_handler;
+       acpi_cpu_flags flags;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_remove_sci_handler);
+
+       if (!address) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Remove the SCI handler with lock */
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       prev_sci_handler = NULL;
+       next_sci_handler = acpi_gbl_sci_handler_list;
+       while (next_sci_handler) {
+               if (next_sci_handler->address == address) {
+
+                       /* Unlink and free the SCI handler info block */
+
+                       if (prev_sci_handler) {
+                               prev_sci_handler->next = next_sci_handler->next;
+                       } else {
+                               acpi_gbl_sci_handler_list =
+                                   next_sci_handler->next;
+                       }
+
+                       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+                       ACPI_FREE(next_sci_handler);
+                       goto unlock_and_exit;
+               }
+
+               prev_sci_handler = next_sci_handler;
+               next_sci_handler = next_sci_handler->next;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       status = AE_NOT_EXIST;
+
+      unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+       return_ACPI_STATUS(status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_global_event_handler
@@ -398,6 +536,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
  *              Can be used to update event counters, etc.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
 {
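The two new public interfaces above let a host OS chain its own callbacks into the SCI: acpi_install_sci_handler() rejects duplicate addresses and links the new handler at the head of the list, acpi_remove_sci_handler() unlinks and frees it, and both are serialized by ACPI_MTX_EVENTS plus the GPE lock. A hypothetical caller might look like the sketch below; it assumes the acpi_sci_handler typedef takes only the context pointer and returns ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED, as the dispatch code in evsci.c implies.

#include <linux/errno.h>
#include <linux/acpi.h>

/* Hypothetical SCI callback; runs at interrupt level. */
static u32 my_sci_notify(void *context)
{
        /* Inspect device state here; claim the SCI only if it was ours. */
        return ACPI_INTERRUPT_NOT_HANDLED;      /* let other handlers run */
}

static int my_driver_init(void)
{
        acpi_status status = acpi_install_sci_handler(my_sci_notify, NULL);

        return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}

static void my_driver_exit(void)
{
        acpi_remove_sci_handler(my_sci_notify);
}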
index 5ee7a814cd9207db34d42b8945867e415ab733bb..f81fb068d20ed34613615d76217841e2c7297ad4 100644 (file)
@@ -119,7 +119,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
  ******************************************************************************/
 acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
 {
-       u32 value;
+       u32 value_lo;
+       u32 value_hi;
        u32 width;
        u64 address;
        acpi_status status;
@@ -137,13 +138,8 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
                return (status);
        }
 
-       /* Initialize entire 64-bit return value to zero */
-
-       *return_value = 0;
-       value = 0;
-
        /*
-        * Two address spaces supported: Memory or IO. PCI_Config is
+        * Two address spaces supported: Memory or I/O. PCI_Config is
         * not supported here because the GAS structure is insufficient
         */
        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -155,29 +151,35 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
                }
        } else {                /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
+               value_lo = 0;
+               value_hi = 0;
+
                width = reg->bit_width;
                if (width == 64) {
                        width = 32;     /* Break into two 32-bit transfers */
                }
 
                status = acpi_hw_read_port((acpi_io_address)
-                                          address, &value, width);
+                                          address, &value_lo, width);
                if (ACPI_FAILURE(status)) {
                        return (status);
                }
-               *return_value = value;
 
                if (reg->bit_width == 64) {
 
                        /* Read the top 32 bits */
 
                        status = acpi_hw_read_port((acpi_io_address)
-                                                  (address + 4), &value, 32);
+                                                  (address + 4), &value_hi,
+                                                  32);
                        if (ACPI_FAILURE(status)) {
                                return (status);
                        }
-                       *return_value |= ((u64)value << 32);
                }
+
+               /* Set the return value only if status is AE_OK */
+
+               *return_value = (value_lo | ((u64)value_hi << 32));
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_IO,
@@ -186,7 +188,7 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
                          ACPI_FORMAT_UINT64(address),
                          acpi_ut_get_region_name(reg->space_id)));
 
-       return (status);
+       return (AE_OK);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_read)
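For 64-bit I/O-space registers, acpi_read() now keeps the two 32-bit port reads in value_lo and value_hi and writes *return_value only after both reads have succeeded, so a failed second read can no longer leave a half-written result. The final composition reduces to the small sketch below (plain C, standalone types):

#include <stdint.h>

/* value_lo: bits 0-31 from the first port read; value_hi: bits 32-63. */
static uint64_t compose_result(uint32_t value_lo, uint32_t value_hi)
{
        return (uint64_t)value_lo | ((uint64_t)value_hi << 32);
}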
index c5316e5bd4abf2efce5aad7d4968708d3bccdb3d..aff79c7392ff3ff20e19e0a8f43a82709b0053ef 100644 (file)
@@ -424,8 +424,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                                        /* Current scope has no parent scope */
 
                                        ACPI_ERROR((AE_INFO,
-                                                   "ACPI path has too many parent prefixes (^) "
-                                                   "- reached beyond root node"));
+                                                   "%s: Path has too many parent prefixes (^) "
+                                                   "- reached beyond root node",
+                                                   pathname));
                                        return_ACPI_STATUS(AE_NOT_FOUND);
                                }
                        }
index 7418c77fde8c70db88d0d0af9c0c6a8fcad6cad5..80633851cb2fd00e7886bdc4deacf0929726818a 100644 (file)
@@ -59,6 +59,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
 #endif
 
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#ifdef ACPI_FUTURE_USAGE
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+                            u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+                     u32 level, void *context, void **return_value);
+#endif                         /* ACPI_FUTURE_USAGE */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_print_pathname
@@ -671,6 +682,129 @@ acpi_ns_dump_objects(acpi_object_type type,
 }
 #endif                         /* ACPI_FUTURE_USAGE */
 
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_one_object_path, acpi_ns_get_max_depth
+ *
+ * PARAMETERS:  obj_handle          - Node to be dumped
+ *              level               - Nesting level of the handle
+ *              context             - Passed into walk_namespace
+ *              return_value        - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. acpi_ns_get_max_depth
+ *              computes the maximum nesting depth in the namespace tree, in
+ *              order to simplify formatting in acpi_ns_dump_one_object_path.
+ *              These procedures are user_functions called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+                            u32 level, void *context, void **return_value)
+{
+       u32 max_level = *((u32 *)context);
+       char *pathname;
+       struct acpi_namespace_node *node;
+       int path_indent;
+
+       if (!obj_handle) {
+               return (AE_OK);
+       }
+
+       node = acpi_ns_validate_handle(obj_handle);
+       pathname = acpi_ns_get_external_pathname(node);
+
+       path_indent = 1;
+       if (level <= max_level) {
+               path_indent = max_level - level + 1;
+       }
+
+       acpi_os_printf("%2d%*s%-12s%*s",
+                      level, level, " ", acpi_ut_get_type_name(node->type),
+                      path_indent, " ");
+
+       acpi_os_printf("%s\n", &pathname[1]);
+       ACPI_FREE(pathname);
+       return (AE_OK);
+}
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+                     u32 level, void *context, void **return_value)
+{
+       u32 *max_level = (u32 *)context;
+
+       if (level > *max_level) {
+               *max_level = level;
+       }
+       return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_object_paths
+ *
+ * PARAMETERS:  type                - Object type to be dumped
+ *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
+ *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
+ *                                    for an effectively unlimited depth.
+ *              owner_id            - Dump only objects owned by this ID. Use
+ *                                    ACPI_UINT32_MAX to match all owners.
+ *              start_handle        - Where in namespace to start/end search
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ *              acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+                         u8 display_type,
+                         u32 max_depth,
+                         acpi_owner_id owner_id, acpi_handle start_handle)
+{
+       acpi_status status;
+       u32 max_level = 0;
+
+       ACPI_FUNCTION_ENTRY();
+
+       /*
+        * Just lock the entire namespace for the duration of the dump.
+        * We don't want any changes to the namespace during this time,
+        * especially the temporary nodes since we are going to display
+        * them also.
+        */
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               acpi_os_printf("Could not acquire namespace mutex\n");
+               return;
+       }
+
+       /* Get the max depth of the namespace tree, for formatting later */
+
+       (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+                                    ACPI_NS_WALK_NO_UNLOCK |
+                                    ACPI_NS_WALK_TEMP_NODES,
+                                    acpi_ns_get_max_depth, NULL,
+                                    (void *)&max_level, NULL);
+
+       /* Now dump the entire namespace */
+
+       (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+                                    ACPI_NS_WALK_NO_UNLOCK |
+                                    ACPI_NS_WALK_TEMP_NODES,
+                                    acpi_ns_dump_one_object_path, NULL,
+                                    (void *)&max_level, NULL);
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+#endif                         /* ACPI_FUTURE_USAGE */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_dump_entry
index b38b4b07f86e31d8920a7b1467ba4fb6604fc6eb..481a6b4a9b2ffbeb64418abd6b458697faa74eb5 100644 (file)
@@ -605,11 +605,19 @@ acpi_walk_namespace(acpi_object_type type,
                goto unlock_and_exit;
        }
 
+       /* Now we can validate the starting node */
+
+       if (!acpi_ns_validate_handle(start_object)) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit2;
+       }
+
        status = acpi_ns_walk_namespace(type, start_object, max_depth,
                                        ACPI_NS_WALK_UNLOCK,
                                        descending_callback, ascending_callback,
                                        context, return_value);
 
+      unlock_and_exit2:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
       unlock_and_exit:
index 42a13c0d7015360ab44c684ab9645500fc63f1d8..9e6788f9ba0ff5865bd2caeac1d05918c407fa11 100644 (file)
@@ -80,16 +80,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
                }
        }
 
-       /* FACS is the odd table, has no standard ACPI header and no checksum */
+       /* Always calculate checksum, ignore bad checksum if requested */
 
-       if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
-               /* Always calculate checksum, ignore bad checksum if requested */
-
-               status =
-                   acpi_tb_verify_checksum(table_desc->pointer,
-                                           table_desc->length);
-       }
+       status =
+           acpi_tb_verify_checksum(table_desc->pointer, table_desc->length);
 
        return_ACPI_STATUS(status);
 }
index dc963f823d2c094bb204b7baf6734fb6d340c007..9a47715af1f37893e2985b370b7b63e9f57a01bf 100644 (file)
@@ -138,7 +138,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
                ACPI_INFO((AE_INFO, "%4.4s %p %05X",
                           header->signature, ACPI_CAST_PTR(void, address),
                           header->length));
-       } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+       } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
 
                /* RSDP has no common fields */
 
@@ -190,6 +190,16 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
 {
        u8 checksum;
 
+       /*
+        * FACS/S3PT:
+        * They are the odd tables, have no standard ACPI header and no checksum
+        */
+
+       if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
+           ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+               return (AE_OK);
+       }
+
        /* Compute the checksum on the table */
 
        checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
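acpi_tb_verify_checksum() now carries the special cases itself: FACS and S3PT have no standard header checksum and are accepted as AE_OK, and every other table is summed in full. Per the ACPI specification, the bytes of a table, including the checksum field, must sum to zero modulo 256; a standalone sketch of that rule:

#include <stdint.h>
#include <stddef.h>

/* Returns 0 when the table checksum is valid. */
static uint8_t acpi_table_checksum(const uint8_t *table, size_t length)
{
        uint8_t sum = 0;

        while (length--)
                sum = (uint8_t)(sum + *table++);

        return sum;
}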
index 948c95e80d44765f41e82bb62c92bb4c3405bb01..1c95fabbe6a42171b868ea85e6825a3363dc31d5 100644 (file)
@@ -68,8 +68,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
         * Note: Sometimes there exists more than one RSDP in memory; the valid
         * RSDP has a valid checksum, all others have an invalid checksum.
         */
-       if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
-                        sizeof(ACPI_SIG_RSDP) - 1) != 0) {
+       if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
 
                /* Nope, BAD Signature */
 
index 5796e11a0671d32fad4329262e05b51d038cd38e..ffd0db509d347d00ba6da16f6c8c03e047d323f2 100644 (file)
@@ -190,7 +190,7 @@ acpi_debug_print(u32 requested_debug_level,
         * Display the module name, current line number, thread ID (if requested),
         * current procedure nesting level, and the current procedure name
         */
-       acpi_os_printf("%8s-%04ld ", module_name, line_number);
+       acpi_os_printf("%9s-%04ld ", module_name, line_number);
 
        if (ACPI_LV_THREADS & acpi_dbg_level) {
                acpi_os_printf("[%u] ", (u32)thread_id);
index d6f26bf8a0626d0d7497baeaf1ce931d3a76c9ba..046d5b059c07f3ce56b6f35d2c2aac3ea9d59f0d 100644 (file)
@@ -291,7 +291,7 @@ acpi_status acpi_ut_init_globals(void)
 
 #if (!ACPI_REDUCED_HARDWARE)
 
-       /* GPE support */
+       /* GPE/SCI support */
 
        acpi_gbl_all_gpes_initialized = FALSE;
        acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -300,6 +300,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_current_gpe_count = 0;
 
        acpi_gbl_global_event_handler = NULL;
+       acpi_gbl_sci_handler_list = NULL;
 
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
index 46f80e2c92f7da893ff28f5993d89fa01efc323a..6d2c49b86b7fa82434bdb0b2ce1906c95dce00ca 100644 (file)
@@ -758,9 +758,9 @@ int apei_osc_setup(void)
                .cap.pointer    = capbuf,
        };
 
-       capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = 1;
-       capbuf[OSC_CONTROL_TYPE] = 0;
+       capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+       capbuf[OSC_SUPPORT_DWORD] = 1;
+       capbuf[OSC_CONTROL_DWORD] = 0;
 
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
            || ACPI_FAILURE(acpi_run_osc(handle, &context)))
index 2c9958cd7a4350ae675b76ad338904999ae149c1..fbf1aceda8b8ab915a7d2476d78db2b6e2b94e68 100644 (file)
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <linux/power_supply.h>
@@ -72,19 +66,6 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
-enum acpi_battery_files {
-       info_tag = 0,
-       state_tag,
-       alarm_tag,
-       ACPI_BATTERY_NUMFILES,
-};
-
-#endif
-
 static const struct acpi_device_id battery_device_ids[] = {
        {"PNP0C0A", 0},
        {"", 0},
@@ -320,14 +301,6 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-inline char *acpi_battery_units(struct acpi_battery *battery)
-{
-       return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
-               "mA" : "mW";
-}
-#endif
-
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -740,279 +713,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
        sysfs_add_battery(battery);
 }
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static int acpi_battery_print_info(struct seq_file *seq, int result)
-{
-       struct acpi_battery *battery = seq->private;
-
-       if (result)
-               goto end;
-
-       seq_printf(seq, "present:                 %s\n",
-                  acpi_battery_present(battery) ? "yes" : "no");
-       if (!acpi_battery_present(battery))
-               goto end;
-       if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "design capacity:         unknown\n");
-       else
-               seq_printf(seq, "design capacity:         %d %sh\n",
-                          battery->design_capacity,
-                          acpi_battery_units(battery));
-
-       if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "last full capacity:      unknown\n");
-       else
-               seq_printf(seq, "last full capacity:      %d %sh\n",
-                          battery->full_charge_capacity,
-                          acpi_battery_units(battery));
-
-       seq_printf(seq, "battery technology:      %srechargeable\n",
-                  (!battery->technology)?"non-":"");
-
-       if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "design voltage:          unknown\n");
-       else
-               seq_printf(seq, "design voltage:          %d mV\n",
-                          battery->design_voltage);
-       seq_printf(seq, "design capacity warning: %d %sh\n",
-                  battery->design_capacity_warning,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "design capacity low:     %d %sh\n",
-                  battery->design_capacity_low,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
-       seq_printf(seq, "capacity granularity 1:  %d %sh\n",
-                  battery->capacity_granularity_1,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "capacity granularity 2:  %d %sh\n",
-                  battery->capacity_granularity_2,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "model number:            %s\n", battery->model_number);
-       seq_printf(seq, "serial number:           %s\n", battery->serial_number);
-       seq_printf(seq, "battery type:            %s\n", battery->type);
-       seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
-      end:
-       if (result)
-               seq_printf(seq, "ERROR: Unable to read battery info\n");
-       return result;
-}
-
-static int acpi_battery_print_state(struct seq_file *seq, int result)
-{
-       struct acpi_battery *battery = seq->private;
-
-       if (result)
-               goto end;
-
-       seq_printf(seq, "present:                 %s\n",
-                  acpi_battery_present(battery) ? "yes" : "no");
-       if (!acpi_battery_present(battery))
-               goto end;
-
-       seq_printf(seq, "capacity state:          %s\n",
-                       (battery->state & 0x04) ? "critical" : "ok");
-       if ((battery->state & 0x01) && (battery->state & 0x02))
-               seq_printf(seq,
-                          "charging state:          charging/discharging\n");
-       else if (battery->state & 0x01)
-               seq_printf(seq, "charging state:          discharging\n");
-       else if (battery->state & 0x02)
-               seq_printf(seq, "charging state:          charging\n");
-       else
-               seq_printf(seq, "charging state:          charged\n");
-
-       if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "present rate:            unknown\n");
-       else
-               seq_printf(seq, "present rate:            %d %s\n",
-                          battery->rate_now, acpi_battery_units(battery));
-
-       if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "remaining capacity:      unknown\n");
-       else
-               seq_printf(seq, "remaining capacity:      %d %sh\n",
-                          battery->capacity_now, acpi_battery_units(battery));
-       if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "present voltage:         unknown\n");
-       else
-               seq_printf(seq, "present voltage:         %d mV\n",
-                          battery->voltage_now);
-      end:
-       if (result)
-               seq_printf(seq, "ERROR: Unable to read battery state\n");
-
-       return result;
-}
-
-static int acpi_battery_print_alarm(struct seq_file *seq, int result)
-{
-       struct acpi_battery *battery = seq->private;
-
-       if (result)
-               goto end;
-
-       if (!acpi_battery_present(battery)) {
-               seq_printf(seq, "present:                 no\n");
-               goto end;
-       }
-       seq_printf(seq, "alarm:                   ");
-       if (!battery->alarm)
-               seq_printf(seq, "unsupported\n");
-       else
-               seq_printf(seq, "%u %sh\n", battery->alarm,
-                               acpi_battery_units(battery));
-      end:
-       if (result)
-               seq_printf(seq, "ERROR: Unable to read battery alarm\n");
-       return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
-                                       const char __user * buffer,
-                                       size_t count, loff_t * ppos)
-{
-       int result = 0;
-       char alarm_string[12] = { '\0' };
-       struct seq_file *m = file->private_data;
-       struct acpi_battery *battery = m->private;
-
-       if (!battery || (count > sizeof(alarm_string) - 1))
-               return -EINVAL;
-       if (!acpi_battery_present(battery)) {
-               result = -ENODEV;
-               goto end;
-       }
-       if (copy_from_user(alarm_string, buffer, count)) {
-               result = -EFAULT;
-               goto end;
-       }
-       alarm_string[count] = '\0';
-       battery->alarm = simple_strtol(alarm_string, NULL, 0);
-       result = acpi_battery_set_alarm(battery);
-      end:
-       if (!result)
-               return count;
-       return result;
-}
-
-typedef int(*print_func)(struct seq_file *seq, int result);
-
-static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
-       acpi_battery_print_info,
-       acpi_battery_print_state,
-       acpi_battery_print_alarm,
-};
-
-static int acpi_battery_read(int fid, struct seq_file *seq)
-{
-       struct acpi_battery *battery = seq->private;
-       int result = acpi_battery_update(battery);
-       return acpi_print_funcs[fid](seq, result);
-}
-
-#define DECLARE_FILE_FUNCTIONS(_name) \
-static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
-{ \
-       return acpi_battery_read(_name##_tag, seq); \
-} \
-static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
-{ \
-       return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
-}
-
-DECLARE_FILE_FUNCTIONS(info);
-DECLARE_FILE_FUNCTIONS(state);
-DECLARE_FILE_FUNCTIONS(alarm);
-
-#undef DECLARE_FILE_FUNCTIONS
-
-#define FILE_DESCRIPTION_RO(_name) \
-       { \
-       .name = __stringify(_name), \
-       .mode = S_IRUGO, \
-       .ops = { \
-               .open = acpi_battery_##_name##_open_fs, \
-               .read = seq_read, \
-               .llseek = seq_lseek, \
-               .release = single_release, \
-               .owner = THIS_MODULE, \
-               }, \
-       }
-
-#define FILE_DESCRIPTION_RW(_name) \
-       { \
-       .name = __stringify(_name), \
-       .mode = S_IFREG | S_IRUGO | S_IWUSR, \
-       .ops = { \
-               .open = acpi_battery_##_name##_open_fs, \
-               .read = seq_read, \
-               .llseek = seq_lseek, \
-               .write = acpi_battery_write_##_name, \
-               .release = single_release, \
-               .owner = THIS_MODULE, \
-               }, \
-       }
-
-static const struct battery_file {
-       struct file_operations ops;
-       umode_t mode;
-       const char *name;
-} acpi_battery_file[] = {
-       FILE_DESCRIPTION_RO(info),
-       FILE_DESCRIPTION_RO(state),
-       FILE_DESCRIPTION_RW(alarm),
-};
-
-#undef FILE_DESCRIPTION_RO
-#undef FILE_DESCRIPTION_RW
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-       int i;
-
-       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
-                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_battery_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
-               entry = proc_create_data(acpi_battery_file[i].name,
-                                        acpi_battery_file[i].mode,
-                                        acpi_device_dir(device),
-                                        &acpi_battery_file[i].ops,
-                                        acpi_driver_data(device));
-               if (!entry)
-                       return -ENODEV;
-       }
-       return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
-       int i;
-       if (!acpi_device_dir(device))
-               return;
-       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
-               remove_proc_entry(acpi_battery_file[i].name,
-                                 acpi_device_dir(device));
-
-       remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
-       acpi_device_dir(device) = NULL;
-}
-
-#endif
-
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -1075,15 +775,6 @@ static int acpi_battery_add(struct acpi_device *device)
        result = acpi_battery_update(battery);
        if (result)
                goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       result = acpi_battery_add_fs(device);
-#endif
-       if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_battery_remove_fs(device);
-#endif
-               goto fail;
-       }
 
        printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
                ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -1110,9 +801,6 @@ static int acpi_battery_remove(struct acpi_device *device)
                return -EINVAL;
        battery = acpi_driver_data(device);
        unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_battery_remove_fs(device);
-#endif
        sysfs_remove_battery(battery);
        mutex_destroy(&battery->lock);
        mutex_destroy(&battery->sysfs_lock);
@@ -1158,18 +846,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 {
        if (acpi_disabled)
                return;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_battery_dir = acpi_lock_battery_dir();
-       if (!acpi_battery_dir)
-               return;
-#endif
-       if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
-               return;
-       }
-       return;
+       acpi_bus_register_driver(&acpi_battery_driver);
 }
 
 static int __init acpi_battery_init(void)
@@ -1181,9 +858,6 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
 }
 
 module_init(acpi_battery_init);
index 9515f18898b2b578053c58309185daa7934cfdda..aa4d874a96fdda83dd548ba30563133e2019f04b 100644 (file)
@@ -273,6 +273,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
                },
        },
+
+       /*
+        * The following machines have broken backlight support when reporting
+        * the Windows 2012 OSI, so disable it until their support is fixed.
+        */
        {
        .callback = dmi_disable_osi_win8,
        .ident = "ASUS Zenbook Prime UX31A",
@@ -297,6 +302,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad Edge E530",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad Edge E530",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Acer Aspire V5-573G",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Acer Aspire V5-572G",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad T431s",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad T430",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index b587ec8257b2190758eca2b8b3306a933d4ae16d..fbcfaa682c157dc6a6edcc5ab61b59c55a0cc269 100644 (file)
@@ -255,7 +255,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
                        acpi_print_osc_error(handle, context,
                                "_OSC invalid revision");
                if (errors & OSC_CAPABILITIES_MASK_ERROR) {
-                       if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+                       if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD]
                            & OSC_QUERY_ENABLE)
                                goto out_success;
                        status = AE_SUPPORT;
@@ -295,30 +295,30 @@ static void acpi_bus_osc_support(void)
        };
        acpi_handle handle;
 
-       capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+       capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+       capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
 #if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
                        defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
-       capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
 #endif
 
 #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
-       capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
 #endif
 
 #ifdef ACPI_HOTPLUG_OST
-       capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
 #endif
 
        if (!ghes_disable)
-               capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
+               capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
                return;
        if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
                u32 *capbuf_ret = context.ret.pointer;
-               if (context.ret.length > OSC_SUPPORT_TYPE)
+               if (context.ret.length > OSC_SUPPORT_DWORD)
                        osc_sb_apei_support_acked =
-                               capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
+                               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
                kfree(context.ret.pointer);
        }
        /* do we need to check other returned cap? Sounds no */
index a55773801c5f1fb739df9597dbbde86d272b092c..c971929d75c20090b69afda2ab65b073bc95b140 100644 (file)
@@ -383,18 +383,15 @@ static int acpi_button_add(struct acpi_device *device)
 
        switch (button->type) {
        case ACPI_BUTTON_TYPE_POWER:
-               input->evbit[0] = BIT_MASK(EV_KEY);
-               set_bit(KEY_POWER, input->keybit);
+               input_set_capability(input, EV_KEY, KEY_POWER);
                break;
 
        case ACPI_BUTTON_TYPE_SLEEP:
-               input->evbit[0] = BIT_MASK(EV_KEY);
-               set_bit(KEY_SLEEP, input->keybit);
+               input_set_capability(input, EV_KEY, KEY_SLEEP);
                break;
 
        case ACPI_BUTTON_TYPE_LID:
-               input->evbit[0] = BIT_MASK(EV_SW);
-               set_bit(SW_LID, input->swbit);
+               input_set_capability(input, EV_SW, SW_LID);
                break;
        }
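input_set_capability() replaces the open-coded evbit/keybit manipulation: one call marks both the event type and the specific event code on the input device. For the EV_KEY case it is roughly equivalent to the sketch below (simplified; the real helper in drivers/input/input.c also handles EV_SW, EV_REL, EV_ABS and friends and validates the code):

#include <linux/bitops.h>
#include <linux/input.h>

/* Simplified stand-in for input_set_capability(input, EV_KEY, code). */
static void set_key_capability(struct input_dev *input, unsigned int code)
{
        __set_bit(EV_KEY, input->evbit);        /* device emits key events */
        __set_bit(code, input->keybit);         /* ...including this key code */
}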
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644 (file)
index 6c9ee68..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS          "ac_adapter"
-#define ACPI_BATTERY_CLASS     "battery"
-#define _COMPONENT             ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (!acpi_ac_dir)
-               acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
-       if (acpi_ac_dir) {
-               lock_ac_dir_cnt++;
-       } else {
-               printk(KERN_ERR PREFIX
-                                 "Cannot create %s\n", ACPI_AC_CLASS);
-       }
-       mutex_unlock(&cm_sbs_mutex);
-       return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (acpi_ac_dir_param)
-               lock_ac_dir_cnt--;
-       if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
-               remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
-               acpi_ac_dir = NULL;
-       }
-       mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (!acpi_battery_dir) {
-               acpi_battery_dir =
-                   proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
-       }
-       if (acpi_battery_dir) {
-               lock_battery_dir_cnt++;
-       } else {
-               printk(KERN_ERR PREFIX
-                                 "Cannot create %s\n", ACPI_BATTERY_CLASS);
-       }
-       mutex_unlock(&cm_sbs_mutex);
-       return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (acpi_battery_dir_param)
-               lock_battery_dir_cnt--;
-       if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
-           && acpi_battery_dir) {
-               remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
-               acpi_battery_dir = NULL;
-       }
-       mutex_unlock(&cm_sbs_mutex);
-       return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
index 59d3202f6b36fc197afe259b343764cb2a9621aa..d42b2fb5a7e94131ce2633ef95f4d7631729325f 100644 (file)
@@ -118,9 +118,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
        /*
         * If we were unsure about the device parent's power state up to this
         * point, the fact that the device is in D0 implies that the parent has
-        * to be in D0 too.
+        * to be in D0 too, except if ignore_parent is set.
         */
-       if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+       if (!device->power.flags.ignore_parent && device->parent
+           && device->parent->power.state == ACPI_STATE_UNKNOWN
            && result == ACPI_STATE_D0)
                device->parent->power.state = ACPI_STATE_D0;
 
@@ -177,7 +178,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                         acpi_power_state_string(state));
                return -ENODEV;
        }
-       if (device->parent && (state < device->parent->power.state)) {
+       if (!device->power.flags.ignore_parent &&
+           device->parent && (state < device->parent->power.state)) {
                dev_warn(&device->dev,
                         "Cannot transition to power state %s for parent in %s\n",
                         acpi_power_state_string(state),
@@ -1025,60 +1027,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
        }
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
-
-/**
- * acpi_dev_pm_add_dependent - Add physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
-{
-       struct acpi_device_physical_node *dep;
-       struct acpi_device *adev;
-
-       if (!depdev || acpi_bus_get_device(handle, &adev))
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(dep, &adev->power_dependent, node)
-               if (dep->dev == depdev)
-                       goto out;
-
-       dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-       if (dep) {
-               dep->dev = depdev;
-               list_add_tail(&dep->node, &adev->power_dependent);
-       }
-
- out:
-       mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
-
-/**
- * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
-{
-       struct acpi_device_physical_node *dep;
-       struct acpi_device *adev;
-
-       if (!depdev || acpi_bus_get_device(handle, &adev))
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(dep, &adev->power_dependent, node)
-               if (dep->dev == depdev) {
-                       list_del(&dep->node);
-                       kfree(dep);
-                       break;
-               }
-
-       mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
 #endif /* CONFIG_PM */
index 05ea4be01a832ccb3d8b38e4318c66fee0f83a89..ca86c1ce7c8a1b5d06713e6a5264e25bd05e99e5 100644 (file)
@@ -441,7 +441,7 @@ static void handle_dock(struct dock_station *ds, int dock)
        acpi_status status;
        struct acpi_object_list arg_list;
        union acpi_object arg;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       unsigned long long value;
 
        acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
 
@@ -450,12 +450,10 @@ static void handle_dock(struct dock_station *ds, int dock)
        arg_list.pointer = &arg;
        arg.type = ACPI_TYPE_INTEGER;
        arg.integer.value = dock;
-       status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+       status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
                acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
                                status);
-
-       kfree(buffer.pointer);
 }
 
 static inline void dock(struct dock_station *ds)
index a06d9837470585aaf0298b3f3f96f7e94bd64724..d5309fd494589b4d1596cb1c03576ed1e7ef252e 100644 (file)
@@ -28,6 +28,7 @@
 
 /* Uncomment next line to get verbose printout */
 /* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : EC: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -49,9 +50,6 @@
 #define ACPI_EC_DEVICE_NAME            "Embedded Controller"
 #define ACPI_EC_FILE_INFO              "info"
 
-#undef PREFIX
-#define PREFIX                         "ACPI: EC: "
-
 /* EC status register */
 #define ACPI_EC_FLAG_OBF       0x01    /* Output buffer full */
 #define ACPI_EC_FLAG_IBF       0x02    /* Input buffer full */
@@ -131,26 +129,26 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
        u8 x = inb(ec->command_addr);
-       pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+       pr_debug("---> status = 0x%2.2x\n", x);
        return x;
 }
 
 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
        u8 x = inb(ec->data_addr);
-       pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+       pr_debug("---> data = 0x%2.2x\n", x);
        return x;
 }
 
 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-       pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+       pr_debug("<--- command = 0x%2.2x\n", command);
        outb(command, ec->command_addr);
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
-       pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+       pr_debug("<--- data = 0x%2.2x\n", data);
        outb(data, ec->data_addr);
 }
 
@@ -241,7 +239,7 @@ static int ec_poll(struct acpi_ec *ec)
                        }
                        advance_transaction(ec, acpi_ec_read_status(ec));
                } while (time_before(jiffies, delay));
-               pr_debug(PREFIX "controller reset, restart transaction\n");
+               pr_debug("controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
                start_transaction(ec);
                spin_unlock_irqrestore(&ec->lock, flags);
@@ -309,12 +307,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                }
        }
        if (ec_wait_ibf0(ec)) {
-               pr_err(PREFIX "input buffer is not empty, "
+               pr_err("input buffer is not empty, "
                                "aborting transaction\n");
                status = -ETIME;
                goto end;
        }
-       pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+       pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
                        t->command, t->wdata ? t->wdata[0] : 0);
        /* disable GPE during transaction if storm is detected */
        if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
@@ -331,12 +329,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                /* It is safe to enable the GPE outside of the transaction. */
                acpi_enable_gpe(NULL, ec->gpe);
        } else if (t->irq_count > ec_storm_threshold) {
-               pr_info(PREFIX "GPE storm detected(%d GPEs), "
+               pr_info("GPE storm detected(%d GPEs), "
                        "transactions will use polling mode\n",
                        t->irq_count);
                set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
        }
-       pr_debug(PREFIX "transaction end\n");
+       pr_debug("transaction end\n");
 end:
        if (ec->global_lock)
                acpi_release_global_lock(glk);
@@ -570,12 +568,12 @@ static void acpi_ec_run(void *cxt)
        struct acpi_ec_query_handler *handler = cxt;
        if (!handler)
                return;
-       pr_debug(PREFIX "start query execution\n");
+       pr_debug("start query execution\n");
        if (handler->func)
                handler->func(handler->data);
        else if (handler->handle)
                acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
-       pr_debug(PREFIX "stop query execution\n");
+       pr_debug("stop query execution\n");
        kfree(handler);
 }
 
@@ -593,7 +591,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec)
                        if (!copy)
                                return -ENOMEM;
                        memcpy(copy, handler, sizeof(*copy));
-                       pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+                       pr_debug("push query execution (0x%2x) on queue\n",
+                               value);
                        return acpi_os_execute((copy->func) ?
                                OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
                                acpi_ec_run, copy);
@@ -616,7 +615,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
 {
        if (state & ACPI_EC_FLAG_SCI) {
                if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-                       pr_debug(PREFIX "push gpe query to the queue\n");
+                       pr_debug("push gpe query to the queue\n");
                        return acpi_os_execute(OSL_NOTIFY_HANDLER,
                                acpi_ec_gpe_query, ec);
                }
@@ -630,7 +629,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
        struct acpi_ec *ec = data;
        u8 status = acpi_ec_read_status(ec);
 
-       pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+       pr_debug("~~~> interrupt, status:0x%02x\n", status);
 
        advance_transaction(ec, status);
        if (ec_transaction_done(ec) &&
@@ -776,7 +775,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
                         * The AE_NOT_FOUND error will be ignored and OS
                         * continue to initialize EC.
                         */
-                       printk(KERN_ERR "Fail in evaluating the _REG object"
+                       pr_err("Fail in evaluating the _REG object"
                                " of EC device. Broken bios is suspected.\n");
                } else {
                        acpi_remove_gpe_handler(NULL, ec->gpe,
@@ -795,10 +794,10 @@ static void ec_remove_handlers(struct acpi_ec *ec)
        acpi_disable_gpe(NULL, ec->gpe);
        if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
-               pr_err(PREFIX "failed to remove space handler\n");
+               pr_err("failed to remove space handler\n");
        if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
                                &acpi_ec_gpe_handler)))
-               pr_err(PREFIX "failed to remove gpe handler\n");
+               pr_err("failed to remove gpe handler\n");
        clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
 }
 
@@ -840,7 +839,7 @@ static int acpi_ec_add(struct acpi_device *device)
        ret = !!request_region(ec->command_addr, 1, "EC cmd");
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-       pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+       pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
                          ec->gpe, ec->command_addr, ec->data_addr);
 
        ret = ec_install_handlers(ec);
@@ -931,7 +930,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
 /* MSI EC needs special treatment, enable it */
 static int ec_flag_msi(const struct dmi_system_id *id)
 {
-       printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+       pr_debug("Detected MSI hardware, enabling workarounds.\n");
        EC_FLAGS_MSI = 1;
        EC_FLAGS_VALIDATE_ECDT = 1;
        return 0;
@@ -1010,7 +1009,7 @@ int __init acpi_ec_ecdt_probe(void)
        status = acpi_get_table(ACPI_SIG_ECDT, 1,
                                (struct acpi_table_header **)&ecdt_ptr);
        if (ACPI_SUCCESS(status)) {
-               pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+               pr_info("EC description table is found, configuring boot EC\n");
                boot_ec->command_addr = ecdt_ptr->control.address;
                boot_ec->data_addr = ecdt_ptr->data.address;
                boot_ec->gpe = ecdt_ptr->gpe;
@@ -1030,7 +1029,7 @@ int __init acpi_ec_ecdt_probe(void)
 
        /* This workaround is needed only on some broken machines,
         * which require early EC, but fail to provide ECDT */
-       printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+       pr_debug("Look up EC in DSDT\n");
        status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
                                        boot_ec, NULL);
        /* Check that acpi_get_devices actually find something */
@@ -1042,7 +1041,7 @@ int __init acpi_ec_ecdt_probe(void)
                    saved_ec->data_addr != boot_ec->data_addr ||
                    saved_ec->gpe != boot_ec->gpe ||
                    saved_ec->handle != boot_ec->handle)
-                       pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+                       pr_info("ASUSTek keeps feeding us with broken "
                        "ECDT tables, which are very hard to workaround. "
                        "Trying to use DSDT EC info instead. Please send "
                        "output of acpidump to linux-acpi@vger.kernel.org\n");
index 41ade6570bc07c22c3c40445d696aea26e4e4c41..ba3da88cee45a20479df62023ee560aa422dff22 100644 (file)
@@ -168,7 +168,7 @@ static int acpi_fan_add(struct acpi_device *device)
               acpi_device_name(device), acpi_device_bid(device),
               !device->power.state ? "on" : "off");
 
-      end:
+end:
        return result;
 }
 
index 20f423337e1fb73c66379a3771954741860c6d7d..e9304dc7ebfa0dc40c37dcc3be270cc299b94170 100644 (file)
@@ -169,9 +169,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
                                        Video
   -------------------------------------------------------------------------- */
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-bool acpi_video_backlight_quirks(void);
-#else
-static inline bool acpi_video_backlight_quirks(void) { return false; }
+bool acpi_osi_is_win8(void);
 #endif
 
 #endif /* _ACPI_INTERNAL_H_ */
index 2e82e5d7693016676ad6afdf69ce3b553e968d44..a2343a1d9e0b1a0e77f3b7a7caf6676499c5f989 100644 (file)
@@ -73,7 +73,7 @@ int acpi_map_pxm_to_node(int pxm)
 {
        int node = pxm_to_node_map[pxm];
 
-       if (node < 0) {
+       if (node == NUMA_NO_NODE) {
                if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
                        return NUMA_NO_NODE;
                node = first_unset_node(nodes_found_map);
@@ -334,7 +334,7 @@ int acpi_get_pxm(acpi_handle h)
 
 int acpi_get_node(acpi_handle *handle)
 {
-       int pxm, node = -1;
+       int pxm, node = NUMA_NO_NODE;
 
        pxm = acpi_get_pxm(handle);
        if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
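The numa.c hunks only swap the literal -1 for NUMA_NO_NODE, which the kernel defines as (-1), so behaviour is unchanged and the intent ("no node mapped yet") is spelled out at each comparison. A trivial user-space rendering of the same lookup, with a made-up mapping table, is below.

#include <stdio.h>

#define MAX_PXM_DOMAINS 4
#define NUMA_NO_NODE (-1)   /* same value the kernel uses */

static int pxm_to_node_map[MAX_PXM_DOMAINS] = {
	NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE
};

int main(void)
{
	pxm_to_node_map[2] = 0;   /* pretend proximity domain 2 maps to node 0 */

	for (int pxm = 0; pxm < MAX_PXM_DOMAINS; pxm++) {
		int node = pxm_to_node_map[pxm];

		if (node == NUMA_NO_NODE)   /* clearer than the old "node < 0" test */
			printf("pxm %d: no node assigned\n", pxm);
		else
			printf("pxm %d: node %d\n", pxm, node);
	}
	return 0;
}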
index e5f416c7f66e9e92e1ed988c3a709bc2820d56f4..a0c09adf7e7d2043587dad0384ba1374353989c7 100644 (file)
@@ -569,8 +569,10 @@ static const char * const table_sigs[] = {
 
 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
 
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
 
 void __init acpi_initrd_override(void *data, size_t size)
 {
@@ -579,8 +581,6 @@ void __init acpi_initrd_override(void *data, size_t size)
        struct acpi_table_header *table;
        char cpio_path[32] = "kernel/firmware/acpi/";
        struct cpio_data file;
-       struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
-       char *p;
 
        if (data == NULL || size == 0)
                return;
@@ -625,8 +625,8 @@ void __init acpi_initrd_override(void *data, size_t size)
                        table->signature, cpio_path, file.name, table->length);
 
                all_tables_size += table->length;
-               early_initrd_files[table_nr].data = file.data;
-               early_initrd_files[table_nr].size = file.size;
+               acpi_initrd_files[table_nr].data = file.data;
+               acpi_initrd_files[table_nr].size = file.size;
                table_nr++;
        }
        if (table_nr == 0)
@@ -652,14 +652,34 @@ void __init acpi_initrd_override(void *data, size_t size)
        memblock_reserve(acpi_tables_addr, all_tables_size);
        arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
-       p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+       /*
+        * early_ioremap() can only remap 256 KiB at a time, so mapping all
+        * of the tables at once would exceed that limit.  Map and copy one
+        * chunk at a time instead, as relocate_initrd() does.
+        */
        for (no = 0; no < table_nr; no++) {
-               memcpy(p + total_offset, early_initrd_files[no].data,
-                      early_initrd_files[no].size);
-               total_offset += early_initrd_files[no].size;
+               unsigned char *src_p = acpi_initrd_files[no].data;
+               phys_addr_t size = acpi_initrd_files[no].size;
+               phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+               phys_addr_t slop, clen;
+               char *dest_p;
+
+               total_offset += size;
+
+               while (size) {
+                       slop = dest_addr & ~PAGE_MASK;
+                       clen = size;
+                       if (clen > MAP_CHUNK_SIZE - slop)
+                               clen = MAP_CHUNK_SIZE - slop;
+                       dest_p = early_ioremap(dest_addr & PAGE_MASK,
+                                                clen + slop);
+                       memcpy(dest_p + slop, src_p, clen);
+                       early_iounmap(dest_p, clen + slop);
+                       src_p += clen;
+                       dest_addr += clen;
+                       size -= clen;
+               }
        }
-       early_iounmap(p, all_tables_size);
 }
 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
 
@@ -820,7 +840,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
 
 void acpi_os_sleep(u64 ms)
 {
-       schedule_timeout_interruptible(msecs_to_jiffies(ms));
+       msleep(ms);
 }
 
 void acpi_os_stall(u32 us)
@@ -1335,7 +1355,7 @@ static int __init acpi_os_name_setup(char *str)
        if (!str || !*str)
                return 0;
 
-       for (; count-- && str && *str; str++) {
+       for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
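The rewritten copy loop in acpi_initrd_override() stays within early_ioremap()'s fixed mapping budget (MAP_CHUNK_SIZE = NR_FIX_BTMAPS << PAGE_SHIFT, i.e. 256 KiB with the usual 64 slots of 4 KiB pages) by mapping and copying one chunk at a time, using the 'slop' of the current destination address to keep mappings page-aligned. A small runnable model of just that arithmetic is below; the address and size are invented for illustration, and NR_FIX_BTMAPS = 64 is assumed.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define NR_FIX_BTMAPS  64                              /* typical x86 value */
#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)   /* 256 KiB */

int main(void)
{
	uint64_t dest_addr = 0x1000a40;   /* deliberately not page aligned */
	uint64_t size = 600 * 1024;       /* one oversized "table" to copy */

	while (size) {
		uint64_t slop = dest_addr & ~PAGE_MASK;   /* offset inside the page */
		uint64_t clen = size;

		if (clen > MAP_CHUNK_SIZE - slop)
			clen = MAP_CHUNK_SIZE - slop;

		/* early_ioremap(dest_addr & PAGE_MASK, clen + slop) would map here */
		printf("map %#llx..%#llx, copy %llu bytes at page offset %llu\n",
		       (unsigned long long)(dest_addr & PAGE_MASK),
		       (unsigned long long)((dest_addr & PAGE_MASK) + clen + slop),
		       (unsigned long long)clen, (unsigned long long)slop);

		dest_addr += clen;
		size -= clen;
	}
	return 0;
}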
index d3874f4256534d06aeb0fdb9f364e5f5851963f3..924ad92852c1a7fa69ea26f0ac8c2aaeb9d254fd 100644 (file)
@@ -49,10 +49,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
                             const struct acpi_device_id *not_used);
 static void acpi_pci_root_remove(struct acpi_device *device);
 
-#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
-                               | OSC_ACTIVE_STATE_PWR_SUPPORT \
-                               | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
-                               | OSC_MSI_SUPPORT)
+#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
+                               | OSC_PCI_ASPM_SUPPORT \
+                               | OSC_PCI_CLOCK_PM_SUPPORT \
+                               | OSC_PCI_MSI_SUPPORT)
 
 static const struct acpi_device_id root_device_ids[] = {
        {"PNP0A03", 0},
@@ -127,6 +127,55 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
        return AE_OK;
 }
 
+struct pci_osc_bit_struct {
+       u32 bit;
+       char *desc;
+};
+
+static struct pci_osc_bit_struct pci_osc_support_bit[] = {
+       { OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
+       { OSC_PCI_ASPM_SUPPORT, "ASPM" },
+       { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
+       { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
+       { OSC_PCI_MSI_SUPPORT, "MSI" },
+};
+
+static struct pci_osc_bit_struct pci_osc_control_bit[] = {
+       { OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
+       { OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
+       { OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
+       { OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
+       { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+};
+
+static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
+                           struct pci_osc_bit_struct *table, int size)
+{
+       char buf[80];
+       int i, len = 0;
+       struct pci_osc_bit_struct *entry;
+
+       buf[0] = '\0';
+       for (i = 0, entry = table; i < size; i++, entry++)
+               if (word & entry->bit)
+                       len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
+                                       len ? " " : "", entry->desc);
+
+       dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
+}
+
+static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
+{
+       decode_osc_bits(root, msg, word, pci_osc_support_bit,
+                       ARRAY_SIZE(pci_osc_support_bit));
+}
+
+static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
+{
+       decode_osc_bits(root, msg, word, pci_osc_control_bit,
+                       ARRAY_SIZE(pci_osc_control_bit));
+}
+
 static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
 
 static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -158,14 +207,14 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
        support &= OSC_PCI_SUPPORT_MASKS;
        support |= root->osc_support_set;
 
-       capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = support;
+       capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+       capbuf[OSC_SUPPORT_DWORD] = support;
        if (control) {
                *control &= OSC_PCI_CONTROL_MASKS;
-               capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+               capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
        } else {
                /* Run _OSC query only with existing controls. */
-               capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+               capbuf[OSC_CONTROL_DWORD] = root->osc_control_set;
        }
 
        status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
@@ -180,11 +229,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
 static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 {
        acpi_status status;
-       acpi_handle tmp;
 
-       status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
-       if (ACPI_FAILURE(status))
-               return status;
        mutex_lock(&osc_lock);
        status = acpi_pci_query_osc(root, flags, NULL);
        mutex_unlock(&osc_lock);
@@ -316,9 +361,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
        struct acpi_pci_root *root;
-       acpi_status status;
+       acpi_status status = AE_OK;
        u32 ctrl, capbuf[3];
-       acpi_handle tmp;
 
        if (!mask)
                return AE_BAD_PARAMETER;
@@ -331,10 +375,6 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
        if (!root)
                return AE_NOT_EXIST;
 
-       status = acpi_get_handle(handle, "_OSC", &tmp);
-       if (ACPI_FAILURE(status))
-               return status;
-
        mutex_lock(&osc_lock);
 
        *mask = ctrl | root->osc_control_set;
@@ -349,17 +389,21 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
                        goto out;
                if (ctrl == *mask)
                        break;
+               decode_osc_control(root, "platform does not support",
+                                  ctrl & ~(*mask));
                ctrl = *mask;
        }
 
        if ((ctrl & req) != req) {
+               decode_osc_control(root, "not requesting control; platform does not support",
+                                  req & ~(ctrl));
                status = AE_SUPPORT;
                goto out;
        }
 
-       capbuf[OSC_QUERY_TYPE] = 0;
-       capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-       capbuf[OSC_CONTROL_TYPE] = ctrl;
+       capbuf[OSC_QUERY_DWORD] = 0;
+       capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
+       capbuf[OSC_CONTROL_DWORD] = ctrl;
        status = acpi_pci_run_osc(handle, capbuf, mask);
        if (ACPI_SUCCESS(status))
                root->osc_control_set = *mask;
@@ -369,6 +413,87 @@ out:
 }
 EXPORT_SYMBOL(acpi_pci_osc_control_set);
 
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+                                int *clear_aspm)
+{
+       u32 support, control, requested;
+       acpi_status status;
+       struct acpi_device *device = root->device;
+       acpi_handle handle = device->handle;
+
+       /*
+        * All supported architectures that use ACPI have support for
+        * PCI domains, so we indicate this in _OSC support capabilities.
+        */
+       support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+       if (pci_ext_cfg_avail())
+               support |= OSC_PCI_EXT_CONFIG_SUPPORT;
+       if (pcie_aspm_support_enabled())
+               support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
+       if (pci_msi_enabled())
+               support |= OSC_PCI_MSI_SUPPORT;
+
+       decode_osc_support(root, "OS supports", support);
+       status = acpi_pci_osc_support(root, support);
+       if (ACPI_FAILURE(status)) {
+               dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+                        acpi_format_exception(status));
+               *no_aspm = 1;
+               return;
+       }
+
+       if (pcie_ports_disabled) {
+               dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
+               return;
+       }
+
+       if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
+               decode_osc_support(root, "not requesting OS control; OS requires",
+                                  ACPI_PCIE_REQ_SUPPORT);
+               return;
+       }
+
+       control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
+               | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+               | OSC_PCI_EXPRESS_PME_CONTROL;
+
+       if (pci_aer_available()) {
+               if (aer_acpi_firmware_first())
+                       dev_info(&device->dev,
+                                "PCIe AER handled by firmware\n");
+               else
+                       control |= OSC_PCI_EXPRESS_AER_CONTROL;
+       }
+
+       requested = control;
+       status = acpi_pci_osc_control_set(handle, &control,
+                                         OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
+       if (ACPI_SUCCESS(status)) {
+               decode_osc_control(root, "OS now controls", control);
+               if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+                       /*
+                        * We have ASPM control, but the FADT indicates
+                        * that it's unsupported. Clear it.
+                        */
+                       *clear_aspm = 1;
+               }
+       } else {
+               decode_osc_control(root, "OS requested", requested);
+               decode_osc_control(root, "platform willing to grant", control);
+               dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+                       acpi_format_exception(status));
+
+               /*
+                * We want to disable ASPM here, but aspm_disabled
+                * needs to remain in its state from boot so that we
+                * properly handle PCIe 1.1 devices.  So we set this
+                * flag here, to defer the action until after the ACPI
+                * root scan.
+                */
+               *no_aspm = 1;
+       }
+}
+
 static int acpi_pci_root_add(struct acpi_device *device,
                             const struct acpi_device_id *not_used)
 {
@@ -376,9 +501,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
        acpi_status status;
        int result;
        struct acpi_pci_root *root;
-       u32 flags, base_flags;
        acpi_handle handle = device->handle;
-       bool no_aspm = false, clear_aspm = false;
+       int no_aspm = 0, clear_aspm = 0;
 
        root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
        if (!root)
@@ -431,81 +555,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 
        root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
 
-       /*
-        * All supported architectures that use ACPI have support for
-        * PCI domains, so we indicate this in _OSC support capabilities.
-        */
-       flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
-       acpi_pci_osc_support(root, flags);
-
-       if (pci_ext_cfg_avail())
-               flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
-       if (pcie_aspm_support_enabled()) {
-               flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
-               OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
-       }
-       if (pci_msi_enabled())
-               flags |= OSC_MSI_SUPPORT;
-       if (flags != base_flags) {
-               status = acpi_pci_osc_support(root, flags);
-               if (ACPI_FAILURE(status)) {
-                       dev_info(&device->dev, "ACPI _OSC support "
-                               "notification failed, disabling PCIe ASPM\n");
-                       no_aspm = true;
-                       flags = base_flags;
-               }
-       }
-
-       if (!pcie_ports_disabled
-           && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
-               flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
-                       | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
-                       | OSC_PCI_EXPRESS_PME_CONTROL;
-
-               if (pci_aer_available()) {
-                       if (aer_acpi_firmware_first())
-                               dev_dbg(&device->dev,
-                                       "PCIe errors handled by BIOS.\n");
-                       else
-                               flags |= OSC_PCI_EXPRESS_AER_CONTROL;
-               }
-
-               dev_info(&device->dev,
-                       "Requesting ACPI _OSC control (0x%02x)\n", flags);
-
-               status = acpi_pci_osc_control_set(handle, &flags,
-                                      OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-               if (ACPI_SUCCESS(status)) {
-                       dev_info(&device->dev,
-                               "ACPI _OSC control (0x%02x) granted\n", flags);
-                       if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
-                               /*
-                                * We have ASPM control, but the FADT indicates
-                                * that it's unsupported. Clear it.
-                                */
-                               clear_aspm = true;
-                       }
-               } else {
-                       dev_info(&device->dev,
-                               "ACPI _OSC request failed (%s), "
-                               "returned control mask: 0x%02x\n",
-                               acpi_format_exception(status), flags);
-                       dev_info(&device->dev,
-                                "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
-                       /*
-                        * We want to disable ASPM here, but aspm_disabled
-                        * needs to remain in its state from boot so that we
-                        * properly handle PCIe 1.1 devices.  So we set this
-                        * flag here, to defer the action until after the ACPI
-                        * root scan.
-                        */
-                       no_aspm = true;
-               }
-       } else {
-               dev_info(&device->dev,
-                        "Unable to request _OSC control "
-                        "(_OSC support mask: 0x%02x)\n", flags);
-       }
+       negotiate_os_control(root, &no_aspm, &clear_aspm);
 
        /*
         * TBD: Need PCI interface for enumeration/configuration of roots.
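Most of the _OSC negotiation that used to live inline in acpi_pci_root_add() now sits in negotiate_os_control(), and the raw hex masks in the log messages are replaced by decode_osc_bits(), which prints the names of the bits that are set. A stand-alone model of that decoder is below; the bit values are placeholders (the real OSC_PCI_* constants live in the PCI headers), and only the string-building logic mirrors the hunk.

#include <stdio.h>

struct pci_osc_bit_struct {
	unsigned int bit;
	const char *desc;
};

/* Placeholder bit values; only the decoding logic mirrors decode_osc_bits(). */
static const struct pci_osc_bit_struct pci_osc_support_bit[] = {
	{ 1U << 0, "ExtendedConfig" },
	{ 1U << 1, "ASPM" },
	{ 1U << 2, "ClockPM" },
	{ 1U << 3, "Segments" },
	{ 1U << 4, "MSI" },
};

static void decode_osc_bits(const char *msg, unsigned int word,
			    const struct pci_osc_bit_struct *table, int size)
{
	char buf[80];
	int i, len = 0;

	buf[0] = '\0';
	for (i = 0; i < size; i++)
		if (word & table[i].bit)
			len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
					len ? " " : "", table[i].desc);

	printf("_OSC: %s [%s]\n", msg, buf);
}

int main(void)
{
	/* e.g. ExtendedConfig | Segments | MSI */
	decode_osc_bits("OS supports", (1U << 0) | (1U << 3) | (1U << 4),
			pci_osc_support_bit,
			(int)(sizeof(pci_osc_support_bit) / sizeof(pci_osc_support_bit[0])));
	return 0;
}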
index 0dbe5cdf3396e5f53b23c84bb30041329544774d..c2ad391d8041ecb6a354e2df07b9d813d817a744 100644 (file)
@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
 #define ACPI_POWER_RESOURCE_STATE_ON   0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
 
-struct acpi_power_dependent_device {
-       struct list_head node;
-       struct acpi_device *adev;
-       struct work_struct work;
-};
-
 struct acpi_power_resource {
        struct acpi_device device;
        struct list_head list_node;
-       struct list_head dependent;
        char *name;
        u32 system_level;
        u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
        return 0;
 }
 
-static void acpi_power_resume_dependent(struct work_struct *work)
-{
-       struct acpi_power_dependent_device *dep;
-       struct acpi_device_physical_node *pn;
-       struct acpi_device *adev;
-       int state;
-
-       dep = container_of(work, struct acpi_power_dependent_device, work);
-       adev = dep->adev;
-       if (acpi_power_get_inferred_state(adev, &state))
-               return;
-
-       if (state > ACPI_STATE_D0)
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(pn, &adev->physical_node_list, node)
-               pm_request_resume(pn->dev);
-
-       list_for_each_entry(pn, &adev->power_dependent, node)
-               pm_request_resume(pn->dev);
-
-       mutex_unlock(&adev->physical_node_lock);
-}
-
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
        acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
                                  resource->name));
        } else {
                result = __acpi_power_on(resource);
-               if (result) {
+               if (result)
                        resource->ref_count--;
-               } else {
-                       struct acpi_power_dependent_device *dep;
-
-                       list_for_each_entry(dep, &resource->dependent, node)
-                               schedule_work(&dep->work);
-               }
        }
        return result;
 }
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
        return result;
 }
 
-static void acpi_power_add_dependent(struct acpi_power_resource *resource,
-                                    struct acpi_device *adev)
-{
-       struct acpi_power_dependent_device *dep;
-
-       mutex_lock(&resource->resource_lock);
-
-       list_for_each_entry(dep, &resource->dependent, node)
-               if (dep->adev == adev)
-                       goto out;
-
-       dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-       if (!dep)
-               goto out;
-
-       dep->adev = adev;
-       INIT_WORK(&dep->work, acpi_power_resume_dependent);
-       list_add_tail(&dep->node, &resource->dependent);
-
- out:
-       mutex_unlock(&resource->resource_lock);
-}
-
-static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
-                                       struct acpi_device *adev)
-{
-       struct acpi_power_dependent_device *dep;
-       struct work_struct *work = NULL;
-
-       mutex_lock(&resource->resource_lock);
-
-       list_for_each_entry(dep, &resource->dependent, node)
-               if (dep->adev == adev) {
-                       list_del(&dep->node);
-                       work = &dep->work;
-                       break;
-               }
-
-       mutex_unlock(&resource->resource_lock);
-
-       if (work) {
-               cancel_work_sync(work);
-               kfree(dep);
-       }
-}
-
 static struct attribute *attrs[] = {
        NULL,
 };
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
 
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
 {
-       struct acpi_device_power_state *ps;
-       struct acpi_power_resource_entry *entry;
        int state;
 
        if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
        if (!adev->power.flags.power_resources)
                return;
 
-       ps = &adev->power.states[ACPI_STATE_D0];
-       list_for_each_entry(entry, &ps->resources, node) {
-               struct acpi_power_resource *resource = entry->resource;
-
-               if (add)
-                       acpi_power_add_dependent(resource, adev);
-               else
-                       acpi_power_remove_dependent(resource, adev);
-       }
-
        for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
                acpi_power_expose_hide(adev,
                                       &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
        acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
                                ACPI_STA_DEFAULT);
        mutex_init(&resource->resource_lock);
-       INIT_LIST_HEAD(&resource->dependent);
        INIT_LIST_HEAD(&resource->list_node);
        resource->name = device->pnp.bus_id;
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
                mutex_lock(&resource->resource_lock);
 
                result = acpi_power_get_state(resource->device.handle, &state);
-               if (result)
+               if (result) {
+                       mutex_unlock(&resource->resource_lock);
                        continue;
+               }
 
                if (state == ACPI_POWER_RESOURCE_STATE_OFF
                    && resource->ref_count) {
index 04a13784dd20a4a7e42b15b98eedcff90cb67299..6a5b152ad4d070511d08d74439155fa17a83ed8a 100644 (file)
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
 #include "sleep.h"
 
 #define _COMPONENT             ACPI_SYSTEM_COMPONENT
 
 /*
  * this file provides support for:
- * /proc/acpi/alarm
  * /proc/acpi/wakeup
  */
 
 ACPI_MODULE_NAME("sleep")
 
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define        HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef HAVE_ACPI_LEGACY_ALARM
-
-static u32 cmos_bcd_read(int offset, int rtc_control);
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
-       u32 sec, min, hr;
-       u32 day, mo, yr, cent = 0;
-       u32 today = 0;
-       unsigned char rtc_control = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-
-       rtc_control = CMOS_READ(RTC_CONTROL);
-       sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
-       min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
-       hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
-
-       /* If we ever get an FACP with proper values... */
-       if (acpi_gbl_FADT.day_alarm) {
-               /* ACPI spec: only low 6 its should be cared */
-               day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
-               if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-                       day = bcd2bin(day);
-       } else
-               day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-       if (acpi_gbl_FADT.month_alarm)
-               mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
-       else {
-               mo = cmos_bcd_read(RTC_MONTH, rtc_control);
-               today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-       }
-       if (acpi_gbl_FADT.century)
-               cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
-
-       yr = cmos_bcd_read(RTC_YEAR, rtc_control);
-
-       spin_unlock_irqrestore(&rtc_lock, flags);
-
-       /* we're trusting the FADT (see above) */
-       if (!acpi_gbl_FADT.century)
-               /* If we're not trusting the FADT, we should at least make it
-                * right for _this_ century... ehm, what is _this_ century?
-                *
-                * TBD:
-                *  ASAP: find piece of code in the kernel, e.g. star tracker driver,
-                *        which we can trust to determine the century correctly. Atom
-                *        watch driver would be nice, too...
-                *
-                *  if that has not happened, change for first release in 2050:
-                *        if (yr<50)
-                *                yr += 2100;
-                *        else
-                *                yr += 2000;   // current line of code
-                *
-                *  if that has not happened either, please do on 2099/12/31:23:59:59
-                *        s/2000/2100
-                *
-                */
-               yr += 2000;
-       else
-               yr += cent * 100;
-
-       /*
-        * Show correct dates for alarms up to a month into the future.
-        * This solves issues for nearly all situations with the common
-        * 30-day alarm clocks in PC hardware.
-        */
-       if (day < today) {
-               if (mo < 12) {
-                       mo += 1;
-               } else {
-                       mo = 1;
-                       yr += 1;
-               }
-       }
-
-       seq_printf(seq, "%4.4u-", yr);
-       (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
-       (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
-       (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
-       (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
-       (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
-       return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
-}
-
-static int get_date_field(char **p, u32 * value)
-{
-       char *next = NULL;
-       char *string_end = NULL;
-       int result = -EINVAL;
-
-       /*
-        * Try to find delimeter, only to insert null.  The end of the
-        * string won't have one, but is still valid.
-        */
-       if (*p == NULL)
-               return result;
-
-       next = strpbrk(*p, "- :");
-       if (next)
-               *next++ = '\0';
-
-       *value = simple_strtoul(*p, &string_end, 10);
-
-       /* Signal success if we got a good digit */
-       if (string_end != *p)
-               result = 0;
-
-       if (next)
-               *p = next;
-       else
-               *p = NULL;
-
-       return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
-       u32 val = CMOS_READ(offset);
-       if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-               val = bcd2bin(val);
-       return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
-       if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-               val = bin2bcd(val);
-       CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
-                       const char __user * buffer, size_t count, loff_t * ppos)
-{
-       int result = 0;
-       char alarm_string[30] = { '\0' };
-       char *p = alarm_string;
-       u32 sec, min, hr, day, mo, yr;
-       int adjust = 0;
-       unsigned char rtc_control = 0;
-
-       if (count > sizeof(alarm_string) - 1)
-               return -EINVAL;
-
-       if (copy_from_user(alarm_string, buffer, count))
-               return -EFAULT;
-
-       alarm_string[count] = '\0';
-
-       /* check for time adjustment */
-       if (alarm_string[0] == '+') {
-               p++;
-               adjust = 1;
-       }
-
-       if ((result = get_date_field(&p, &yr)))
-               goto end;
-       if ((result = get_date_field(&p, &mo)))
-               goto end;
-       if ((result = get_date_field(&p, &day)))
-               goto end;
-       if ((result = get_date_field(&p, &hr)))
-               goto end;
-       if ((result = get_date_field(&p, &min)))
-               goto end;
-       if ((result = get_date_field(&p, &sec)))
-               goto end;
-
-       spin_lock_irq(&rtc_lock);
-
-       rtc_control = CMOS_READ(RTC_CONTROL);
-
-       if (adjust) {
-               yr += cmos_bcd_read(RTC_YEAR, rtc_control);
-               mo += cmos_bcd_read(RTC_MONTH, rtc_control);
-               day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-               hr += cmos_bcd_read(RTC_HOURS, rtc_control);
-               min += cmos_bcd_read(RTC_MINUTES, rtc_control);
-               sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
-       }
-
-       spin_unlock_irq(&rtc_lock);
-
-       if (sec > 59) {
-               min += sec/60;
-               sec = sec%60;
-       }
-       if (min > 59) {
-               hr += min/60;
-               min = min%60;
-       }
-       if (hr > 23) {
-               day += hr/24;
-               hr = hr%24;
-       }
-       if (day > 31) {
-               mo += day/32;
-               day = day%32;
-       }
-       if (mo > 12) {
-               yr += mo/13;
-               mo = mo%13;
-       }
-
-       spin_lock_irq(&rtc_lock);
-       /*
-        * Disable alarm interrupt before setting alarm timer or else
-        * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
-        */
-       rtc_control &= ~RTC_AIE;
-       CMOS_WRITE(rtc_control, RTC_CONTROL);
-       CMOS_READ(RTC_INTR_FLAGS);
-
-       /* write the fields the rtc knows about */
-       cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
-       cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
-       cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
-       /*
-        * If the system supports an enhanced alarm it will have non-zero
-        * offsets into the CMOS RAM here -- which for some reason are pointing
-        * to the RTC area of memory.
-        */
-       if (acpi_gbl_FADT.day_alarm)
-               cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
-       if (acpi_gbl_FADT.month_alarm)
-               cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
-       if (acpi_gbl_FADT.century) {
-               if (adjust)
-                       yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
-               cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
-       }
-       /* enable the rtc alarm interrupt */
-       rtc_control |= RTC_AIE;
-       CMOS_WRITE(rtc_control, RTC_CONTROL);
-       CMOS_READ(RTC_INTR_FLAGS);
-
-       spin_unlock_irq(&rtc_lock);
-
-       acpi_clear_event(ACPI_EVENT_RTC);
-       acpi_enable_event(ACPI_EVENT_RTC, 0);
-
-       *ppos += count;
-
-       result = 0;
-      end:
-       return result ? result : count;
-}
-#endif                         /* HAVE_ACPI_LEGACY_ALARM */
-
 static int
 acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 {
@@ -417,41 +145,8 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
        .release = single_release,
 };
 
-#ifdef HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_system_alarm_open_fs,
-       .read = seq_read,
-       .write = acpi_system_write_alarm,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
-       acpi_clear_event(ACPI_EVENT_RTC);
-       acpi_disable_event(ACPI_EVENT_RTC, 0);
-
-       return ACPI_INTERRUPT_HANDLED;
-}
-#endif                         /* HAVE_ACPI_LEGACY_ALARM */
-
 int __init acpi_sleep_proc_init(void)
 {
-#ifdef HAVE_ACPI_LEGACY_ALARM
-       /* 'alarm' [R/W] */
-       proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-                   acpi_root_dir, &acpi_system_alarm_fops);
-
-       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
-       /*
-        * Disable the RTC event after installing RTC handler.
-        * Only when RTC alarm is set will it be enabled.
-        */
-       acpi_clear_event(ACPI_EVENT_RTC);
-       acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif                         /* HAVE_ACPI_LEGACY_ALARM */
-
        /* 'wakeup device' [R/W] */
        proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
                    acpi_root_dir, &acpi_system_wakeup_device_fops);
index cf34d903f4fb4a45b7975fc5fce60d141de2b7a9..b3171f30b319c1244007e686a0b516833443191d 100644 (file)
@@ -162,16 +162,23 @@ exit:
        return apic_id;
 }
 
-int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
 {
-#ifdef CONFIG_SMP
-       int i;
-#endif
-       int apic_id = -1;
+       int apic_id;
 
        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
+
+       return apic_id;
+}
+
+int acpi_map_cpuid(int apic_id, u32 acpi_id)
+{
+#ifdef CONFIG_SMP
+       int i;
+#endif
+
        if (apic_id == -1) {
                /*
                 * On UP processor, there is no _MAT or MADT table.
@@ -211,6 +218,15 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 #endif
        return -1;
 }
+
+int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+{
+       int apic_id;
+
+       apic_id = acpi_get_apicid(handle, type, acpi_id);
+
+       return acpi_map_cpuid(apic_id, acpi_id);
+}
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
 static bool __init processor_physically_present(acpi_handle handle)
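acpi_get_cpuid() is split in two so callers can obtain the APIC ID (acpi_get_apicid(), from _MAT or the MADT) separately from resolving it to a logical CPU number (acpi_map_cpuid()); the old entry point is kept as a thin wrapper chaining the two. The sketch below shows a hypothetical caller of the new pair; the function name is invented and 'pr' stands in for a struct acpi_processor.

/* Sketch of a hypothetical caller that wants both the APIC ID and the CPU. */
static int demo_map_processor(struct acpi_processor *pr, int type)
{
	int apic_id, cpu;

	apic_id = acpi_get_apicid(pr->handle, type, pr->acpi_id);  /* _MAT, then MADT */
	cpu = acpi_map_cpuid(apic_id, pr->acpi_id);                /* logical CPU, or -1 */
	if (cpu < 0)
		pr_debug("no logical CPU for acpi_id %u\n", pr->acpi_id);

	return cpu;
}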
index e534ba66d5b80861849ae705bf941ac570720607..40fc773ab6e078a0f9d06e7d9b3faf497ec49169 100644 (file)
@@ -153,8 +153,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata acpi_cpu_notifier =
-{
+static struct notifier_block __refdata acpi_cpu_notifier = {
            .notifier_call = acpi_cpu_soft_notify,
 };
 
index f98dd00b51a94b2d7e2d7dc9cd15f3e941e81bff..35c8f2bbcc40b45510ea7d1b67ee294efc33471b 100644 (file)
@@ -272,9 +272,6 @@ static void tsc_check_state(int state) { return; }
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
 
-       if (!pr)
-               return -EINVAL;
-
        if (!pr->pblk)
                return -ENODEV;
 
@@ -1076,12 +1073,8 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
        if (disabled_by_idle_boot_param())
                return 0;
 
-       if (!pr)
-               return -EINVAL;
-
-       if (nocst) {
+       if (nocst)
                return -ENODEV;
-       }
 
        if (!pr->flags.power_setup_done)
                return -ENODEV;
@@ -1108,9 +1101,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
        if (disabled_by_idle_boot_param())
                return 0;
 
-       if (!pr)
-               return -EINVAL;
-
        if (nocst)
                return -ENODEV;
 
@@ -1183,9 +1173,6 @@ int acpi_processor_power_init(struct acpi_processor *pr)
                first_run++;
        }
 
-       if (!pr)
-               return -EINVAL;
-
        if (acpi_gbl_FADT.cst_control && !nocst) {
                status =
                    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
index aef7e1cd1e5d62f95512484935e4fea9b11af961..d465ae6cdd004b9813cc333529c54ef29abcb16f 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
 #include <linux/acpi.h>
 #include <linux/timer.h>
 #include <linux/jiffies.h>
@@ -67,11 +61,6 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
 #define MAX_SBS_BAT                    4
 #define ACPI_SBS_BLOCK_MAX             32
 
@@ -84,9 +73,6 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
 struct acpi_battery {
        struct power_supply bat;
        struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       struct proc_dir_entry *proc_entry;
-#endif
        unsigned long update_time;
        char name[8];
        char manufacturer_name[ACPI_SBS_BLOCK_MAX];
@@ -119,9 +105,6 @@ struct acpi_sbs {
        struct acpi_device *device;
        struct acpi_smb_hc *hc;
        struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       struct proc_dir_entry *charger_entry;
-#endif
        struct acpi_battery battery[MAX_SBS_BAT];
        u8 batteries_supported:4;
        u8 manager_present:1;
@@ -481,261 +464,6 @@ static struct device_attribute alarm_attr = {
        .store = acpi_battery_alarm_store,
 };
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc/acpi)
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* Generic Routines */
-static int
-acpi_sbs_add_fs(struct proc_dir_entry **dir,
-               struct proc_dir_entry *parent_dir,
-               char *dir_name,
-               const struct file_operations *info_fops,
-               const struct file_operations *state_fops,
-               const struct file_operations *alarm_fops, void *data)
-{
-       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
-                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-       if (!*dir) {
-               *dir = proc_mkdir(dir_name, parent_dir);
-               if (!*dir) {
-                       return -ENODEV;
-               }
-       }
-
-       /* 'info' [R] */
-       if (info_fops)
-               proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
-                                info_fops, data);
-
-       /* 'state' [R] */
-       if (state_fops)
-               proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
-                                state_fops, data);
-
-       /* 'alarm' [R/W] */
-       if (alarm_fops)
-               proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
-                                alarm_fops, data);
-       return 0;
-}
-
-/* Smart Battery Interface */
-static struct proc_dir_entry *acpi_battery_dir = NULL;
-
-static inline char *acpi_battery_units(struct acpi_battery *battery)
-{
-       return acpi_battery_mode(battery) ? " mW" : " mA";
-}
-
-
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
-{
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       int result = 0;
-
-       mutex_lock(&sbs->lock);
-
-       seq_printf(seq, "present:                 %s\n",
-                  (battery->present) ? "yes" : "no");
-       if (!battery->present)
-               goto end;
-
-       seq_printf(seq, "design capacity:         %i%sh\n",
-                  battery->design_capacity * acpi_battery_scale(battery),
-                  acpi_battery_units(battery));
-       seq_printf(seq, "last full capacity:      %i%sh\n",
-                  battery->full_charge_capacity * acpi_battery_scale(battery),
-                  acpi_battery_units(battery));
-       seq_printf(seq, "battery technology:      rechargeable\n");
-       seq_printf(seq, "design voltage:          %i mV\n",
-                  battery->design_voltage * acpi_battery_vscale(battery));
-       seq_printf(seq, "design capacity warning: unknown\n");
-       seq_printf(seq, "design capacity low:     unknown\n");
-       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
-       seq_printf(seq, "capacity granularity 1:  unknown\n");
-       seq_printf(seq, "capacity granularity 2:  unknown\n");
-       seq_printf(seq, "model number:            %s\n", battery->device_name);
-       seq_printf(seq, "serial number:           %i\n",
-                  battery->serial_number);
-       seq_printf(seq, "battery type:            %s\n",
-                  battery->device_chemistry);
-       seq_printf(seq, "OEM info:                %s\n",
-                  battery->manufacturer_name);
-      end:
-       mutex_unlock(&sbs->lock);
-       return result;
-}
-
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
-{
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       int rate;
-
-       mutex_lock(&sbs->lock);
-       seq_printf(seq, "present:                 %s\n",
-                  (battery->present) ? "yes" : "no");
-       if (!battery->present)
-               goto end;
-
-       acpi_battery_get_state(battery);
-       seq_printf(seq, "capacity state:          %s\n",
-                  (battery->state & 0x0010) ? "critical" : "ok");
-       seq_printf(seq, "charging state:          %s\n",
-                  (battery->rate_now < 0) ? "discharging" :
-                  ((battery->rate_now > 0) ? "charging" : "charged"));
-       rate = abs(battery->rate_now) * acpi_battery_ipscale(battery);
-       rate *= (acpi_battery_mode(battery))?(battery->voltage_now *
-                       acpi_battery_vscale(battery)/1000):1;
-       seq_printf(seq, "present rate:            %d%s\n", rate,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "remaining capacity:      %i%sh\n",
-                  battery->capacity_now * acpi_battery_scale(battery),
-                  acpi_battery_units(battery));
-       seq_printf(seq, "present voltage:         %i mV\n",
-                  battery->voltage_now * acpi_battery_vscale(battery));
-
-      end:
-       mutex_unlock(&sbs->lock);
-       return 0;
-}
-
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
-{
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       int result = 0;
-
-       mutex_lock(&sbs->lock);
-
-       if (!battery->present) {
-               seq_printf(seq, "present:                 no\n");
-               goto end;
-       }
-
-       acpi_battery_get_alarm(battery);
-       seq_printf(seq, "alarm:                   ");
-       if (battery->alarm_capacity)
-               seq_printf(seq, "%i%sh\n",
-                          battery->alarm_capacity *
-                          acpi_battery_scale(battery),
-                          acpi_battery_units(battery));
-       else
-               seq_printf(seq, "disabled\n");
-      end:
-       mutex_unlock(&sbs->lock);
-       return result;
-}
-
-static ssize_t
-acpi_battery_write_alarm(struct file *file, const char __user * buffer,
-                        size_t count, loff_t * ppos)
-{
-       struct seq_file *seq = file->private_data;
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       char alarm_string[12] = { '\0' };
-       int result = 0;
-       mutex_lock(&sbs->lock);
-       if (!battery->present) {
-               result = -ENODEV;
-               goto end;
-       }
-       if (count > sizeof(alarm_string) - 1) {
-               result = -EINVAL;
-               goto end;
-       }
-       if (copy_from_user(alarm_string, buffer, count)) {
-               result = -EFAULT;
-               goto end;
-       }
-       alarm_string[count] = 0;
-       battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
-                                       acpi_battery_scale(battery);
-       acpi_battery_set_alarm(battery);
-      end:
-       mutex_unlock(&sbs->lock);
-       if (result)
-               return result;
-       return count;
-}
-
-static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_battery_info_fops = {
-       .open = acpi_battery_info_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_fops = {
-       .open = acpi_battery_state_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_fops = {
-       .open = acpi_battery_alarm_open_fs,
-       .read = seq_read,
-       .write = acpi_battery_write_alarm,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-/* Legacy AC Adapter Interface */
-
-static struct proc_dir_entry *acpi_ac_dir = NULL;
-
-static int acpi_ac_read_state(struct seq_file *seq, void *offset)
-{
-
-       struct acpi_sbs *sbs = seq->private;
-
-       mutex_lock(&sbs->lock);
-
-       seq_printf(seq, "state:                   %s\n",
-                  sbs->charger_present ? "on-line" : "off-line");
-
-       mutex_unlock(&sbs->lock);
-       return 0;
-}
-
-static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_ac_state_fops = {
-       .open = acpi_ac_state_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-#endif
-
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -781,12 +509,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
                return result;
 
        sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
-                       battery->name, &acpi_battery_info_fops,
-                       &acpi_battery_state_fops, &acpi_battery_alarm_fops,
-                       battery);
-#endif
        battery->bat.name = battery->name;
        battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
        if (!acpi_battery_mode(battery)) {
@@ -822,10 +544,6 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
                        device_remove_file(battery->bat.dev, &alarm_attr);
                power_supply_unregister(&battery->bat);
        }
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       proc_remove(battery->proc_entry);
-       battery->proc_entry = NULL;
-#endif
 }
 
 static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -835,13 +553,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
        result = acpi_ac_get_present(sbs);
        if (result)
                goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
-                                ACPI_AC_DIR_NAME, NULL,
-                                &acpi_ac_state_fops, NULL, sbs);
-       if (result)
-               goto end;
-#endif
+
        sbs->charger.name = "sbs-charger";
        sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
        sbs->charger.properties = sbs_ac_props;
@@ -859,10 +571,6 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
 {
        if (sbs->charger.dev)
                power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       proc_remove(sbs->charger_entry);
-       sbs->charger_entry = NULL;
-#endif
 }
 
 static void acpi_sbs_callback(void *context)
@@ -950,20 +658,6 @@ static int acpi_sbs_remove(struct acpi_device *device)
        return 0;
 }
 
-static void acpi_sbs_rmdirs(void)
-{
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       if (acpi_ac_dir) {
-               acpi_unlock_ac_dir(acpi_ac_dir);
-               acpi_ac_dir = NULL;
-       }
-       if (acpi_battery_dir) {
-               acpi_unlock_battery_dir(acpi_battery_dir);
-               acpi_battery_dir = NULL;
-       }
-#endif
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int acpi_sbs_resume(struct device *dev)
 {
@@ -995,28 +689,17 @@ static int __init acpi_sbs_init(void)
 
        if (acpi_disabled)
                return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_ac_dir = acpi_lock_ac_dir();
-       if (!acpi_ac_dir)
-               return -ENODEV;
-       acpi_battery_dir = acpi_lock_battery_dir();
-       if (!acpi_battery_dir) {
-               acpi_sbs_rmdirs();
-               return -ENODEV;
-       }
-#endif
+
        result = acpi_bus_register_driver(&acpi_sbs_driver);
-       if (result < 0) {
-               acpi_sbs_rmdirs();
+       if (result < 0)
                return -ENODEV;
-       }
+
        return 0;
 }
 
 static void __exit acpi_sbs_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_sbs_driver);
-       acpi_sbs_rmdirs();
        return;
 }
 
index fbdb82e70d10623c0bbf94aa73723a40fd32e77d..fee8a297c7d95310caa64492b75e6c2fae0c280e 100644 (file)
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(acpi_bus_get_device);
+EXPORT_SYMBOL(acpi_bus_get_device);
 
 int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *))
@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
        INIT_LIST_HEAD(&device->wakeup_list);
        INIT_LIST_HEAD(&device->physical_node_list);
        mutex_init(&device->physical_node_lock);
-       INIT_LIST_HEAD(&device->power_dependent);
 
        new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
        if (!new_bus_id) {
@@ -1121,7 +1120,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
 EXPORT_SYMBOL(acpi_bus_register_driver);
 
 /**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
  * @driver: driver to unregister
  *
  * Unregisters a driver with the ACPI bus.  Searches the namespace for all
index 05306a59aedc18b45dd89f636d907def41d67d11..db5293650f622108e80a09124097f469c727547f 100644 (file)
@@ -564,6 +564,7 @@ static ssize_t counter_set(struct kobject *kobj,
        acpi_event_status status;
        acpi_handle handle;
        int result = 0;
+       unsigned long tmp;
 
        if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
                int i;
@@ -596,8 +597,10 @@ static ssize_t counter_set(struct kobject *kobj,
                else if (!strcmp(buf, "clear\n") &&
                         (status & ACPI_EVENT_FLAG_SET))
                        result = acpi_clear_gpe(handle, index);
+               else if (!kstrtoul(buf, 0, &tmp))
+                       all_counters[index].count = tmp;
                else
-                       all_counters[index].count = strtoul(buf, NULL, 0);
+                       result = -EINVAL;
        } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
                int event = index - num_gpes;
                if (!strcmp(buf, "disable\n") &&
@@ -609,8 +612,10 @@ static ssize_t counter_set(struct kobject *kobj,
                else if (!strcmp(buf, "clear\n") &&
                         (status & ACPI_EVENT_FLAG_SET))
                        result = acpi_clear_event(event);
+               else if (!kstrtoul(buf, 0, &tmp))
+                       all_counters[index].count = tmp;
                else
-                       all_counters[index].count = strtoul(buf, NULL, 0);
+                       result = -EINVAL;
        } else
                all_counters[index].count = strtoul(buf, NULL, 0);
 
@@ -762,13 +767,8 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
        if (!hotplug_kobj)
                goto err_out;
 
-       kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
-       error = kobject_set_name(&hotplug->kobj, "%s", name);
-       if (error)
-               goto err_out;
-
-       hotplug->kobj.parent = hotplug_kobj;
-       error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+       error = kobject_init_and_add(&hotplug->kobj,
+               &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
        if (error)
                goto err_out;
 
index 6a0329340b42ad188add760db66ebcc17ba9ab22..0d9f46b5ae6d100a64ca3c3410ab23ac2af361fe 100644 (file)
@@ -299,8 +299,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "No critical threshold\n"));
                } else if (tmp <= 2732) {
-                       printk(KERN_WARNING FW_BUG "Invalid critical threshold "
-                              "(%llu)\n", tmp);
+                       pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+                               tmp);
                        tz->trips.critical.flags.valid = 0;
                } else {
                        tz->trips.critical.flags.valid = 1;
@@ -317,8 +317,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                                 * Allow override critical threshold
                                 */
                                if (crt_k > tz->trips.critical.temperature)
-                                       printk(KERN_WARNING PREFIX
-                                               "Critical threshold %d C\n", crt);
+                                       pr_warn(PREFIX "Critical threshold %d C\n",
+                                               crt);
                                tz->trips.critical.temperature = crt_k;
                        }
                }
@@ -390,8 +390,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                status = acpi_evaluate_reference(tz->device->handle, "_PSL",
                                                        NULL, &devices);
                if (ACPI_FAILURE(status)) {
-                       printk(KERN_WARNING PREFIX
-                               "Invalid passive threshold\n");
+                       pr_warn(PREFIX "Invalid passive threshold\n");
                        tz->trips.passive.flags.valid = 0;
                }
                else
@@ -453,8 +452,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                        status = acpi_evaluate_reference(tz->device->handle,
                                                name, NULL, &devices);
                        if (ACPI_FAILURE(status)) {
-                               printk(KERN_WARNING PREFIX
-                                       "Invalid active%d threshold\n", i);
+                               pr_warn(PREFIX "Invalid active%d threshold\n",
+                                       i);
                                tz->trips.active[i].flags.valid = 0;
                        }
                        else
@@ -505,7 +504,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
                valid |= tz->trips.active[i].flags.valid;
 
        if (!valid) {
-               printk(KERN_WARNING FW_BUG "No valid trip found\n");
+               pr_warn(FW_BUG "No valid trip found\n");
                return -ENODEV;
        }
        return 0;
@@ -515,10 +514,9 @@ static void acpi_thermal_check(void *data)
 {
        struct acpi_thermal *tz = data;
 
-       if (!tz->tz_enabled) {
-               pr_warn("thermal zone is disabled \n");
+       if (!tz->tz_enabled)
                return;
-       }
+
        thermal_zone_device_update(tz->thermal_zone);
 }
 
@@ -570,9 +568,10 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
         */
        if (mode == THERMAL_DEVICE_ENABLED)
                enable = 1;
-       else if (mode == THERMAL_DEVICE_DISABLED)
+       else if (mode == THERMAL_DEVICE_DISABLED) {
                enable = 0;
-       else
+               pr_warn("thermal zone will be disabled\n");
+       } else
                return -EINVAL;
 
        if (enable != tz->tz_enabled) {
@@ -923,8 +922,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
                                  acpi_bus_private_data_handler,
                                  tz->thermal_zone);
        if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX
-                               "Error attaching device data\n");
+               pr_err(PREFIX "Error attaching device data\n");
                return -ENODEV;
        }
 
@@ -1094,9 +1092,8 @@ static int acpi_thermal_add(struct acpi_device *device)
        if (result)
                goto free_memory;
 
-       printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
-              acpi_device_name(device), acpi_device_bid(device),
-              KELVIN_TO_CELSIUS(tz->temperature));
+       pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+               acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
        goto end;
 
 free_memory:
@@ -1159,24 +1156,24 @@ static int acpi_thermal_resume(struct device *dev)
 static int thermal_act(const struct dmi_system_id *d) {
 
        if (act == 0) {
-               printk(KERN_NOTICE "ACPI: %s detected: "
-                       "disabling all active thermal trip points\n", d->ident);
+               pr_notice(PREFIX "%s detected: "
+                         "disabling all active thermal trip points\n", d->ident);
                act = -1;
        }
        return 0;
 }
 static int thermal_nocrt(const struct dmi_system_id *d) {
 
-       printk(KERN_NOTICE "ACPI: %s detected: "
-               "disabling all critical thermal trip point actions.\n", d->ident);
+       pr_notice(PREFIX "%s detected: "
+                 "disabling all critical thermal trip point actions.\n", d->ident);
        nocrt = 1;
        return 0;
 }
 static int thermal_tzp(const struct dmi_system_id *d) {
 
        if (tzp == 0) {
-               printk(KERN_NOTICE "ACPI: %s detected: "
-                       "enabling thermal zone polling\n", d->ident);
+               pr_notice(PREFIX "%s detected: "
+                         "enabling thermal zone polling\n", d->ident);
                tzp = 300;      /* 300 dS = 30 Seconds */
        }
        return 0;
@@ -1184,8 +1181,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
 static int thermal_psv(const struct dmi_system_id *d) {
 
        if (psv == 0) {
-               printk(KERN_NOTICE "ACPI: %s detected: "
-                       "disabling all passive thermal trip points\n", d->ident);
+               pr_notice(PREFIX "%s detected: "
+                         "disabling all passive thermal trip points\n", d->ident);
                psv = -1;
        }
        return 0;
@@ -1238,7 +1235,7 @@ static int __init acpi_thermal_init(void)
        dmi_check_system(thermal_dmi_table);
 
        if (off) {
-               printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+               pr_notice(PREFIX "thermal control disabled\n");
                return -ENODEV;
        }
 
index 552248b0005b01a241ab511e52eef08a1c15d244..fc2cd328408053dfaa37eceb227de974b2796bf6 100644 (file)
@@ -169,11 +169,20 @@ acpi_extract_package(union acpi_object *package,
        /*
         * Validate output buffer.
         */
-       if (buffer->length < size_required) {
+       if (buffer->length == ACPI_ALLOCATE_BUFFER) {
+               buffer->pointer = ACPI_ALLOCATE(size_required);
+               if (!buffer->pointer)
+                       return AE_NO_MEMORY;
                buffer->length = size_required;
-               return AE_BUFFER_OVERFLOW;
-       } else if (buffer->length != size_required || !buffer->pointer) {
-               return AE_BAD_PARAMETER;
+               memset(buffer->pointer, 0, size_required);
+       } else {
+               if (buffer->length < size_required) {
+                       buffer->length = size_required;
+                       return AE_BUFFER_OVERFLOW;
+               } else if (buffer->length != size_required ||
+                          !buffer->pointer) {
+                       return AE_BAD_PARAMETER;
+               }
        }
 
        head = buffer->pointer;
index aebcf6355df467e3c394c00fa5c48731e42cc18d..d020df5a732ab1109d9fd3ddcaa9c7ff871b192b 100644 (file)
@@ -88,7 +88,16 @@ module_param(allow_duplicates, bool, 0644);
 static bool use_bios_initial_backlight = 1;
 module_param(use_bios_initial_backlight, bool, 0644);
 
+/*
+ * For Windows 8 systems: if set to true and the GPU driver has
+ * registered a backlight interface, skip registering ACPI video's.
+ */
+static bool use_native_backlight = false;
+module_param(use_native_backlight, bool, 0644);
+
 static int register_count;
+static struct mutex video_list_lock;
+static struct list_head video_bus_head;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device);
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -157,6 +166,7 @@ struct acpi_video_bus {
        struct acpi_video_bus_flags flags;
        struct list_head video_device_list;
        struct mutex device_list_lock;  /* protects video_device_list */
+       struct list_head entry;
        struct input_dev *input;
        char phys[32];  /* for input device */
        struct notifier_block pm_nb;
@@ -229,6 +239,14 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
                                         int event);
 
+static bool acpi_video_verify_backlight_support(void)
+{
+       if (acpi_osi_is_win8() && use_native_backlight &&
+           backlight_device_registered(BACKLIGHT_RAW))
+               return false;
+       return acpi_video_backlight_support();
+}
+
 /* backlight device sysfs support */
 static int acpi_video_get_brightness(struct backlight_device *bd)
 {
@@ -884,79 +902,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 
        if (acpi_has_method(device->dev->handle, "_DDC"))
                device->cap._DDC = 1;
-
-       if (acpi_video_backlight_support()) {
-               struct backlight_properties props;
-               struct pci_dev *pdev;
-               acpi_handle acpi_parent;
-               struct device *parent = NULL;
-               int result;
-               static int count;
-               char *name;
-
-               result = acpi_video_init_brightness(device);
-               if (result)
-                       return;
-               name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
-               if (!name)
-                       return;
-               count++;
-
-               acpi_get_parent(device->dev->handle, &acpi_parent);
-
-               pdev = acpi_get_pci_dev(acpi_parent);
-               if (pdev) {
-                       parent = &pdev->dev;
-                       pci_dev_put(pdev);
-               }
-
-               memset(&props, 0, sizeof(struct backlight_properties));
-               props.type = BACKLIGHT_FIRMWARE;
-               props.max_brightness = device->brightness->count - 3;
-               device->backlight = backlight_device_register(name,
-                                                             parent,
-                                                             device,
-                                                             &acpi_backlight_ops,
-                                                             &props);
-               kfree(name);
-               if (IS_ERR(device->backlight))
-                       return;
-
-               /*
-                * Save current brightness level in case we have to restore it
-                * before acpi_video_device_lcd_set_level() is called next time.
-                */
-               device->backlight->props.brightness =
-                               acpi_video_get_brightness(device->backlight);
-
-               device->cooling_dev = thermal_cooling_device_register("LCD",
-                                       device->dev, &video_cooling_ops);
-               if (IS_ERR(device->cooling_dev)) {
-                       /*
-                        * Set cooling_dev to NULL so we don't crash trying to
-                        * free it.
-                        * Also, why the hell we are returning early and
-                        * not attempt to register video output if cooling
-                        * device registration failed?
-                        * -- dtor
-                        */
-                       device->cooling_dev = NULL;
-                       return;
-               }
-
-               dev_info(&device->dev->dev, "registered as cooling_device%d\n",
-                        device->cooling_dev->id);
-               result = sysfs_create_link(&device->dev->dev.kobj,
-                               &device->cooling_dev->device.kobj,
-                               "thermal_cooling");
-               if (result)
-                       printk(KERN_ERR PREFIX "Create sysfs link\n");
-               result = sysfs_create_link(&device->cooling_dev->device.kobj,
-                               &device->dev->dev.kobj, "device");
-               if (result)
-                       printk(KERN_ERR PREFIX "Create sysfs link\n");
-
-       }
 }
 
 /*
@@ -1143,13 +1088,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
        acpi_video_device_bind(video, data);
        acpi_video_device_find_cap(data);
 
-       status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-                                            acpi_video_device_notify, data);
-       if (ACPI_FAILURE(status))
-               dev_err(&device->dev, "Error installing notify handler\n");
-       else
-               data->flags.notify = 1;
-
        mutex_lock(&video->device_list_lock);
        list_add_tail(&data->entry, &video->video_device_list);
        mutex_unlock(&video->device_list_lock);
@@ -1333,8 +1271,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
        unsigned long long level_current, level_next;
        int result = -EINVAL;
 
-       /* no warning message if acpi_backlight=vendor is used */
-       if (!acpi_video_backlight_support())
+       /* no warning message if acpi_backlight=vendor or a quirk is used */
+       if (!acpi_video_verify_backlight_support())
                return 0;
 
        if (!device->brightness)
@@ -1454,64 +1392,6 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
        return status;
 }
 
-static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
-{
-       acpi_status status;
-
-       if (!device || !device->video)
-               return -ENOENT;
-
-       if (device->flags.notify) {
-               status = acpi_remove_notify_handler(device->dev->handle,
-                               ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
-               if (ACPI_FAILURE(status))
-                       dev_err(&device->dev->dev,
-                                       "Can't remove video notify handler\n");
-       }
-
-       if (device->backlight) {
-               backlight_device_unregister(device->backlight);
-               device->backlight = NULL;
-       }
-       if (device->cooling_dev) {
-               sysfs_remove_link(&device->dev->dev.kobj,
-                                 "thermal_cooling");
-               sysfs_remove_link(&device->cooling_dev->device.kobj,
-                                 "device");
-               thermal_cooling_device_unregister(device->cooling_dev);
-               device->cooling_dev = NULL;
-       }
-
-       return 0;
-}
-
-static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
-{
-       int status;
-       struct acpi_video_device *dev, *next;
-
-       mutex_lock(&video->device_list_lock);
-
-       list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-
-               status = acpi_video_bus_put_one_device(dev);
-               if (ACPI_FAILURE(status))
-                       printk(KERN_WARNING PREFIX
-                              "hhuuhhuu bug in acpi video driver.\n");
-
-               if (dev->brightness) {
-                       kfree(dev->brightness->levels);
-                       kfree(dev->brightness);
-               }
-               list_del(&dev->entry);
-               kfree(dev);
-       }
-
-       mutex_unlock(&video->device_list_lock);
-
-       return 0;
-}
-
 /* acpi_video interface */
 
 /*
@@ -1521,13 +1401,13 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
 static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
 {
        return acpi_video_bus_DOS(video, 0,
-                                 acpi_video_backlight_quirks() ? 1 : 0);
+                                 acpi_osi_is_win8() ? 1 : 0);
 }
 
 static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
 {
        return acpi_video_bus_DOS(video, 0,
-                                 acpi_video_backlight_quirks() ? 0 : 1);
+                                 acpi_osi_is_win8() ? 0 : 1);
 }
 
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
@@ -1536,7 +1416,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
        struct input_dev *input;
        int keycode = 0;
 
-       if (!video)
+       if (!video || !video->input)
                return;
 
        input = video->input;
@@ -1691,12 +1571,236 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
        return AE_OK;
 }
 
+static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+{
+       if (acpi_video_verify_backlight_support()) {
+               struct backlight_properties props;
+               struct pci_dev *pdev;
+               acpi_handle acpi_parent;
+               struct device *parent = NULL;
+               int result;
+               static int count;
+               char *name;
+
+               result = acpi_video_init_brightness(device);
+               if (result)
+                       return;
+               name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
+               if (!name)
+                       return;
+               count++;
+
+               acpi_get_parent(device->dev->handle, &acpi_parent);
+
+               pdev = acpi_get_pci_dev(acpi_parent);
+               if (pdev) {
+                       parent = &pdev->dev;
+                       pci_dev_put(pdev);
+               }
+
+               memset(&props, 0, sizeof(struct backlight_properties));
+               props.type = BACKLIGHT_FIRMWARE;
+               props.max_brightness = device->brightness->count - 3;
+               device->backlight = backlight_device_register(name,
+                                                             parent,
+                                                             device,
+                                                             &acpi_backlight_ops,
+                                                             &props);
+               kfree(name);
+               if (IS_ERR(device->backlight))
+                       return;
+
+               /*
+                * Save current brightness level in case we have to restore it
+                * before acpi_video_device_lcd_set_level() is called next time.
+                */
+               device->backlight->props.brightness =
+                               acpi_video_get_brightness(device->backlight);
+
+               device->cooling_dev = thermal_cooling_device_register("LCD",
+                                       device->dev, &video_cooling_ops);
+               if (IS_ERR(device->cooling_dev)) {
+                       /*
+                        * Set cooling_dev to NULL so we don't crash trying to
+                        * free it.
+                        * Also, why the hell are we returning early and
+                        * not attempting to register video output if
+                        * cooling device registration failed?
+                        * -- dtor
+                        */
+                       device->cooling_dev = NULL;
+                       return;
+               }
+
+               dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+                        device->cooling_dev->id);
+               result = sysfs_create_link(&device->dev->dev.kobj,
+                               &device->cooling_dev->device.kobj,
+                               "thermal_cooling");
+               if (result)
+                       printk(KERN_ERR PREFIX "Create sysfs link\n");
+               result = sysfs_create_link(&device->cooling_dev->device.kobj,
+                               &device->dev->dev.kobj, "device");
+               if (result)
+                       printk(KERN_ERR PREFIX "Create sysfs link\n");
+       }
+}
+
+static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_register_backlight(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       video->pm_nb.notifier_call = acpi_video_resume;
+       video->pm_nb.priority = 0;
+       return register_pm_notifier(&video->pm_nb);
+}
+
+static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
+{
+       if (device->backlight) {
+               backlight_device_unregister(device->backlight);
+               device->backlight = NULL;
+       }
+       if (device->brightness) {
+               kfree(device->brightness->levels);
+               kfree(device->brightness);
+               device->brightness = NULL;
+       }
+       if (device->cooling_dev) {
+               sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
+               sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
+               thermal_cooling_device_unregister(device->cooling_dev);
+               device->cooling_dev = NULL;
+       }
+}
+
+static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev;
+       int error = unregister_pm_notifier(&video->pm_nb);
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_unregister_backlight(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       return error;
+}
+
+static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
+{
+       acpi_status status;
+       struct acpi_device *adev = device->dev;
+
+       status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+                                            acpi_video_device_notify, device);
+       if (ACPI_FAILURE(status))
+               dev_err(&adev->dev, "Error installing notify handler\n");
+       else
+               device->flags.notify = 1;
+}
+
+static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
+{
+       struct input_dev *input;
+       struct acpi_video_device *dev;
+       int error;
+
+       video->input = input = input_allocate_device();
+       if (!input) {
+               error = -ENOMEM;
+               goto out;
+       }
+
+       error = acpi_video_bus_start_devices(video);
+       if (error)
+               goto err_free_input;
+
+       snprintf(video->phys, sizeof(video->phys),
+                       "%s/video/input0", acpi_device_hid(video->device));
+
+       input->name = acpi_device_name(video->device);
+       input->phys = video->phys;
+       input->id.bustype = BUS_HOST;
+       input->id.product = 0x06;
+       input->dev.parent = &video->device->dev;
+       input->evbit[0] = BIT(EV_KEY);
+       set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+       set_bit(KEY_VIDEO_NEXT, input->keybit);
+       set_bit(KEY_VIDEO_PREV, input->keybit);
+       set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+       set_bit(KEY_BRIGHTNESSUP, input->keybit);
+       set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+       set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+       set_bit(KEY_DISPLAY_OFF, input->keybit);
+
+       error = input_register_device(input);
+       if (error)
+               goto err_stop_dev;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_add_notify_handler(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       return 0;
+
+err_stop_dev:
+       acpi_video_bus_stop_devices(video);
+err_free_input:
+       input_free_device(input);
+       video->input = NULL;
+out:
+       return error;
+}
+
+static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
+{
+       if (dev->flags.notify) {
+               acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
+                                          acpi_video_device_notify);
+               dev->flags.notify = 0;
+       }
+}
+
+static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_remove_notify_handler(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       acpi_video_bus_stop_devices(video);
+       input_unregister_device(video->input);
+       video->input = NULL;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev, *next;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+               list_del(&dev->entry);
+               kfree(dev);
+       }
+       mutex_unlock(&video->device_list_lock);
+
+       return 0;
+}
+
 static int instance;
 
 static int acpi_video_bus_add(struct acpi_device *device)
 {
        struct acpi_video_bus *video;
-       struct input_dev *input;
        int error;
        acpi_status status;
 
@@ -1748,62 +1852,24 @@ static int acpi_video_bus_add(struct acpi_device *device)
        if (error)
                goto err_put_video;
 
-       video->input = input = input_allocate_device();
-       if (!input) {
-               error = -ENOMEM;
-               goto err_put_video;
-       }
-
-       error = acpi_video_bus_start_devices(video);
-       if (error)
-               goto err_free_input_dev;
-
-       snprintf(video->phys, sizeof(video->phys),
-               "%s/video/input0", acpi_device_hid(video->device));
-
-       input->name = acpi_device_name(video->device);
-       input->phys = video->phys;
-       input->id.bustype = BUS_HOST;
-       input->id.product = 0x06;
-       input->dev.parent = &device->dev;
-       input->evbit[0] = BIT(EV_KEY);
-       set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
-       set_bit(KEY_VIDEO_NEXT, input->keybit);
-       set_bit(KEY_VIDEO_PREV, input->keybit);
-       set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
-       set_bit(KEY_BRIGHTNESSUP, input->keybit);
-       set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
-       set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
-       set_bit(KEY_DISPLAY_OFF, input->keybit);
-
        printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
               ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
               video->flags.multihead ? "yes" : "no",
               video->flags.rom ? "yes" : "no",
               video->flags.post ? "yes" : "no");
+       mutex_lock(&video_list_lock);
+       list_add_tail(&video->entry, &video_bus_head);
+       mutex_unlock(&video_list_lock);
 
-       video->pm_nb.notifier_call = acpi_video_resume;
-       video->pm_nb.priority = 0;
-       error = register_pm_notifier(&video->pm_nb);
-       if (error)
-               goto err_stop_video;
-
-       error = input_register_device(input);
-       if (error)
-               goto err_unregister_pm_notifier;
+       acpi_video_bus_register_backlight(video);
+       acpi_video_bus_add_notify_handler(video);
 
        return 0;
 
- err_unregister_pm_notifier:
-       unregister_pm_notifier(&video->pm_nb);
- err_stop_video:
-       acpi_video_bus_stop_devices(video);
- err_free_input_dev:
-       input_free_device(input);
- err_put_video:
+err_put_video:
        acpi_video_bus_put_devices(video);
        kfree(video->attached_array);
- err_free_video:
+err_free_video:
        kfree(video);
        device->driver_data = NULL;
 
@@ -1820,12 +1886,14 @@ static int acpi_video_bus_remove(struct acpi_device *device)
 
        video = acpi_driver_data(device);
 
-       unregister_pm_notifier(&video->pm_nb);
-
-       acpi_video_bus_stop_devices(video);
+       acpi_video_bus_remove_notify_handler(video);
+       acpi_video_bus_unregister_backlight(video);
        acpi_video_bus_put_devices(video);
 
-       input_unregister_device(video->input);
+       mutex_lock(&video_list_lock);
+       list_del(&video->entry);
+       mutex_unlock(&video_list_lock);
+
        kfree(video->attached_array);
        kfree(video);
 
@@ -1874,6 +1942,9 @@ int acpi_video_register(void)
                return 0;
        }
 
+       mutex_init(&video_list_lock);
+       INIT_LIST_HEAD(&video_bus_head);
+
        result = acpi_bus_register_driver(&acpi_video_bus);
        if (result < 0)
                return -ENODEV;
index 940edbf2fe8f4460b27fceb1e7fe5fdb7cbacfc3..84875fd4c74f9fbd100106ea3615ffcde2daf6cf 100644 (file)
@@ -168,6 +168,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
+       {
+       .callback = video_detect_force_vendor,
+       .ident = "Lenovo Yoga 13",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+               },
+       },
        { },
 };
 
@@ -233,11 +241,11 @@ static void acpi_video_caps_check(void)
                acpi_video_get_capabilities(NULL);
 }
 
-bool acpi_video_backlight_quirks(void)
+bool acpi_osi_is_win8(void)
 {
        return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
 }
-EXPORT_SYMBOL(acpi_video_backlight_quirks);
+EXPORT_SYMBOL(acpi_osi_is_win8);
 
 /* Promote the vendor interface instead of the generic video module.
  * This function allow DMI blacklists to be implemented by externals
index c6707278a6bb496d200caf9f0bd4a57233b9b1a1..c4876ac9151a56bc95a05df647e0850cea3182c4 100644 (file)
@@ -552,7 +552,6 @@ amba_aphb_device_add(struct device *parent, const char *name,
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
-       dev->dma_mask = dma_mask;
        dev->dev.coherent_dma_mask = dma_mask;
        dev->irq[0] = irq1;
        dev->irq[1] = irq2;
@@ -619,7 +618,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
                dev_set_name(&dev->dev, "%s", name);
        dev->dev.release = amba_device_release;
        dev->dev.bus = &amba_bustype;
-       dev->dev.dma_mask = &dev->dma_mask;
+       dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
        dev->res.name = dev_name(&dev->dev);
 }
 
@@ -663,9 +662,6 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
        amba_device_initialize(dev, dev->dev.init_name);
        dev->dev.init_name = NULL;
 
-       if (!dev->dev.coherent_dma_mask && dev->dma_mask)
-               dev_warn(&dev->dev, "coherent dma mask is unset\n");
-
        return amba_device_add(dev, parent);
 }
 
index 9d715ae5ff6b73b6bd09690b95e9686f196e9cdb..8e28f923cf7f3a221104eb625e4d3a89b5ab79ca 100644 (file)
@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
        else
-               printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+               dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
 
        if (pi.flags & ATA_FLAG_EM)
                ahci_reset_em(host);
index 11456371f29b04fdeee0aaf8247f893b3ac52a13..2289efdf82030e388ce977957eeb5b26f26f19d2 100644 (file)
@@ -339,6 +339,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
        .sdev_attrs             = ahci_sdev_attrs
 
 extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_platform_ops;
 extern struct ata_port_operations ahci_pmp_retry_srst_ops;
 
 unsigned int ahci_dev_classify(struct ata_port *ap);
@@ -368,6 +369,7 @@ irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
 irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
+void ahci_error_handler(struct ata_port *ap);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
index 58debb0acc3a73743ea247f0c6b260368cd67f4b..ae2d73fe321e2f2c62d8e5709a788905edfd5a89 100644 (file)
@@ -1,6 +1,6 @@
 /*
+ * copyright (c) 2013 Freescale Semiconductor, Inc.
  * Freescale IMX AHCI SATA platform driver
- * Copyright 2013 Freescale Semiconductor, Inc.
  *
  * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
  *
 #include <linux/of_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/libata.h>
 #include "ahci.h"
 
 enum {
-       HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+       PORT_PHY_CTL = 0x178,                   /* Port0 PHY Control */
+       PORT_PHY_CTL_PDDQ_LOC = 0x100000,       /* PORT_PHY_CTL bits */
+       HOST_TIMER1MS = 0xe0,                   /* Timer 1-ms */
 };
 
 struct imx_ahci_priv {
@@ -36,6 +39,56 @@ struct imx_ahci_priv {
        struct clk *sata_ref_clk;
        struct clk *ahb_clk;
        struct regmap *gpr;
+       bool no_device;
+       bool first_time;
+};
+
+static int ahci_imx_hotplug;
+module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+static void ahci_imx_error_handler(struct ata_port *ap)
+{
+       u32 reg_val;
+       struct ata_device *dev;
+       struct ata_host *host = dev_get_drvdata(ap->dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *mmio = hpriv->mmio;
+       struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+
+       ahci_error_handler(ap);
+
+       if (!(imxpriv->first_time) || ahci_imx_hotplug)
+               return;
+
+       imxpriv->first_time = false;
+
+       ata_for_each_dev(dev, &ap->link, ENABLED)
+               return;
+       /*
+        * Disable link to save power.  An imx ahci port can't be recovered
+        * without full reset once the pddq mode is enabled making it
+        * impossible to use as part of libata LPM.
+        */
+       reg_val = readl(mmio + PORT_PHY_CTL);
+       writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+       regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                       IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+                       !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+       clk_disable_unprepare(imxpriv->sata_ref_clk);
+       imxpriv->no_device = true;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+       .inherits       = &ahci_platform_ops,
+       .error_handler  = ahci_imx_error_handler,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_imx_ops,
 };
 
 static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
@@ -117,9 +170,51 @@ static void imx6q_sata_exit(struct device *dev)
        clk_disable_unprepare(imxpriv->sata_ref_clk);
 }
 
+static int imx_ahci_suspend(struct device *dev)
+{
+       struct imx_ahci_priv *imxpriv =  dev_get_drvdata(dev->parent);
+
+       /*
+        * If no_device is set, the clocks have already been gated off
+        * during initialization, so don't gate them again here.
+        */
+       if (!imxpriv->no_device) {
+               regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                               IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+                               !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+               clk_disable_unprepare(imxpriv->sata_ref_clk);
+       }
+
+       return 0;
+}
+
+static int imx_ahci_resume(struct device *dev)
+{
+       struct imx_ahci_priv *imxpriv =  dev_get_drvdata(dev->parent);
+       int ret;
+
+       if (!imxpriv->no_device) {
+               ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+               if (ret < 0) {
+                       dev_err(dev, "pre-enable sata_ref clock err:%d\n", ret);
+                       return ret;
+               }
+
+               regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                               IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+                               IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+               usleep_range(1000, 2000);
+       }
+
+       return 0;
+}
+
 static struct ahci_platform_data imx6q_sata_pdata = {
        .init = imx6q_sata_init,
        .exit = imx6q_sata_exit,
+       .ata_port_info = &ahci_imx_port_info,
+       .suspend = imx_ahci_suspend,
+       .resume = imx_ahci_resume,
 };
 
 static const struct of_device_id imx_ahci_of_match[] = {
@@ -152,6 +247,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
        ahci_dev = &ahci_pdev->dev;
        ahci_dev->parent = dev;
 
+       imxpriv->no_device = false;
+       imxpriv->first_time = true;
        imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(imxpriv->ahb_clk)) {
                dev_err(dev, "can't get ahb clock.\n");
index 2daaee05cab12d629502c913ef7102a793cdf27f..f9554318504f8a401c97b2221831b1cdbdcdb67f 100644 (file)
@@ -49,10 +49,11 @@ static struct platform_device_id ahci_devtype[] = {
 };
 MODULE_DEVICE_TABLE(platform, ahci_devtype);
 
-static struct ata_port_operations ahci_platform_ops = {
+struct ata_port_operations ahci_platform_ops = {
        .inherits       = &ahci_ops,
        .host_stop      = ahci_host_stop,
 };
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
 
 static struct ata_port_operations ahci_platform_retry_srst_ops = {
        .inherits       = &ahci_pmp_retry_srst_ops,
@@ -184,7 +185,7 @@ static int ahci_probe(struct platform_device *pdev)
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
        else
-               printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+               dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
 
        if (pi.flags & ATA_FLAG_EM)
                ahci_reset_em(host);
index 513ad7ed0c997e19c3b3c4190b09221199c3d16c..6334c8d7c3f1e6a437b90af78e585e50698f5fc8 100644 (file)
 
 enum {
        PIIX_IOCFG              = 0x54, /* IDE I/O configuration register */
-       ICH5_PMR                = 0x90, /* port mapping register */
+       ICH5_PMR                = 0x90, /* address map register */
        ICH5_PCS                = 0x92, /* port control and status */
        PIIX_SIDPR_BAR          = 5,
        PIIX_SIDPR_LEN          = 16,
@@ -233,7 +233,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
          PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
        /* 82801GB/GR/GH (ICH7, identical to ICH6) */
        { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
-       /* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+       /* 82801GBM/GHM (ICH7M, identical to ICH6M)  */
        { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
        /* Enterprise Southbridge 2 (631xESB/632xESB) */
        { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
@@ -517,7 +517,7 @@ static int ich_pata_cable_detect(struct ata_port *ap)
        const struct ich_laptop *lap = &ich_laptop[0];
        u8 mask;
 
-       /* Check for specials - Acer Aspire 5602WLMi */
+       /* Check for specials */
        while (lap->device) {
                if (lap->device == pdev->device &&
                    lap->subvendor == pdev->subsystem_vendor &&
@@ -1366,38 +1366,39 @@ static const int *piix_init_sata_map(struct pci_dev *pdev,
        const int *map;
        int i, invalid_map = 0;
        u8 map_value;
+       char buf[32];
+       char *p = buf, *end = buf + sizeof(buf);
 
        pci_read_config_byte(pdev, ICH5_PMR, &map_value);
 
        map = map_db->map[map_value & map_db->mask];
 
-       dev_info(&pdev->dev, "MAP [");
        for (i = 0; i < 4; i++) {
                switch (map[i]) {
                case RV:
                        invalid_map = 1;
-                       pr_cont(" XX");
+                       p += scnprintf(p, end - p, " XX");
                        break;
 
                case NA:
-                       pr_cont(" --");
+                       p += scnprintf(p, end - p, " --");
                        break;
 
                case IDE:
                        WARN_ON((i & 1) || map[i + 1] != IDE);
                        pinfo[i / 2] = piix_port_info[ich_pata_100];
                        i++;
-                       pr_cont(" IDE IDE");
+                       p += scnprintf(p, end - p, " IDE IDE");
                        break;
 
                default:
-                       pr_cont(" P%d", map[i]);
+                       p += scnprintf(p, end - p, " P%d", map[i]);
                        if (i & 1)
                                pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
                        break;
                }
        }
-       pr_cont(" ]\n");
+       dev_info(&pdev->dev, "MAP [%s ]\n", buf);
 
        if (invalid_map)
                dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
index acfd0f711069e9da304c95179b14127e20ae1c9c..080edd13dbc413ecd0ef4b14b3660f0efb36e9dd 100644 (file)
@@ -89,7 +89,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
                          unsigned long deadline);
 static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_dev_config(struct ata_device *dev);
 #ifdef CONFIG_PM
@@ -189,14 +188,15 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
 };
 EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
 
-int ahci_em_messages = 1;
+static bool ahci_em_messages __read_mostly = true;
 EXPORT_SYMBOL_GPL(ahci_em_messages);
-module_param(ahci_em_messages, int, 0444);
+module_param(ahci_em_messages, bool, 0444);
 /* add other LED protocol types when they become supported */
 MODULE_PARM_DESC(ahci_em_messages,
        "AHCI Enclosure Management Message control (0 = off, 1 = on)");
 
-int devslp_idle_timeout = 1000;        /* device sleep idle timeout in ms */
+/* device sleep idle timeout in ms */
+static int devslp_idle_timeout __read_mostly = 1000;
 module_param(devslp_idle_timeout, int, 0644);
 MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
 
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
                                rc = ap->ops->transmit_led_message(ap,
                                                               emp->led_state,
                                                               4);
+                               /*
+                                * If busy, give a breather but do not
+                                * release EH ownership by using msleep()
+                                * instead of ata_msleep().  EM Transmit
+                                * bit is busy for the whole host and
+                                * releasing ownership will cause other
+                                * ports to fail the same way.
+                                */
                                if (rc == -EBUSY)
-                                       ata_msleep(ap, 1);
+                                       msleep(1);
                                else
                                        break;
                        }
@@ -1981,7 +1989,7 @@ static void ahci_thaw(struct ata_port *ap)
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
 }
 
-static void ahci_error_handler(struct ata_port *ap)
+void ahci_error_handler(struct ata_port *ap)
 {
        if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
                /* restart engine */
@@ -1994,6 +2002,7 @@ static void ahci_error_handler(struct ata_port *ap)
        if (!ata_dev_enabled(ap->link.device))
                ahci_stop_engine(ap);
 }
+EXPORT_SYMBOL_GPL(ahci_error_handler);
 
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
index 4ba8b04055728d49a0ac147a608d1ad27391d62a..ab714d2ad978644752ca3c9bdff74ae1c9f63934 100644 (file)
@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
 {
        ata_acpi_clear_gtf(dev);
 }
-
-void ata_scsi_acpi_bind(struct ata_device *dev)
-{
-       acpi_handle handle = ata_dev_acpi_handle(dev);
-       if (handle)
-               acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
-}
-
-void ata_scsi_acpi_unbind(struct ata_device *dev)
-{
-       acpi_handle handle = ata_dev_acpi_handle(dev);
-       if (handle)
-               acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
-}
index c69fcce505c03d06c7b20ae333ff9708a228f869..370462fa8e01addd3387befee8f4ea78486b2cc0 100644 (file)
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
  *     should be retried.  To be used from EH.
  *
  *     SCSI midlayer limits the number of retries to scmd->allowed.
- *     scmd->retries is decremented for commands which get retried
+ *     scmd->allowed is incremented for commands which get retried
  *     due to unrelated failures (qc->err_mask is zero).
  */
 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 {
        struct scsi_cmnd *scmd = qc->scsicmd;
-       if (!qc->err_mask && scmd->retries)
-               scmd->retries--;
+       if (!qc->err_mask)
+               scmd->allowed++;
        __ata_eh_qc_complete(qc);
 }
 
index 97a0cef1295916223202058b83e80a20de74c7de..db6dfcfa3e2ee932190069290814f2a71bc8f3f6 100644 (file)
@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
                        if (!IS_ERR(sdev)) {
                                dev->sdev = sdev;
                                scsi_device_put(sdev);
-                               ata_scsi_acpi_bind(dev);
                        } else {
                                dev->sdev = NULL;
                        }
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
        struct scsi_device *sdev;
        unsigned long flags;
 
-       ata_scsi_acpi_unbind(dev);
-
        /* Alas, we need to grab scan_mutex to ensure SCSI device
         * state doesn't change underneath us and thus
         * scsi_device_get() always succeeds.  The mutex locking can
index eeeb77845d48574e3f43b4b6e456302e5f3926d9..45b5ab3a95d51158c9ef0e618d6c8ae0fdeb373b 100644 (file)
@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
 extern void ata_acpi_bind_port(struct ata_port *ap);
 extern void ata_acpi_bind_dev(struct ata_device *dev);
 extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
-extern void ata_scsi_acpi_bind(struct ata_device *dev);
-extern void ata_scsi_acpi_unbind(struct ata_device *dev);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
                                      pm_message_t state) { }
 static inline void ata_acpi_bind_port(struct ata_port *ap) {}
 static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
 #endif
 
 /* libata-scsi.c */
index 4bceb8803a10f50baee31c9c39233282ebbc6e91..b33d1f99b3a44bb40e76a529a8eddbc2268bdc20 100644 (file)
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
 
        ap->ioaddr.cmd_addr = cmd_addr;
 
-       if (pnp_port_valid(idev, 1) == 0) {
+       if (pnp_port_valid(idev, 1)) {
                ctl_addr = devm_ioport_map(&idev->dev,
                                           pnp_port_start(idev, 1), 1);
                ap->ioaddr.altstatus_addr = ctl_addr;
index 1ec53f8ca96fa682fc1492e61816c6b043876f7d..ddf470c2341d7f1f153b7d7776fa873b9c7906f3 100644 (file)
@@ -144,6 +144,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
        struct ata_host *host;
        struct ata_port *ap;
        struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+       int ret;
 
        cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* acquire resources and fill host */
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
        data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
index c51bbb9ea8e8a826e3d2fa68e693d1cf6a3a2a94..6231d4394f45d021c8dc7cf8a15a38c14e29b2c7 100644 (file)
@@ -1014,8 +1014,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
        }
        cf_port->c0 = ap->ioaddr.ctl_addr;
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rv)
+               return rv;
 
        ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
 
index 958ba2a420c34b0e5411b116d639214e700f5c47..97f4acb54ad626795972ffb8e35572101d05d08d 100644 (file)
@@ -2,7 +2,7 @@
  *  sata_promise.c - Promise SATA
  *
  *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Mikael Pettersson <mikpe@it.uu.se>
+ *                 Mikael Pettersson
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 49e783e35ee94cf57e24c2e06c7ad8a7b26a29ff..364eded31881fbbf4d83adb0909d614b29692ed9 100644 (file)
@@ -420,7 +420,6 @@ struct fs_transmit_config {
 #define RC_FLAGS_BFPS_BFP27 (0xd << 17)
 #define RC_FLAGS_BFPS_BFP47 (0xe << 17)
 
-#define RC_FLAGS_BFPS       (0x1 << 17)
 #define RC_FLAGS_BFPP       (0x1 << 21)
 #define RC_FLAGS_TEVC       (0x1 << 22)
 #define RC_FLAGS_TEP        (0x1 << 23)
index c7cfadcf67521d82f7d977979631e4805f5a31e9..34abf4d8a45ff4f3f6d25787887a8bc787779133 100644 (file)
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
  */
 void device_shutdown(void)
 {
-       struct device *dev;
+       struct device *dev, *parent;
 
        spin_lock(&devices_kset->list_lock);
        /*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
                 * prevent it from being freed because parent's
                 * lock is to be held
                 */
-               get_device(dev->parent);
+               parent = get_device(dev->parent);
                get_device(dev);
                /*
                 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
                spin_unlock(&devices_kset->list_lock);
 
                /* hold lock to avoid race with probe/release */
-               if (dev->parent)
-                       device_lock(dev->parent);
+               if (parent)
+                       device_lock(parent);
                device_lock(dev);
 
                /* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
                }
 
                device_unlock(dev);
-               if (dev->parent)
-                       device_unlock(dev->parent);
+               if (parent)
+                       device_unlock(parent);
 
                put_device(dev);
-               put_device(dev->parent);
+               put_device(parent);
 
                spin_lock(&devices_kset->list_lock);
        }
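The change above caches the pointer returned by get_device(dev->parent), so the later lock, unlock and put all act on exactly the reference that was taken, even if dev->parent changes while the list lock is dropped. A hedged sketch of the pattern outside of device_shutdown() (function name is illustrative):

#include <linux/device.h>

static void example_with_parent_locked(struct device *dev)
{
        struct device *parent = get_device(dev->parent);        /* may be NULL */

        if (parent)
                device_lock(parent);
        device_lock(dev);

        /* ... operate on dev while the parent cannot go away ... */

        device_unlock(dev);
        if (parent)
                device_unlock(parent);
        put_device(parent);             /* put_device(NULL) is a no-op */
}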
index 848ebbd25717269fa0ef34e1632e502eb29f4aeb..f48370dfc908e19a73a86cf817ad05128c0ed034 100644 (file)
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev)
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
-       int ret = -ENODEV;
-
-       cpu_hotplug_driver_lock();
+       int ret;
 
        from_nid = cpu_to_node(cpuid);
        if (from_nid == NUMA_NO_NODE)
-               goto out;
+               return -ENODEV;
 
        ret = cpu_up(cpuid);
        /*
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev)
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);
 
- out:
-       cpu_hotplug_driver_unlock();
        return ret;
 }
 
 static int cpu_subsys_offline(struct device *dev)
 {
-       int ret;
-
-       cpu_hotplug_driver_lock();
-       ret = cpu_down(dev->id);
-       cpu_hotplug_driver_unlock();
-       return ret;
+       return cpu_down(dev->id);
 }
 
 void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev,
                               const char *buf,
                               size_t count)
 {
-       return arch_cpu_probe(buf, count);
+       ssize_t cnt;
+       int ret;
+
+       ret = lock_device_hotplug_sysfs();
+       if (ret)
+               return ret;
+
+       cnt = arch_cpu_probe(buf, count);
+
+       unlock_device_hotplug();
+       return cnt;
 }
 
 static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev,
                                 const char *buf,
                                 size_t count)
 {
-       return arch_cpu_release(buf, count);
+       ssize_t cnt;
+       int ret;
+
+       ret = lock_device_hotplug_sysfs();
+       if (ret)
+               return ret;
+
+       cnt = arch_cpu_release(buf, count);
+
+       unlock_device_hotplug();
+       return cnt;
 }
 
 static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
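cpu_probe_store()/cpu_release_store() now take the generic device hotplug lock through lock_device_hotplug_sysfs() instead of the old cpu_hotplug_driver_lock(). That helper can return an error (restarting the sysfs write) rather than blocking, so every store callback using it has to check the return value. A hedged sketch of the calling convention (the worker function is hypothetical):

#include <linux/device.h>

static ssize_t example_do_hotplug_op(const char *buf, size_t count)
{
        /* hypothetical: parse buf and probe/release a CPU here */
        return count;
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        ssize_t cnt;
        int ret;

        ret = lock_device_hotplug_sysfs();      /* may fail instead of sleeping */
        if (ret)
                return ret;

        cnt = example_do_hotplug_op(buf, count);
        unlock_device_hotplug();
        return cnt;
}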
index 9e59f6535c442bbb26d6ad055b93a4b0cf5bef5d..bece691cb5d99d79f761b9400eebf1e9a78c55a2 100644 (file)
@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
                online_type = ONLINE_KEEP;
        else if (!strncmp(buf, "offline", min_t(int, count, 7)))
                online_type = -1;
-       else
-               return -EINVAL;
+       else {
+               ret = -EINVAL;
+               goto err;
+       }
 
        switch (online_type) {
        case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
                ret = -EINVAL; /* should never happen */
        }
 
+err:
        unlock_device_hotplug();
 
        if (ret)
index 9f098a82cf04b061edb203fff718ef0ca70e6328..ee039afe90786510eb19e0a536beb30413b27a38 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/suspend.h>
 #include <trace/events/power.h>
 #include <linux/cpuidle.h>
+#include <linux/timer.h>
+
 #include "../base.h"
 #include "power.h"
 
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
        return error;
 }
 
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+       struct device           *dev;
+       struct task_struct      *tsk;
+       struct timer_list       timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+       struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+       struct dpm_watchdog *wd = (void *)data;
+
+       dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+       show_stack(wd->tsk, NULL);
+       panic("%s %s: unrecoverable failure\n",
+               dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+       struct timer_list *timer = &wd->timer;
+
+       wd->dev = dev;
+       wd->tsk = current;
+
+       init_timer_on_stack(timer);
+       /* use same timeout value for both suspend and resume */
+       timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+       timer->function = dpm_watchdog_handler;
+       timer->data = (unsigned long)wd;
+       add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+       struct timer_list *timer = &wd->timer;
+
+       del_timer_sync(timer);
+       destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
                goto Complete;
 
        dpm_wait(dev->parent, async);
+       dpm_watchdog_set(&wd, dev);
        device_lock(dev);
 
        /*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
        device_unlock(dev);
+       dpm_watchdog_clear(&wd);
 
  Complete:
        complete_all(&dev->power.completion);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
        dpm_wait_for_children(dev, async);
 
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (dev->power.syscore)
                goto Complete;
 
+       dpm_watchdog_set(&wd, dev);
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+       dpm_watchdog_clear(&wd);
 
  Complete:
        complete_all(&dev->power.completion);
index ef89897c6043eec57ed2d8d1f4c6db464cfd92d1..fa41874184401cd46c155526102fb72aed99e329 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/list.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/of.h>
 #include <linux/export.h>
 
@@ -42,7 +42,7 @@
  */
 
 /**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
  * @node:      opp list node. The nodes are maintained throughout the lifetime
  *             of boot. It is expected only an optimal set of OPPs are
  *             added to the library by the SoC framework.
@@ -59,7 +59,7 @@
  *
  * This structure stores the OPP information for a given device.
  */
-struct opp {
+struct dev_pm_opp {
        struct list_head node;
 
        bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
 }
 
 /**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
  * @opp:       opp for which voltage has to be returned for
  *
  * Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  * pointer.
  */
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
-       struct opp *tmp_opp;
+       struct dev_pm_opp *tmp_opp;
        unsigned long v = 0;
 
        tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
 
        return v;
 }
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
 
 /**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  * @opp:       opp for which frequency has to be returned for
  *
  * Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  * pointer.
  */
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 {
-       struct opp *tmp_opp;
+       struct dev_pm_opp *tmp_opp;
        unsigned long f = 0;
 
        tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
 
        return f;
 }
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 
 /**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:       device for which we do this operation
  *
  * This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
  * internally references two RCU protected structures: device_opp and opp which
  * are safe as long as we are under a common RCU locked section.
  */
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp;
+       struct dev_pm_opp *temp_opp;
        int count = 0;
 
        dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
 
        return count;
 }
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
 
 /**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
  * @dev:               device for which we do this operation
  * @freq:              frequency to search for
  * @available:         true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
-                               bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                             unsigned long freq,
+                                             bool available)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
 
        return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 
 /**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  * @dev:       device for which we do this operation
  * @freq:      Start frequency
  *
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+                                            unsigned long *freq)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
 
        return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 
 /**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  * @dev:       device for which we do this operation
  * @freq:      Start frequency
  *
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+                                             unsigned long *freq)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
 
        return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
 /**
- * opp_add()  - Add an OPP table from a table definitions
+ * dev_pm_opp_add()  - Add an OPP table from a table definitions
  * @dev:       device for which we do this operation
  * @freq:      Frequency in Hz for this OPP
  * @u_volt:    Voltage in uVolts for this OPP
  *
  * This function adds an opp definition to the opp list and returns status.
  * The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
  */
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
        struct device_opp *dev_opp = NULL;
-       struct opp *opp, *new_opp;
+       struct dev_pm_opp *opp, *new_opp;
        struct list_head *head;
 
        /* allocate new OPP node */
-       new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+       new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
        if (!new_opp) {
                dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
                return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
        srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
        return 0;
 }
-EXPORT_SYMBOL_GPL(opp_add);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
 /**
  * opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
                bool availability_req)
 {
        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
-       struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
        int r = 0;
 
        /* keep the node allocated */
-       new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+       new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
        if (!new_opp) {
                dev_warn(dev, "%s: Unable to create OPP\n", __func__);
                return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
 }
 
 /**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
  * @dev:       device for which we do this operation
  * @freq:      OPP frequency to enable
  *
  * Enables a provided opp. If the operation is valid, this returns 0, else the
  * corresponding error value. It is meant to be used by users to make an OPP available
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
        return opp_set_availability(dev, freq, true);
 }
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
 
 /**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
  * @dev:       device for which we do this operation
  * @freq:      OPP frequency to disable
  *
  * Disables a provided opp. If the operation is valid, this returns
  * 0, else the corresponding error value. It is meant to be a temporary
  * control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 {
        return opp_set_availability(dev, freq, false);
 }
-EXPORT_SYMBOL_GPL(opp_disable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
 #ifdef CONFIG_CPU_FREQ
 /**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
  * @dev:       device for which we do this operation
  * @table:     Cpufreq table returned back to caller
  *
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
  * Callers should ensure that this function is *NOT* called under RCU protection
  * or in contexts where mutex locking cannot be used.
  */
-int opp_init_cpufreq_table(struct device *dev,
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
                            struct cpufreq_frequency_table **table)
 {
        struct device_opp *dev_opp;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct cpufreq_frequency_table *freq_table;
        int i = 0;
 
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
        }
 
        freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
-                            (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+                            (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
        if (!freq_table) {
                mutex_unlock(&dev_opp_list_lock);
                dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
 
 /**
- * opp_free_cpufreq_table() - free the cpufreq table
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
  * @dev:       device for which we do this operation
  * @table:     table to free
  *
- * Free up the table allocated by opp_init_cpufreq_table
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
  */
-void opp_free_cpufreq_table(struct device *dev,
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
                                struct cpufreq_frequency_table **table)
 {
        if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
        kfree(*table);
        *table = NULL;
 }
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 #endif         /* CONFIG_CPU_FREQ */
 
 /**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  * @dev:       device pointer used to lookup device OPPs.
  */
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 {
        struct device_opp *dev_opp = find_device_opp(dev);
 
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
                unsigned long freq = be32_to_cpup(val++) * 1000;
                unsigned long volt = be32_to_cpup(val++);
 
-               if (opp_add(dev, freq, volt)) {
+               if (dev_pm_opp_add(dev, freq, volt)) {
                        dev_warn(dev, "%s: Failed to add OPP %ld\n",
                                 __func__, freq);
                        continue;
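All of the above is a mechanical rename of the OPP API (opp_* to dev_pm_opp_*, struct opp to struct dev_pm_opp) plus the header move to <linux/pm_opp.h>; the RCU rules spelled out in the kerneldoc are unchanged. A hedged usage sketch of the renamed calls under those rules (device and frequency handling are illustrative):

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *freq_hz,
                            unsigned long *volt_uv)
{
        struct dev_pm_opp *opp;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq_hz);  /* updates *freq_hz */
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        /* The opp pointer is only valid inside the RCU section, so read
         * what we need before unlocking. */
        *volt_uv = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();

        return 0;
}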
index 268a35097578d94f78f4baef4d9cc8b3fa1c31cd..72e00e66ecc5d3f84c6d2cce5f6d387c3d4ee260 100644 (file)
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
  * Check if the device's runtime PM status allows it to be suspended.  If
  * another idle notification has been started earlier, return immediately.  If
  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
  out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
-       return retval ? retval : rpm_suspend(dev, rpmflags);
+       return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
 /**
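With the RPM_AUTO flag added above, an idle notification that decides to suspend now goes through the autosuspend logic, so a driver's pm_runtime_set_autosuspend_delay() is honoured rather than the device being suspended immediately. A hedged sketch of a driver relying on that behaviour (names are illustrative):

#include <linux/pm_runtime.h>

static int example_runtime_idle(struct device *dev)
{
        return 0;       /* "fine to suspend": the core applies the autosuspend delay */
}

static void example_enable_runtime_pm(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s of inactivity */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}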
index c9fd6943ce456a1123e58106b0f028418d535fdd..50329d1057ed5dc5c929fde89237add36c55ea48 100644 (file)
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
        }
 }
 
-static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
-{
-       u16 data;
-
-       if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
-               data = up ? 0x74 : 0x7C;
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
-       } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
-               data = up ? 0x75 : 0x7D;
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
-       }
-}
-
 /**************************************************
  * Init.
  **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
                bcma_core_pci_clientmode_init(pc);
 }
 
+void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+       struct bcma_drv_pci *pc;
+       u16 data;
+
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+               return;
+
+       pc = &bus->drv_pci[0];
+
+       if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+               data = up ? 0x74 : 0x7C;
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+       } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+               data = up ? 0x75 : 0x7D;
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+       }
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
+
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
                          bool enable)
 {
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
 
        pc = &bus->drv_pci[0];
 
-       bcma_core_pci_power_save(pc, true);
-
        bcma_core_pci_extend_L1timer(pc, true);
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
        pc = &bus->drv_pci[0];
 
        bcma_core_pci_extend_L1timer(pc, false);
-
-       bcma_core_pci_power_save(pc, false);
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_down);
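bcma_core_pci_power_save() is no longer called implicitly from bcma_core_pci_up()/_down(); it is exported so the host driver decides when the PCIe PHY power-save programming (which involves MDIO writes) happens. A hedged sketch of a caller (driver context and function name are illustrative):

#include <linux/bcma/bcma.h>

static void example_wifi_core_up(struct bcma_bus *bus)
{
        /* Enable PHY power save explicitly, then bring the PCIe core up. */
        bcma_core_pci_power_save(bus, true);
        bcma_core_pci_up(bus);
}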
index d2d95ff5353b08a4f49d1c3da4179090751851bf..edfa2515bc8613f952c448194bfcebf7835d75c1 100644 (file)
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
        int err;
        u32 cp;
 
+       memset(&arg64, 0, sizeof(arg64));
        err = 0;
        err |=
            copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
index 639d26b90b9117a56c69f991663f603847cc206c..2b944038453681ef15ba61f41e1cfa3a9e885fbe 100644 (file)
@@ -1193,6 +1193,7 @@ out_passthru:
                ida_pci_info_struct pciinfo;
 
                if (!arg) return -EINVAL;
+               memset(&pciinfo, 0, sizeof(pciinfo));
                pciinfo.bus = host->pci_dev->bus->number;
                pciinfo.dev_fn = host->pci_dev->devfn;
                pciinfo.board_id = host->board_id;
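Both hunks zero the reply structure before filling only some of its members and copying it back to user space; without the memset(), the untouched fields and padding would leak kernel stack contents. A hedged sketch of the pattern in a hypothetical ioctl handler:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_info {
        u32 bus;
        u32 devfn;
        u32 board_id;
        u32 reserved;           /* never written below */
};

static long example_ioctl_info(void __user *argp)
{
        struct example_info info;

        memset(&info, 0, sizeof(info));         /* nothing uninitialized can leak */
        info.bus = 1;
        info.devfn = 0;
        info.board_id = 0x1234;

        if (copy_to_user(argp, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}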
index 40e715531aa65f0e63babd13757ad6dfb104d22a..e5647690a751ef1f1ea6bcd09ee24dc758a1e292 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/aio.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -218,6 +219,48 @@ lo_do_transfer(struct loop_device *lo, int cmd,
        return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 }
 
+#ifdef CONFIG_AIO
+static void lo_rw_aio_complete(u64 data, long res)
+{
+       struct bio *bio = (struct bio *)(uintptr_t)data;
+
+       if (res > 0)
+               res = 0;
+       else if (res < 0)
+               res = -EIO;
+
+       bio_endio(bio, res);
+}
+
+static int lo_rw_aio(struct loop_device *lo, struct bio *bio)
+{
+       struct file *file = lo->lo_backing_file;
+       struct kiocb *iocb;
+       unsigned int op;
+       struct iov_iter iter;
+       struct bio_vec *bvec;
+       size_t nr_segs;
+       loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+
+       iocb = aio_kernel_alloc(GFP_NOIO);
+       if (!iocb)
+               return -ENOMEM;
+
+       if (bio_rw(bio) & WRITE)
+               op = IOCB_CMD_WRITE_ITER;
+       else
+               op = IOCB_CMD_READ_ITER;
+
+       bvec = bio_iovec_idx(bio, bio->bi_idx);
+       nr_segs = bio_segments(bio);
+       iov_iter_init_bvec(&iter, bvec, nr_segs, bvec_length(bvec, nr_segs), 0);
+       aio_kernel_init_rw(iocb, file, iov_iter_count(&iter), pos);
+       aio_kernel_init_callback(iocb, lo_rw_aio_complete, (u64)(uintptr_t)bio);
+
+       return aio_kernel_submit(iocb, op, &iter);
+}
+#endif /* CONFIG_AIO */
+
 /**
  * __do_lo_send_write - helper for writing data to a loop device
  *
@@ -418,50 +461,33 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               struct file *file = lo->lo_backing_file;
-
-               if (bio->bi_rw & REQ_FLUSH) {
-                       ret = vfs_fsync(file, 0);
-                       if (unlikely(ret && ret != -EINVAL)) {
-                               ret = -EIO;
-                               goto out;
-                       }
-               }
+               ret = lo_send(lo, bio, pos);
+       } else
+               ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 
-               /*
-                * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do not support discard if
-                * encryption is enabled, because it may give an attacker
-                * useful information.
-                */
-               if (bio->bi_rw & REQ_DISCARD) {
-                       struct file *file = lo->lo_backing_file;
-                       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       return ret;
+}
 
-                       if ((!file->f_op->fallocate) ||
-                           lo->lo_encrypt_key_size) {
-                               ret = -EOPNOTSUPP;
-                               goto out;
-                       }
-                       ret = file->f_op->fallocate(file, mode, pos,
-                                                   bio->bi_size);
-                       if (unlikely(ret && ret != -EINVAL &&
-                                    ret != -EOPNOTSUPP))
-                               ret = -EIO;
-                       goto out;
-               }
+static int lo_discard(struct loop_device *lo, struct bio *bio)
+{
+       struct file *file = lo->lo_backing_file;
+       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       int ret;
 
-               ret = lo_send(lo, bio, pos);
+       /*
+        * We use punch hole to reclaim the free space used by the
+        * image a.k.a. discard. However we do not support discard if
+        * encryption is enabled, because it may give an attacker
+        * useful information.
+        */
 
-               if ((bio->bi_rw & REQ_FUA) && !ret) {
-                       ret = vfs_fsync(file, 0);
-                       if (unlikely(ret && ret != -EINVAL))
-                               ret = -EIO;
-               }
-       } else
-               ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+       if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size)
+               return -EOPNOTSUPP;
 
-out:
+       ret = file->f_op->fallocate(file, mode, pos, bio->bi_size);
+       if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
+               ret = -EIO;
        return ret;
 }
 
@@ -525,7 +551,35 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
                do_loop_switch(lo, bio->bi_private);
                bio_put(bio);
        } else {
-               int ret = do_bio_filebacked(lo, bio);
+               int ret;
+
+               if (bio_rw(bio) == WRITE) {
+                       if (bio->bi_rw & REQ_FLUSH) {
+                               ret = vfs_fsync(lo->lo_backing_file, 1);
+                               if (unlikely(ret && ret != -EINVAL))
+                                       goto out;
+                       }
+                       if (bio->bi_rw & REQ_DISCARD) {
+                               ret = lo_discard(lo, bio);
+                               goto out;
+                       }
+               }
+#ifdef CONFIG_AIO
+               if (lo->lo_flags & LO_FLAGS_USE_AIO &&
+                   lo->transfer == transfer_none) {
+                       ret = lo_rw_aio(lo, bio);
+                       if (ret == 0)
+                               return;
+               } else
+#endif
+                       ret = do_bio_filebacked(lo, bio);
+
+               if ((bio_rw(bio) == WRITE) && bio->bi_rw & REQ_FUA && !ret) {
+                       ret = vfs_fsync(lo->lo_backing_file, 0);
+                       if (unlikely(ret && ret != -EINVAL))
+                               ret = -EIO;
+               }
+out:
                bio_endio(bio, ret);
        }
 }
@@ -547,6 +601,12 @@ static int loop_thread(void *data)
        struct loop_device *lo = data;
        struct bio *bio;
 
+       /*
+        * In cases where the underlying filesystem calls balance_dirty_pages()
+        * we want less throttling to avoid lock ups trying to write dirty
+        * pages through the loop device
+        */
+       current->flags |= PF_LESS_THROTTLE;
        set_user_nice(current, -20);
 
        while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
@@ -869,6 +929,14 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
            !file->f_op->write)
                lo_flags |= LO_FLAGS_READ_ONLY;
 
+#ifdef CONFIG_AIO
+       if (file->f_op->write_iter && file->f_op->read_iter &&
+           mapping->a_ops->direct_IO) {
+               file->f_flags |= O_DIRECT;
+               lo_flags |= LO_FLAGS_USE_AIO;
+       }
+#endif
+
        lo_blocksize = S_ISBLK(inode->i_mode) ?
                inode->i_bdev->bd_block_size : PAGE_SIZE;
 
@@ -912,6 +980,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
        set_blocksize(bdev, lo_blocksize);
 
+#ifdef CONFIG_AIO
+       /*
+        * We must not send too-small direct-io requests, so we inherit
+        * the logical block size from the underlying device
+        */
+       if ((lo_flags & LO_FLAGS_USE_AIO) && inode->i_sb->s_bdev)
+               blk_queue_logical_block_size(lo->lo_queue,
+                               bdev_logical_block_size(inode->i_sb->s_bdev));
+#endif
+
        lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
                                                lo->lo_number);
        if (IS_ERR(lo->lo_thread)) {
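The loop changes route bios through in-kernel AIO when the backing file can do direct I/O via the new read_iter/write_iter operations, keeping the buffered lo_send()/lo_receive() path as the fallback; flush, discard and FUA handling move up into loop_handle_bio(). A hedged sketch of the capability test the series applies when a backing file is attached (it relies on the iter file operations introduced by the same series, so the field names are assumptions from this patch set):

#include <linux/fs.h>

static bool example_can_use_aio(struct file *file)
{
        struct address_space *mapping = file->f_mapping;

        /* AIO path only for files that implement the iter ops and direct I/O */
        return file->f_op->write_iter && file->f_op->read_iter &&
               mapping->a_ops->direct_IO;
}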
index da52092980e2312987b6a1040df5c0ba444852c3..26d03fa0bf26696d9e004b3983a580d409d3d006 100644 (file)
@@ -1949,12 +1949,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (pci_request_selected_regions(pdev, bars, "nvme"))
                goto disable_pci;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-       else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-       else
-               goto disable_pci;
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+               goto disable;
 
        pci_set_drvdata(pdev, dev);
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2165,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        INIT_LIST_HEAD(&dev->namespaces);
        dev->pci_dev = pdev;
+
        result = nvme_set_instance(dev);
        if (result)
                goto free;
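The nvme hunk collapses the separate dma_set_mask()/dma_set_coherent_mask() calls into dma_set_mask_and_coherent() while keeping the usual try-64-bit-then-32-bit fallback. A hedged sketch of that pattern in a hypothetical PCI probe path:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev)
{
        /* Prefer 64-bit DMA, retry with 32-bit, and give up only if
         * neither the streaming nor the coherent mask can be satisfied. */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return -ENODEV;
        return 0;
}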
index 200926699778e2a0ec2605429362f92282494a56..bb5b90e8e7687a0b71d33aae92f7050f741a6fa9 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
 #include <asm/smp_plat.h>
 
+#define DRIVER_NAME            "CCI-400"
+#define DRIVER_NAME_PMU                DRIVER_NAME " PMU"
+#define PMU_NAME               "CCI_400"
+
 #define CCI_PORT_CTRL          0x0
 #define CCI_CTRL_STATUS                0xc
 
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports;
 static void __iomem *cci_ctrl_base;
 static unsigned long cci_ctrl_phys;
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI_PMCR               0x0100
+#define CCI_PID2               0x0fe8
+
+#define CCI_PMCR_CEN           0x00000001
+#define CCI_PMCR_NCNT_MASK     0x0000f800
+#define CCI_PMCR_NCNT_SHIFT    11
+
+#define CCI_PID2_REV_MASK      0xf0
+#define CCI_PID2_REV_SHIFT     4
+
+/* Port ids */
+#define CCI_PORT_S0    0
+#define CCI_PORT_S1    1
+#define CCI_PORT_S2    2
+#define CCI_PORT_S3    3
+#define CCI_PORT_S4    4
+#define CCI_PORT_M0    5
+#define CCI_PORT_M1    6
+#define CCI_PORT_M2    7
+
+#define CCI_REV_R0             0
+#define CCI_REV_R1             1
+#define CCI_REV_R0_P4          4
+#define CCI_REV_R1_P2          6
+
+#define CCI_PMU_EVT_SEL                0x000
+#define CCI_PMU_CNTR           0x004
+#define CCI_PMU_CNTR_CTRL      0x008
+#define CCI_PMU_OVRFLW         0x00c
+
+#define CCI_PMU_OVRFLW_FLAG    1
+
+#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+       CCI_PMU_CYCLES = 0xff
+};
+
+#define CCI_PMU_EVENT_MASK             0xff
+#define CCI_PMU_EVENT_SOURCE(event)    ((event >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event)      (event & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI_PMU_CYCLE_CNTR_IDX         0
+#define CCI_PMU_CNTR0_IDX              1
+#define CCI_PMU_CNTR_LAST(cci_pmu)     (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV   0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV   0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV  0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV  0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV   0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV   0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV  0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV  0x11
+
+struct pmu_port_event_ranges {
+       u8 slave_min;
+       u8 slave_max;
+       u8 master_min;
+       u8 master_max;
+};
+
+static struct pmu_port_event_ranges port_event_range[] = {
+       [CCI_REV_R0] = {
+               .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
+               .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
+               .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
+               .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
+       },
+       [CCI_REV_R1] = {
+               .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
+               .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
+               .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
+               .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
+       },
+};
+
+struct cci_pmu_drv_data {
+       void __iomem *base;
+       struct arm_pmu *cci_pmu;
+       int nr_irqs;
+       int irqs[CCI_PMU_MAX_HW_EVENTS];
+       unsigned long active_irqs;
+       struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+       unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+       struct pmu_port_event_ranges *port_ranges;
+       struct pmu_hw_events hw_events;
+};
+static struct cci_pmu_drv_data *pmu;
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+       int i;
+
+       for (i = 0; i < nr_irqs; i++)
+               if (irq == irqs[i])
+                       return true;
+
+       return false;
+}
+
+static int probe_cci_revision(void)
+{
+       int rev;
+       rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+       rev >>= CCI_PID2_REV_SHIFT;
+
+       if (rev <= CCI_REV_R0_P4)
+               return CCI_REV_R0;
+       else if (rev <= CCI_REV_R1_P2)
+               return CCI_REV_R1;
+
+       return -ENOENT;
+}
+
+static struct pmu_port_event_ranges *port_range_by_rev(void)
+{
+       int rev = probe_cci_revision();
+
+       if (rev < 0)
+               return NULL;
+
+       return &port_event_range[rev];
+}
+
+static int pmu_is_valid_slave_event(u8 ev_code)
+{
+       return pmu->port_ranges->slave_min <= ev_code &&
+               ev_code <= pmu->port_ranges->slave_max;
+}
+
+static int pmu_is_valid_master_event(u8 ev_code)
+{
+       return pmu->port_ranges->master_min <= ev_code &&
+               ev_code <= pmu->port_ranges->master_max;
+}
+
+static int pmu_validate_hw_event(u8 hw_event)
+{
+       u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+       u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+
+       switch (ev_source) {
+       case CCI_PORT_S0:
+       case CCI_PORT_S1:
+       case CCI_PORT_S2:
+       case CCI_PORT_S3:
+       case CCI_PORT_S4:
+               /* Slave Interface */
+               if (pmu_is_valid_slave_event(ev_code))
+                       return hw_event;
+               break;
+       case CCI_PORT_M0:
+       case CCI_PORT_M1:
+       case CCI_PORT_M2:
+               /* Master Interface */
+               if (pmu_is_valid_master_event(ev_code))
+                       return hw_event;
+               break;
+       }
+
+       return -ENOENT;
+}
+
+static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+{
+       return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+               idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+       return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+       return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+       pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+       pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+       event &= CCI_PMU_EVENT_MASK;
+       pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+       u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+                     CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+       /* add 1 for cycle counter */
+       return n_cnts + 1;
+}
+
+static struct pmu_hw_events *pmu_get_hw_events(void)
+{
+       return &pmu->hw_events;
+}
+
+static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_event = &event->hw;
+       unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+       int idx;
+
+       if (cci_event == CCI_PMU_CYCLES) {
+               if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+                       return -EAGAIN;
+
+               return CCI_PMU_CYCLE_CNTR_IDX;
+       }
+
+       for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+               if (!test_and_set_bit(idx, hw->used_mask))
+                       return idx;
+
+       /* No counters available */
+       return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+       int mapping;
+       u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+
+       if (event->attr.type < PERF_TYPE_MAX)
+               return -ENOENT;
+
+       if (config == CCI_PMU_CYCLES)
+               mapping = config;
+       else
+               mapping = pmu_validate_hw_event(config);
+
+       return mapping;
+}
+
+static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+       int i;
+       struct platform_device *pmu_device = cci_pmu->plat_device;
+
+       if (unlikely(!pmu_device))
+               return -ENODEV;
+
+       if (pmu->nr_irqs < 1) {
+               dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Register all available CCI PMU interrupts. In the interrupt handler
+        * we iterate over the counters checking for interrupt source (the
+        * overflowing counter) and clear it.
+        *
+        * This should allow handling of non-unique interrupt for the counters.
+        */
+       for (i = 0; i < pmu->nr_irqs; i++) {
+               int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+                               "arm-cci-pmu", cci_pmu);
+               if (err) {
+                       dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+                               pmu->irqs[i]);
+                       return err;
+               }
+
+               set_bit(i, &pmu->active_irqs);
+       }
+
+       return 0;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+       unsigned long flags;
+       struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       int idx, handled = IRQ_NONE;
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+       regs = get_irq_regs();
+       /*
+        * Iterate over counters and update the corresponding perf events.
+        * This should work regardless of whether we have per-counter overflow
+        * interrupt or a combined overflow interrupt.
+        */
+       for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+               struct perf_event *event = events->events[idx];
+               struct hw_perf_event *hw_counter;
+
+               if (!event)
+                       continue;
+
+               hw_counter = &event->hw;
+
+               /* Did this counter overflow? */
+               if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG))
+                       continue;
+
+               pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+               handled = IRQ_HANDLED;
+
+               armpmu_event_update(event);
+               perf_sample_data_init(&data, 0, hw_counter->last_period);
+               if (!armpmu_event_set_period(event))
+                       continue;
+
+               if (perf_event_overflow(event, &data, regs))
+                       cci_pmu->disable(event);
+       }
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+       return IRQ_RETVAL(handled);
+}
+
+static void pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+       int i;
+
+       for (i = 0; i < pmu->nr_irqs; i++) {
+               if (!test_and_clear_bit(i, &pmu->active_irqs))
+                       continue;
+
+               free_irq(pmu->irqs[i], cci_pmu);
+       }
+}
+
+static void pmu_enable_event(struct perf_event *event)
+{
+       unsigned long flags;
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /* Configure the event to count, unless you are counting cycles */
+       if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+               pmu_set_event(idx, hw_counter->config_base);
+
+       pmu_enable_counter(idx);
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_disable_event(struct perf_event *event)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+               return;
+       }
+
+       pmu_disable_counter(idx);
+}
+
+static void pmu_start(struct arm_pmu *cci_pmu)
+{
+       u32 val;
+       unsigned long flags;
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /* Enable all the PMU counters. */
+       val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+       writel(val, cci_ctrl_base + CCI_PMCR);
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_stop(struct arm_pmu *cci_pmu)
+{
+       u32 val;
+       unsigned long flags;
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /* Disable all the PMU counters. */
+       val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+       writel(val, cci_ctrl_base + CCI_PMCR);
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+       u32 value;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+               return 0;
+       }
+       value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+       return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+       else
+               pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+{
+       *cci_pmu = (struct arm_pmu){
+               .name             = PMU_NAME,
+               .max_period       = (1LLU << 32) - 1,
+               .get_hw_events    = pmu_get_hw_events,
+               .get_event_idx    = pmu_get_event_idx,
+               .map_event        = pmu_map_event,
+               .request_irq      = pmu_request_irq,
+               .handle_irq       = pmu_handle_irq,
+               .free_irq         = pmu_free_irq,
+               .enable           = pmu_enable_event,
+               .disable          = pmu_disable_event,
+               .start            = pmu_start,
+               .stop             = pmu_stop,
+               .read_counter     = pmu_read_counter,
+               .write_counter    = pmu_write_counter,
+       };
+
+       cci_pmu->plat_device = pdev;
+       cci_pmu->num_events = pmu_get_max_counters();
+
+       return armpmu_register(cci_pmu, -1);
+}
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+       {
+               .compatible = "arm,cci-400-pmu",
+       },
+       {},
+};
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int i, ret, irq;
+
+       pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+       if (!pmu)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pmu->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pmu->base))
+               return -ENOMEM;
+
+       /*
+        * CCI PMU has 5 overflow signals - one per counter; but some may be tied
+        * together to a common interrupt.
+        */
+       pmu->nr_irqs = 0;
+       for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0)
+                       break;
+
+               if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+                       continue;
+
+               pmu->irqs[pmu->nr_irqs++] = irq;
+       }
+
+       /*
+        * Ensure that the device tree has as many interrupts as the number
+        * of counters.
+        */
+       if (i < CCI_PMU_MAX_HW_EVENTS) {
+               dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
+                       i, CCI_PMU_MAX_HW_EVENTS);
+               return -EINVAL;
+       }
+
+       pmu->port_ranges = port_range_by_rev();
+       if (!pmu->port_ranges) {
+               dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+               return -EINVAL;
+       }
+
+       pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
+       if (!pmu->cci_pmu)
+               return -ENOMEM;
+
+       pmu->hw_events.events = pmu->events;
+       pmu->hw_events.used_mask = pmu->used_mask;
+       raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+
+       ret = cci_pmu_init(pmu->cci_pmu, pdev);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+       if (!cci_probed())
+               return -ENODEV;
+
+       return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 struct cpu_port {
        u64 mpidr;
        u32 port;
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(cci_ace_get_port);
 
-static void __init cci_ace_init_ports(void)
+static void cci_ace_init_ports(void)
 {
        int port, cpu;
        struct device_node *cpun;
@@ -386,7 +958,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = {
        {},
 };
 
-static int __init cci_probe(void)
+static int cci_probe(void)
 {
        struct cci_nb_ports const *cci_config;
        int ret, i, nb_ace = 0, nb_ace_lite = 0;
@@ -490,7 +1062,7 @@ memalloc_err:
 static int cci_init_status = -EAGAIN;
 static DEFINE_MUTEX(cci_probing);
 
-static int __init cci_init(void)
+static int cci_init(void)
 {
        if (cci_init_status != -EAGAIN)
                return cci_init_status;
@@ -502,18 +1074,55 @@ static int __init cci_init(void)
        return cci_init_status;
 }
 
+#ifdef CONFIG_HW_PERF_EVENTS
+static struct platform_driver cci_pmu_driver = {
+       .driver = {
+                  .name = DRIVER_NAME_PMU,
+                  .of_match_table = arm_cci_pmu_matches,
+                 },
+       .probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+       .driver = {
+                  .name = DRIVER_NAME,
+                  .of_match_table = arm_cci_matches,
+                 },
+       .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&cci_pmu_driver);
+       if (ret)
+               return ret;
+
+       return platform_driver_register(&cci_platform_driver);
+}
+
+#else
+
+static int __init cci_platform_init(void)
+{
+       return 0;
+}
+
+#endif
 /*
  * To sort out early init call ordering, a helper function is provided to
  * check whether the CCI driver has been initialized; if it has not, it
  * calls the init function that probes the driver and updates the return value.
  */
-bool __init cci_probed(void)
+bool cci_probed(void)
 {
        return cci_init() == 0;
 }
 EXPORT_SYMBOL_GPL(cci_probed);
 
 early_initcall(cci_init);
+core_initcall(cci_platform_init);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARM CCI support");
index 19ab6ff53d59776cedc592a5b18c2fba8fe02a65..2394e9753ef56b47e93d91bd7499eeb657c51383 100644 (file)
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
                                         phys_addr_t sdramwins_phys_base,
                                         size_t sdramwins_size)
 {
+       struct device_node *np;
        int win;
 
        mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
                return -ENOMEM;
        }
 
-       if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
+       np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
+       if (np) {
                mbus->hw_io_coherency = 1;
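+               /* Drop the reference taken by of_find_compatible_node(). */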
+               of_node_put(np);
+       }
 
        for (win = 0; win < mbus->soc->num_wins; win++)
                mvebu_mbus_disable_window(mbus, win);
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
        int ret;
 
        /*
-        * These are optional, so we clear them and they'll
-        * be zero if they are missing from the DT.
+        * These are optional, so we make sure that resource_size(x) will
+        * return 0.
         */
        memset(mem, 0, sizeof(struct resource));
+       mem->end = -1;
        memset(io, 0, sizeof(struct resource));
+       io->end = -1;
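+       /* With start == 0 and end == -1, resource_size() (end - start + 1) is 0. */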
 
        ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
        if (!ret) {
index 0aa9d91daef500486e999c8ce4ccb4cb90d2755d..c206de2951f20f086c099ab69e8a1a48286465be 100644 (file)
@@ -290,6 +290,19 @@ config HW_RANDOM_PSERIES
 
          If unsure, say Y.
 
+config HW_RANDOM_POWERNV
+       tristate "PowerNV Random Number Generator support"
+       depends on HW_RANDOM && PPC_POWERNV
+       default HW_RANDOM
+       ---help---
+         This is the driver for the Random Number Generator hardware found
+         on POWER7+ and later machines running the PowerNV platform.
+
+         To compile this driver as a module, choose M here: the
+         module will be called powernv-rng.
+
+         If unsure, say Y.
+
 config HW_RANDOM_EXYNOS
        tristate "EXYNOS HW random number generator support"
        depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
index bed467c9300e6ec99240a262585d6bf9409a0b94..d7d2435ff7fa8d38cd129d327c59ded717e3682a 100644 (file)
@@ -24,6 +24,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
 obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
+obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
new file mode 100644 (file)
index 0000000..3f4f632
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       unsigned long *buf;
+       int i, len;
+
+       /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */
+       len = max / sizeof(unsigned long);
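+       /* len is the number of whole longs that fit in the caller's buffer. */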
+
+       buf = (unsigned long *)data;
+
+       for (i = 0; i < len; i++)
+               powernv_get_random_long(buf++);
+
+       return len * sizeof(unsigned long);
+}
+
+static struct hwrng powernv_hwrng = {
+       .name = "powernv-rng",
+       .read = powernv_rng_read,
+};
+
+static int powernv_rng_remove(struct platform_device *pdev)
+{
+       hwrng_unregister(&powernv_hwrng);
+
+       return 0;
+}
+
+static int powernv_rng_probe(struct platform_device *pdev)
+{
+       int rc;
+
+       rc = hwrng_register(&powernv_hwrng);
+       if (rc) {
+               /* We only register one device, ignore any others */
+               if (rc == -EEXIST)
+                       rc = -ENODEV;
+
+               return rc;
+       }
+
+       pr_info("Registered powernv hwrng.\n");
+
+       return 0;
+}
+
+static struct of_device_id powernv_rng_match[] = {
+       { .compatible   = "ibm,power-rng",},
+       {},
+};
+MODULE_DEVICE_TABLE(of, powernv_rng_match);
+
+static struct platform_driver powernv_rng_driver = {
+       .driver = {
+               .name = "powernv_rng",
+               .of_match_table = powernv_rng_match,
+       },
+       .probe  = powernv_rng_probe,
+       .remove = powernv_rng_remove,
+};
+module_platform_driver(powernv_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
index 5f1197929f0ceebaa2ca34632ca16be9046150dc..b761459a3436c25d31321f5bb46037f7faaf9292 100644 (file)
@@ -17,6 +17,9 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/hw_random.h>
 #include <asm/vio.h>
 
 static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
 {
-       if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
-               printk(KERN_ERR "pseries rng hcall error\n");
-               return 0;
+       int rc;
+
+       rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+       if (rc != H_SUCCESS) {
+               pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
+               return -EIO;
        }
+
+       /* The hypervisor interface returns 64 bits */
        return 8;
 }
 
index 7737b5bd26af816e4361a906ad9d0cd6966b7287..7a744d39175638a381835a8cadce7039f58ee3bf 100644 (file)
@@ -640,7 +640,7 @@ struct timer_rand_state {
  */
 void add_device_randomness(const void *buf, unsigned int size)
 {
-       unsigned long time = get_cycles() ^ jiffies;
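+       /* random_get_entropy() defaults to get_cycles() but can be overridden per arch. */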
+       unsigned long time = random_get_entropy() ^ jiffies;
 
        mix_pool_bytes(&input_pool, buf, size, NULL);
        mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
@@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
                goto out;
 
        sample.jiffies = jiffies;
-       sample.cycles = get_cycles();
+       sample.cycles = random_get_entropy();
        sample.num = num;
        mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
 
@@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
        struct fast_pool        *fast_pool = &__get_cpu_var(irq_randomness);
        struct pt_regs          *regs = get_irq_regs();
        unsigned long           now = jiffies;
-       __u32                   input[4], cycles = get_cycles();
+       __u32                   input[4], cycles = random_get_entropy();
 
        input[0] = cycles ^ jiffies;
        input[1] = irq;
@@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = {
 
 static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
 
-static int __init random_int_secret_init(void)
+int random_int_secret_init(void)
 {
        get_random_bytes(random_int_secret, sizeof(random_int_secret));
        return 0;
 }
-late_initcall(random_int_secret_init);
 
 /*
  * Get a random word for internal kernel use only. Similar to urandom but
@@ -1483,7 +1482,7 @@ unsigned int get_random_int(void)
 
        hash = get_cpu_var(get_random_int_hash);
 
-       hash[0] += current->pid + jiffies + get_cycles();
+       hash[0] += current->pid + jiffies + random_get_entropy();
        md5_transform(hash, random_int_secret);
        ret = hash[0];
        put_cpu_var(get_random_int_hash);
index f3223aac4df11c41959a744bee67af2d2df232e5..db5fa4e9b9e50f3a88bce0993b01420b53e8735f 100644 (file)
@@ -285,9 +285,9 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = blkdev_aio_write,
+       .write_iter     = blkdev_write_iter,
        .fsync          = blkdev_fsync,
        .open           = raw_open,
        .release        = raw_release,
index 7a7929ba26588dbf07d014bf8bb3cf8f55da509a..94c280d36e8b3bfaea00ee36c4fe3215818b3c86 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/io/tpmif.h>
 #include <xen/grant_table.h>
@@ -142,32 +143,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-       struct tpm_private *priv = TPM_VPRIV(chip);
-       u8 locality = priv->shr->locality;
-
-       return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t len)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-       struct tpm_private *priv = TPM_VPRIV(chip);
-       u8 val;
-
-       int rv = kstrtou8(buf, 0, &val);
-       if (rv)
-               return rv;
-
-       priv->shr->locality = val;
-
-       return len;
-}
-
 static const struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
@@ -188,8 +163,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-               tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
@@ -202,7 +175,6 @@ static struct attribute *vtpm_attrs[] = {
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
-       &dev_attr_locality.attr,
        NULL,
 };
 
@@ -210,8 +182,6 @@ static struct attribute_group vtpm_attr_grp = {
        .attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
        .status = vtpm_status,
        .recv = vtpm_recv,
@@ -224,11 +194,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
        .miscdev = {
                .fops = &vtpm_ops,
        },
-       .duration = {
-               TPM_LONG_TIMEOUT,
-               TPM_LONG_TIMEOUT,
-               TPM_LONG_TIMEOUT,
-       },
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
index 41c69469ce2000ec223170970c856a2287f10db2..971d796e071d889c290029ada3b2dc862f10fcbb 100644 (file)
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
 
 config ARMADA_370_XP_TIMER
        bool
+       select CLKSRC_OF
 
 config ORION_TIMER
        select CLKSRC_OF
index 37f5325bec95936260c50b2a099ccc7fb000ba53..b9ddd9e3a2f599e2cc7424c1eac18d4280b2f850 100644 (file)
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
        clocksource_of_init_fn init_func;
 
        for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
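+               /* Skip nodes whose status property marks them as disabled. */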
+               if (!of_device_is_available(np))
+                       continue;
+
                init_func = match->data;
                init_func(np);
        }
index b9c81b7c3a3bfa5a251129e42c687b1e0aeeb851..3a5909c12d420dbbb6f54f011882303ba4a48a62 100644 (file)
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
        ced->name = dev_name(&p->pdev->dev);
        ced->features = CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
-       ced->cpumask = cpumask_of(0);
+       ced->cpumask = cpu_possible_mask;
        ced->set_next_event = em_sti_clock_event_next;
        ced->set_mode = em_sti_clock_event_mode;
 
index 5b34768f4d7c79f68253966acc8e8268e53f318e..62b0de6a18370fade34eca20205557bd5871cc3a 100644 (file)
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
                                evt->irq);
                        return -EIO;
                }
-               irq_set_affinity(evt->irq, cpumask_of(cpu));
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                                           unsigned long action, void *hcpu)
 {
        struct mct_clock_event_device *mevt;
+       unsigned int cpu;
 
        /*
         * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_setup(&mevt->evt);
                break;
+       case CPU_ONLINE:
+               cpu = (unsigned long)hcpu;
+               if (mct_int_type == MCT_INT_SPI)
+                       irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+                                               cpumask_of(cpu));
+               break;
        case CPU_DYING:
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
                                         &percpu_mct_tick);
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     mct_irqs[MCT_L0_IRQ], err);
+       } else {
+               irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
        }
 
        err = register_cpu_notifier(&exynos4_mct_cpu_nb);
index 08ae128cce9be2e930454088c1041478bce8d8f5..c73fc2b74de2a1dd0665bde5693b1fce03f6d6c5 100644 (file)
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
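+       /* Zero the event payload so uninitialized padding is not copied to userspace. */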
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        /*  If cn_netlink_send() failed, the data is not sent */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->what = which_id;
        ev->event_data.id.process_pid = task->pid;
        ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        msg->seq = rcvd_seq;
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
index 6ecfa758942c50a4b33ebd399cd831ac1420277d..a36749f1e44a869418e1bcab6481334948618391 100644 (file)
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
 
        data = nlmsg_data(nlh);
 
-       memcpy(data, msg, sizeof(*data) + msg->len);
+       memcpy(data, msg, size);
 
        NETLINK_CB(skb).dst_group = group;
 
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
 static void cn_rx_skb(struct sk_buff *__skb)
 {
        struct nlmsghdr *nlh;
-       int err;
        struct sk_buff *skb;
+       int len, err;
 
        skb = skb_get(__skb);
 
        if (skb->len >= NLMSG_HDRLEN) {
                nlh = nlmsg_hdr(skb);
+               len = nlmsg_len(nlh);
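+               /* nlmsg_len() gives the payload length, excluding the netlink header. */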
 
-               if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+               if (len < (int)sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
-                   nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+                   len > CONNECTOR_MAX_MSG_SIZE) {
                        kfree_skb(skb);
                        return;
                }
index 534fcb8251538a31d2695313b1d565990f8d51d4..38093e272377b2729939400c581aeb9706424011 100644 (file)
@@ -17,15 +17,11 @@ config CPU_FREQ
 
 if CPU_FREQ
 
-config CPU_FREQ_TABLE
-       tristate
-
 config CPU_FREQ_GOV_COMMON
        bool
 
 config CPU_FREQ_STAT
        tristate "CPU frequency translation statistics"
-       select CPU_FREQ_TABLE
        default y
        help
          This driver exports CPU frequency statistics information through sysfs
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE
 
 config CPU_FREQ_GOV_ONDEMAND
        tristate "'ondemand' cpufreq policy governor"
-       select CPU_FREQ_TABLE
        select CPU_FREQ_GOV_COMMON
        help
          'ondemand' - This driver adds a dynamic cpufreq policy governor.
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
 config GENERIC_CPUFREQ_CPU0
        tristate "Generic CPU0 cpufreq driver"
        depends on HAVE_CLK && REGULATOR && PM_OPP && OF
-       select CPU_FREQ_TABLE
        help
          This adds a generic cpufreq driver for CPU0 frequency management.
          It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -223,7 +217,6 @@ depends on IA64
 
 config IA64_ACPI_CPUFREQ
        tristate "ACPI Processor P-States driver"
-       select CPU_FREQ_TABLE
        depends on ACPI_PROCESSOR
        help
        This driver adds a CPUFreq driver which utilizes the ACPI
@@ -240,7 +233,6 @@ depends on MIPS
 
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
-       select CPU_FREQ_TABLE
        help
          This option adds a CPUFreq driver for Loongson processors which
          support software-configurable CPU frequency.
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers"
 depends on SPARC64
 config SPARC_US3_CPUFREQ
        tristate "UltraSPARC-III CPU Frequency driver"
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for UltraSPARC-III processors.
 
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ
 
 config SPARC_US2E_CPUFREQ
        tristate "UltraSPARC-IIe CPU Frequency driver"
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for UltraSPARC-IIe processors.
 
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling"
 depends on SUPERH
 config SH_CPU_FREQ
        tristate "SuperH CPU Frequency driver"
-       select CPU_FREQ_TABLE
        help
          This adds the cpufreq driver for SuperH. Any CPU that supports
          clock rate rounding through the clock framework can use this
index 0fa204b244bd2df52974a50fc4290e9fa77d287c..701ec95ce95487cb29dbb9a1ba45012984085299 100644 (file)
@@ -5,7 +5,6 @@
 config ARM_BIG_LITTLE_CPUFREQ
        tristate "Generic ARM big LITTLE CPUfreq driver"
        depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
-       select CPU_FREQ_TABLE
        help
          This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ
 
 config ARM_EXYNOS_CPUFREQ
        bool
-       select CPU_FREQ_TABLE
 
 config ARM_EXYNOS4210_CPUFREQ
        bool "SAMSUNG EXYNOS4210"
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ
        depends on SOC_EXYNOS5440
        depends on HAVE_CLK && PM_OPP && OF
        default y
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Samsung EXYNOS5440
          SoC. The nature of exynos5440 clock controller is
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6Q cpufreq support"
        depends on SOC_IMX6Q
        depends on REGULATOR_ANATOP
-       select CPU_FREQ_TABLE
        help
          This adds cpufreq driver support for Freescale i.MX6Q SOC.
 
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR
 
 config ARM_KIRKWOOD_CPUFREQ
        def_bool ARCH_KIRKWOOD && OF
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Marvell Kirkwood
          SoCs.
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
        bool "TI OMAP2+"
        depends on ARCH_OMAP2PLUS
        default ARCH_OMAP2PLUS
-       select CPU_FREQ_TABLE
 
 config ARM_S3C_CPUFREQ
        bool
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ
 config ARM_S3C2416_CPUFREQ
        bool "S3C2416 CPU Frequency scaling support"
        depends on CPU_S3C2416
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for the Samsung S3C2416 and
          S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ
 config ARM_S3C64XX_CPUFREQ
        bool "Samsung S3C64XX"
        depends on CPU_S3C6410
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ
 config ARM_S5PV210_CPUFREQ
        bool "Samsung S5PV210 and S5PC110"
        depends on CPU_S5PV210
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver for Samsung S5PV210 and
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ
 config ARM_SPEAR_CPUFREQ
        bool "SPEAr CPUFreq support"
        depends on PLAT_SPEAR
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver support for SPEAr SOCs.
@@ -231,7 +221,6 @@ config ARM_SPEAR_CPUFREQ
 config ARM_TEGRA_CPUFREQ
        bool "TEGRA CPUFreq support"
        depends on ARCH_TEGRA
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver support for TEGRA SOCs.
index 25ca9db62e090d6b49e20d6744c3cd4027a16bfb..ca0021a96e19738abefa9538bcb590e3e2c3faa7 100644 (file)
@@ -1,7 +1,6 @@
 config CPU_FREQ_CBE
        tristate "CBE frequency scaling"
        depends on CBE_RAS && PPC_CELL
-       select CPU_FREQ_TABLE
        default m
        help
          This adds the cpufreq driver for Cell BE processors.
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI
 config CPU_FREQ_MAPLE
        bool "Support for Maple 970FX Evaluation Board"
        depends on PPC_MAPLE
-       select CPU_FREQ_TABLE
        help
          This adds support for frequency switching on Maple 970FX
          Evaluation Board and compatible boards (IBM JS2x blades).
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE
 config PPC_CORENET_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
        depends on PPC_E500MC && OF && COMMON_CLK
-       select CPU_FREQ_TABLE
        select CLK_PPC_CORENET
        help
          This adds the CPUFreq driver support for Freescale e500mc,
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ
 config CPU_FREQ_PMAC
        bool "Support for Apple PowerBooks"
        depends on ADB_PMU && PPC32
-       select CPU_FREQ_TABLE
        help
          This adds support for frequency switching on Apple PowerBooks,
          this currently includes some models of iBook & Titanium
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC
 config CPU_FREQ_PMAC64
        bool "Support for some Apple G5s"
        depends on PPC_PMAC && PPC64
-       select CPU_FREQ_TABLE
        help
          This adds support for frequency switching on Apple iMac G5,
          and some of the more recent desktop G5 machines as well.
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64
 config PPC_PASEMI_CPUFREQ
        bool "Support for PA Semi PWRficient"
        depends on PPC_PASEMI
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the support for frequency switching on PA Semi
index e2b6eabef2218d4e1e392ed523b3451553e196cb..6897ad85b0467a8200c3a529e5dee14344c2afbf 100644 (file)
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ
 
 config X86_ACPI_CPUFREQ
        tristate "ACPI Processor P-States driver"
-       select CPU_FREQ_TABLE
        depends on ACPI_PROCESSOR
        help
          This driver adds a CPUFreq driver which utilizes the ACPI
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB
 
 config ELAN_CPUFREQ
        tristate "AMD Elan SC400 and SC410"
-       select CPU_FREQ_TABLE
        depends on MELAN
        ---help---
          This adds the CPUFreq driver for AMD Elan SC400 and SC410
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ
 
 config SC520_CPUFREQ
        tristate "AMD Elan SC520"
-       select CPU_FREQ_TABLE
        depends on MELAN
        ---help---
          This adds the CPUFreq driver for AMD Elan SC520 processor.
@@ -88,7 +85,6 @@ config SC520_CPUFREQ
 
 config X86_POWERNOW_K6
        tristate "AMD Mobile K6-2/K6-3 PowerNow!"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6
 
 config X86_POWERNOW_K7
        tristate "AMD Mobile Athlon/Duron PowerNow!"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for mobile AMD K7 mobile processors.
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI
 
 config X86_POWERNOW_K8
        tristate "AMD Opteron/Athlon64 PowerNow!"
-       select CPU_FREQ_TABLE
        depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
        help
          This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
@@ -132,7 +126,6 @@ config X86_POWERNOW_K8
 config X86_AMD_FREQ_SENSITIVITY
        tristate "AMD frequency sensitivity feedback powersave bias"
        depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
-       select CPU_FREQ_TABLE
        help
          This adds AMD-specific powersave bias function to the ondemand
          governor, which allows it to make more power-conscious frequency
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD
 
 config X86_SPEEDSTEP_CENTRINO
        tristate "Intel Enhanced SpeedStep (deprecated)"
-       select CPU_FREQ_TABLE
        select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
        depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
        help
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE
 
 config X86_SPEEDSTEP_ICH
        tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH
 
 config X86_SPEEDSTEP_SMI
        tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI
 
 config X86_P4_CLOCKMOD
        tristate "Intel Pentium 4 clock modulation"
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Intel Pentium 4 / XEON
          processors.  When enabled it will lower CPU temperature by skipping
@@ -259,7 +248,6 @@ config X86_LONGRUN
 
 config X86_LONGHAUL
        tristate "VIA Cyrix III Longhaul"
-       select CPU_FREQ_TABLE
        depends on X86_32 && ACPI_PROCESSOR
        help
          This adds the CPUFreq driver for VIA Samuel/CyrixIII,
@@ -272,7 +260,6 @@ config X86_LONGHAUL
 
 config X86_E_POWERSAVER
        tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
-       select CPU_FREQ_TABLE
        depends on X86_32 && ACPI_PROCESSOR
        help
          This adds the CPUFreq driver for VIA C7 processors.  However, this driver
index ad5866c2ada0a5bd219305954682dbc7aae4f5a2..b7948bbbbf1fe7346590e79e3e6cbd9655272000 100644 (file)
@@ -1,5 +1,5 @@
 # CPUfreq core
-obj-$(CONFIG_CPU_FREQ)                 += cpufreq.o
+obj-$(CONFIG_CPU_FREQ)                 += cpufreq.o freq_table.o
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
 
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)   += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
 
-# CPUfreq cross-arch helpers
-obj-$(CONFIG_CPU_FREQ_TABLE)           += freq_table.o
-
 obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)     += cpufreq-cpu0.o
 
 ##################################################################################
index a1260b4549db647336192d24b3471b6b627897af..a1717d7367c1ad0bd29a3dd54de644791d648cc7 100644 (file)
@@ -516,15 +516,6 @@ out:
        return result;
 }
 
-static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
-
-       pr_debug("acpi_cpufreq_verify\n");
-
-       return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
@@ -837,7 +828,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
        perf->state = 0;
 
-       result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+       result = cpufreq_table_validate_and_show(policy, data->freq_table);
        if (result)
                goto err_freqfree;
 
@@ -846,12 +837,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
-               /* Current speed is unknown and not detectable by IO port */
+               /*
+                * The core will not set policy->cur, because
+                * cpufreq_driver->get is NULL, so we need to set it here.
+                * However, we have to guess it, because the current speed is
+                * unknown and not detectable via IO ports.
+                */
                policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
-               policy->cur = get_cur_freq_on_cpu(cpu);
                break;
        default:
                break;
@@ -868,8 +863,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                        (u32) perf->states[i].power,
                        (u32) perf->states[i].transition_latency);
 
-       cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
        /*
         * the first call to ->target() should result in us actually
         * writing something to the appropriate registers.
@@ -929,7 +922,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
 };
 
 static struct cpufreq_driver acpi_cpufreq_driver = {
-       .verify         = acpi_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = acpi_cpufreq_target,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = acpi_cpufreq_cpu_init,
@@ -986,6 +979,10 @@ static int __init acpi_cpufreq_init(void)
 {
        int ret;
 
+       /* don't keep reloading if cpufreq_driver exists */
+       if (cpufreq_get_current_driver())
+               return 0;
+
        if (acpi_disabled)
                return 0;
 
index 3549f0784af176d07ee17dd60d02ec85859d6bd1..086f7c17ff5816001ab9be359741b9292539cf47 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/topology.h>
 #include <linux/types.h>
@@ -47,14 +47,6 @@ static unsigned int bL_cpufreq_get(unsigned int cpu)
        return clk_get_rate(clk[cur_cluster]) / 1000;
 }
 
-/* Validate policy frequency range */
-static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
-{
-       u32 cur_cluster = cpu_to_cluster(policy->cpu);
-
-       return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
-}
-
 /* Set clock frequency */
 static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
                unsigned int target_freq, unsigned int relation)
@@ -98,7 +90,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
 
        if (!atomic_dec_return(&cluster_usage[cluster])) {
                clk_put(clk[cluster]);
-               opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+               dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
                dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
        }
 }
@@ -119,7 +111,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
                goto atomic_dec;
        }
 
-       ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
                                __func__, cpu_dev->id, ret);
@@ -127,7 +119,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
        }
 
        name[12] = cluster + '0';
-       clk[cluster] = clk_get_sys(name, NULL);
+       clk[cluster] = clk_get(cpu_dev, name);
        if (!IS_ERR(clk[cluster])) {
                dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
                                __func__, clk[cluster], freq_table[cluster],
@@ -138,7 +130,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
        dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
                        __func__, cpu_dev->id, cluster);
        ret = PTR_ERR(clk[cluster]);
-       opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
 
 atomic_dec:
        atomic_dec(&cluster_usage[cluster]);
@@ -165,7 +157,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
        if (ret)
                return ret;
 
-       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+       ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
        if (ret) {
                dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
                                policy->cpu, cur_cluster);
@@ -173,16 +165,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
-
        if (arm_bL_ops->get_transition_latency)
                policy->cpuinfo.transition_latency =
                        arm_bL_ops->get_transition_latency(cpu_dev);
        else
                policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 
-       policy->cur = bL_cpufreq_get(policy->cpu);
-
        cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
        dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
@@ -200,28 +188,23 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
+       cpufreq_frequency_table_put_attr(policy->cpu);
        put_cluster_clk_and_freq_table(cpu_dev);
        dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
        return 0;
 }
 
-/* Export freq_table to sysfs */
-static struct freq_attr *bL_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver bL_cpufreq_driver = {
        .name                   = "arm-big-little",
-       .flags                  = CPUFREQ_STICKY,
-       .verify                 = bL_cpufreq_verify_policy,
+       .flags                  = CPUFREQ_STICKY |
+                                       CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+       .verify                 = cpufreq_generic_frequency_table_verify,
        .target                 = bL_cpufreq_set_target,
        .get                    = bL_cpufreq_get,
        .init                   = bL_cpufreq_init,
        .exit                   = bL_cpufreq_exit,
-       .have_governor_per_policy = true,
-       .attr                   = bL_cpufreq_attr,
+       .attr                   = cpufreq_generic_attr,
 };
 
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
index 480c0bd0468d7c0fbbed9e0872cd9adb4fb77244..8d9d5910890689e58d5994d6aeebfccf03215c59 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
index e0c38d9389979b2f1b9a503d36c8e12364ecc188..7439deddd5cf72794f8580712f11ca7b6388e6f6 100644 (file)
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 
 static struct clk *cpuclk;
-
-static int at32_verify_speed(struct cpufreq_policy *policy)
-{
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                       policy->cpuinfo.max_freq);
-       return 0;
-}
+static struct cpufreq_frequency_table *freq_table;
 
 static unsigned int at32_get_speed(unsigned int cpu)
 {
@@ -85,31 +77,68 @@ static int at32_set_target(struct cpufreq_policy *policy,
 
 static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
+       unsigned int frequency, rate, min_freq;
+       int retval, steps, i;
+
        if (policy->cpu != 0)
                return -EINVAL;
 
        cpuclk = clk_get(NULL, "cpu");
        if (IS_ERR(cpuclk)) {
                pr_debug("cpufreq: could not get CPU clk\n");
-               return PTR_ERR(cpuclk);
+               retval = PTR_ERR(cpuclk);
+               goto out_err;
        }
 
-       policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
-       policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+       min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+       frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
        policy->cpuinfo.transition_latency = 0;
-       policy->cur = at32_get_speed(0);
-       policy->min = policy->cpuinfo.min_freq;
-       policy->max = policy->cpuinfo.max_freq;
 
-       printk("cpufreq: AT32AP CPU frequency driver\n");
+       /*
+        * The AVR32 CPU frequency scales in powers of two between the maximum
+        * and the minimum, so also add space for the table end marker.
+        *
+        * Each frequency is further validated below and, if usable, appended
+        * to the frequency table.
+        */
+       steps = fls(frequency / min_freq) + 1;
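+       /* One table entry per power-of-two step, plus one slot for CPUFREQ_TABLE_END. */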
+       freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+                       GFP_KERNEL);
+       if (!freq_table) {
+               retval = -ENOMEM;
+               goto out_err_put_clk;
+       }
+
+       for (i = 0; i < (steps - 1); i++) {
+               rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
 
-       return 0;
+               if (rate != frequency)
+                       freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+               else
+                       freq_table[i].frequency = frequency;
+
+               frequency /= 2;
+       }
+
+       freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+       retval = cpufreq_table_validate_and_show(policy, freq_table);
+       if (!retval) {
+               printk("cpufreq: AT32AP CPU frequency driver\n");
+               return 0;
+       }
+
+       kfree(freq_table);
+out_err_put_clk:
+       clk_put(cpuclk);
+out_err:
+       return retval;
 }
 
 static struct cpufreq_driver at32_driver = {
        .name           = "at32ap",
        .init           = at32_cpufreq_driver_init,
-       .verify         = at32_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = at32_set_target,
        .get            = at32_get_speed,
        .flags          = CPUFREQ_STICKY,
index ef05978a723702de29da757eda20edec7d21c3d6..0bc9e8c2c79bb1404a8539f6ff8760c5ab966727 100644 (file)
@@ -191,11 +191,6 @@ static int bfin_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int bfin_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, bfin_freq_table);
-}
-
 static int __bfin_cpu_init(struct cpufreq_policy *policy)
 {
 
@@ -209,23 +204,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy)
 
        policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
 
-       policy->cur = cclk;
-       cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
-       return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+       return cpufreq_table_validate_and_show(policy, bfin_freq_table);
 }
 
-static struct freq_attr *bfin_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver bfin_driver = {
-       .verify = bfin_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = bfin_target,
        .get = bfin_getfreq_khz,
        .init = __bfin_cpu_init,
+       .exit = cpufreq_generic_exit,
        .name = "bfin cpufreq",
-       .attr = bfin_freq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int __init bfin_cpu_init(void)
index cbfffa91ebdd46054d21f5c4138909b549787b06..33ab6504c4478ae344395a746cddde0341ca2c52 100644 (file)
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
 #include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
@@ -29,11 +30,6 @@ static struct clk *cpu_clk;
 static struct regulator *cpu_reg;
 static struct cpufreq_frequency_table *freq_table;
 
-static int cpu0_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int cpu0_get_speed(unsigned int cpu)
 {
        return clk_get_rate(cpu_clk) / 1000;
@@ -43,7 +39,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
                           unsigned int target_freq, unsigned int relation)
 {
        struct cpufreq_freqs freqs;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long volt = 0, volt_old = 0, tol = 0;
        long freq_Hz, freq_exact;
        unsigned int index;
@@ -71,7 +67,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 
        if (!IS_ERR(cpu_reg)) {
                rcu_read_lock();
-               opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+               opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        pr_err("failed to find OPP for %ld\n", freq_Hz);
@@ -79,7 +75,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
                        ret = PTR_ERR(opp);
                        goto post_notify;
                }
-               volt = opp_get_voltage(opp);
+               volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                tol = volt * voltage_tolerance / 100;
                volt_old = regulator_get_voltage(cpu_reg);
@@ -126,50 +122,18 @@ post_notify:
 
 static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (ret) {
-               pr_err("invalid frequency table: %d\n", ret);
-               return ret;
-       }
-
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->cur = clk_get_rate(cpu_clk) / 1000;
-
-       /*
-        * The driver only supports the SMP configuartion where all processors
-        * share the clock and voltage and clock.  Use cpufreq affected_cpus
-        * interface to have all CPUs scaled together.
-        */
-       cpumask_setall(policy->cpus);
-
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
-static struct freq_attr *cpu0_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cpu0_cpufreq_driver = {
        .flags = CPUFREQ_STICKY,
-       .verify = cpu0_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = cpu0_set_target,
        .get = cpu0_get_speed,
        .init = cpu0_cpufreq_init,
-       .exit = cpu0_cpufreq_exit,
+       .exit = cpufreq_generic_exit,
        .name = "generic_cpu0",
-       .attr = cpu0_cpufreq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
@@ -177,7 +141,11 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        struct device_node *np;
        int ret;
 
-       cpu_dev = &pdev->dev;
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu0 device\n");
+               return -ENODEV;
+       }
 
        np = of_node_get(cpu_dev->of_node);
        if (!np) {
@@ -213,7 +181,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_node;
        }
 
-       ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table: %d\n", ret);
                goto out_put_node;
@@ -224,8 +192,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        if (of_property_read_u32(np, "clock-latency", &transition_latency))
                transition_latency = CPUFREQ_ETERNAL;
 
-       if (cpu_reg) {
-               struct opp *opp;
+       if (!IS_ERR(cpu_reg)) {
+               struct dev_pm_opp *opp;
                unsigned long min_uV, max_uV;
                int i;
 
@@ -237,12 +205,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
                        ;
                rcu_read_lock();
-               opp = opp_find_freq_exact(cpu_dev,
+               opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                freq_table[0].frequency * 1000, true);
-               min_uV = opp_get_voltage(opp);
-               opp = opp_find_freq_exact(cpu_dev,
+               min_uV = dev_pm_opp_get_voltage(opp);
+               opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                freq_table[i-1].frequency * 1000, true);
-               max_uV = opp_get_voltage(opp);
+               max_uV = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
                if (ret > 0)
@@ -259,7 +227,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 out_free_table:
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
        of_node_put(np);
        return ret;
@@ -268,7 +236,7 @@ out_put_node:
 static int cpu0_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&cpu0_cpufreq_driver);
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 
        return 0;
 }
index b83d45f6857495083163e13336027ef4449b5da2..a05b876f375e5672c00b05412782c23ead83a773 100644 (file)
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy)
        if (policy->min < (fsb_pol_max * fid * 100))
                policy->max = (fsb_pol_max + 1) * fid * 100;
 
-       cpufreq_verify_within_limits(policy,
-                                    policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
        policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
        policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = nforce2_get(policy->cpu);
 
        return 0;
 }
index 43c24aa756f6f29935af8eb0614945a442a36a19..ec391d7f010b531a2fe378df96a88780b5c5faf2 100644 (file)
@@ -67,13 +67,11 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)                                   \
-static int lock_policy_rwsem_##mode(int cpu)                           \
+static void lock_policy_rwsem_##mode(int cpu)                          \
 {                                                                      \
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
        BUG_ON(!policy);                                                \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));           \
-                                                                       \
-       return 0;                                                       \
 }
 
 lock_policy_rwsem(read, cpu);
@@ -135,7 +133,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
 
 bool have_governor_per_policy(void)
 {
-       return cpufreq_driver->have_governor_per_policy;
+       return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
@@ -183,6 +181,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate and show the frequency table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+               struct cpufreq_frequency_table *table,
+               unsigned int transition_latency)
+{
+       int ret;
+
+       ret = cpufreq_table_validate_and_show(policy, table);
+       if (ret) {
+               pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+               return ret;
+       }
+
+       policy->cpuinfo.transition_latency = transition_latency;
+
+       /*
+	 * The driver only supports an SMP configuration where all the
+	 * processors share the same clock and voltage.
+        */
+       cpumask_setall(policy->cpus);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
        struct cpufreq_policy *policy = NULL;
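
As a rough usage sketch (the foo_* names and the 300 us latency are hypothetical), a driver built on this helper can reduce its ->init() callback to a single call:

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validate & show foo_freq_table, set the latency, fill policy->cpus */
	return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}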
@@ -414,7 +443,7 @@ show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);
 
 /**
@@ -435,7 +464,7 @@ static ssize_t store_##file_name                                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
-       ret = __cpufreq_set_policy(policy, &new_policy);                \
+       ret = cpufreq_set_policy(policy, &new_policy);          \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
@@ -493,11 +522,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                                &new_policy.governor))
                return -EINVAL;
 
-       /*
-        * Do not use cpufreq_set_policy here or the user_policy.max
-        * will be wrongly overridden
-        */
-       ret = __cpufreq_set_policy(policy, &new_policy);
+       ret = cpufreq_set_policy(policy, &new_policy);
 
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;
@@ -653,13 +678,12 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
-       ssize_t ret = -EINVAL;
+       ssize_t ret;
 
        if (!down_read_trylock(&cpufreq_rwsem))
-               goto exit;
+               return -EINVAL;
 
-       if (lock_policy_rwsem_read(policy->cpu) < 0)
-               goto up_read;
+       lock_policy_rwsem_read(policy->cpu);
 
        if (fattr->show)
                ret = fattr->show(policy, buf);
@@ -667,10 +691,8 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
                ret = -EIO;
 
        unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
        up_read(&cpufreq_rwsem);
-exit:
+
        return ret;
 }
 
@@ -689,8 +711,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;
 
-       if (lock_policy_rwsem_write(policy->cpu) < 0)
-               goto up_read;
+       lock_policy_rwsem_write(policy->cpu);
 
        if (fattr->store)
                ret = fattr->store(policy, buf, count);
@@ -699,7 +720,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
        unlock_policy_rwsem_write(policy->cpu);
 
-up_read:
        up_read(&cpufreq_rwsem);
 unlock:
        put_online_cpus();
@@ -844,11 +864,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
        int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
-       /* assure that the starting sequence is run in __cpufreq_set_policy */
+       /* assure that the starting sequence is run in cpufreq_set_policy */
        policy->governor = NULL;
 
        /* set default policy */
-       ret = __cpufreq_set_policy(policy, &new_policy);
+       ret = cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;
 
@@ -949,15 +969,24 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
-       if (cpu == policy->cpu)
+       if (WARN_ON(cpu == policy->cpu))
                return;
 
+       /*
+	 * Take the rwsem directly, as lock_policy_rwsem_write() wouldn't work
+	 * here. Locking only the old policy->cpu is sufficient: contention can
+	 * only start once policy->cpu has been updated, and by then any other
+	 * thread will be taking the lock for the new cpu and will already see
+	 * the updated policy.
+        */
+       down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+
        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;
 
-#ifdef CONFIG_CPU_FREQ_TABLE
+       up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+
        cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
@@ -1042,6 +1071,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                goto err_set_policy_cpu;
        }
 
+       if (cpufreq_driver->get) {
+               policy->cur = cpufreq_driver->get(policy->cpu);
+               if (!policy->cur) {
+                       pr_err("%s: ->get() failed\n", __func__);
+                       goto err_get_freq;
+               }
+       }
+
 	/* related cpus should at least have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
@@ -1096,6 +1133,9 @@ err_out_unregister:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+err_get_freq:
+       if (cpufreq_driver->exit)
+               cpufreq_driver->exit(policy);
 err_set_policy_cpu:
        cpufreq_policy_free(policy);
 nomem_out:
@@ -1125,7 +1165,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        int ret;
 
        /* first sibling now owns the new sysfs dir */
-       cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+       cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
 
        /* Don't touch sysfs files during light-weight tear-down */
        if (frozen)
@@ -1136,7 +1176,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        if (ret) {
                pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-               WARN_ON(lock_policy_rwsem_write(old_cpu));
+               lock_policy_rwsem_write(old_cpu);
                cpumask_set_cpu(old_cpu, policy->cpus);
                unlock_policy_rwsem_write(old_cpu);
 
@@ -1189,27 +1229,21 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                        policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-       WARN_ON(lock_policy_rwsem_write(cpu));
+       lock_policy_rwsem_read(cpu);
        cpus = cpumask_weight(policy->cpus);
-
-       if (cpus > 1)
-               cpumask_clear_cpu(cpu, policy->cpus);
-       unlock_policy_rwsem_write(cpu);
+       unlock_policy_rwsem_read(cpu);
 
        if (cpu != policy->cpu) {
                if (!frozen)
                        sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
-
                new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
                if (new_cpu >= 0) {
-                       WARN_ON(lock_policy_rwsem_write(cpu));
                        update_policy_cpu(policy, new_cpu);
-                       unlock_policy_rwsem_write(cpu);
 
                        if (!frozen) {
-                               pr_debug("%s: policy Kobject moved to cpu: %d "
-                                        "from: %d\n",__func__, new_cpu, cpu);
+                               pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+                                               __func__, new_cpu, cpu);
                        }
                }
        }
@@ -1237,9 +1271,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                return -EINVAL;
        }
 
-       lock_policy_rwsem_read(cpu);
+       lock_policy_rwsem_write(cpu);
        cpus = cpumask_weight(policy->cpus);
-       unlock_policy_rwsem_read(cpu);
+
+       if (cpus > 1)
+               cpumask_clear_cpu(cpu, policy->cpus);
+       unlock_policy_rwsem_write(cpu);
 
        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
@@ -1301,36 +1338,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 }
 
 /**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
  */
-static inline int __cpufreq_remove_dev(struct device *dev,
-                                      struct subsys_interface *sif,
-                                      bool frozen)
-{
-       int ret;
-
-       ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
-       if (!ret)
-               ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
-       return ret;
-}
-
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id;
-       int retval;
+       int ret;
 
        if (cpu_is_offline(cpu))
                return 0;
 
-       retval = __cpufreq_remove_dev(dev, sif, false);
-       return retval;
+       ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+       if (!ret)
+               ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+       return ret;
 }
 
 static void handle_update(struct work_struct *work)
@@ -1451,17 +1476,17 @@ unsigned int cpufreq_get(unsigned int cpu)
 {
        unsigned int ret_freq = 0;
 
+       if (cpufreq_disabled() || !cpufreq_driver)
+               return -ENOENT;
+
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
 
-       if (unlikely(lock_policy_rwsem_read(cpu)))
-               goto out_policy;
+       lock_policy_rwsem_read(cpu);
 
        ret_freq = __cpufreq_get(cpu);
 
        unlock_policy_rwsem_read(cpu);
-
-out_policy:
        up_read(&cpufreq_rwsem);
 
        return ret_freq;
@@ -1685,14 +1710,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 {
        int ret = -EINVAL;
 
-       if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-               goto fail;
+       lock_policy_rwsem_write(policy->cpu);
 
        ret = __cpufreq_driver_target(policy, target_freq, relation);
 
        unlock_policy_rwsem_write(policy->cpu);
 
-fail:
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1859,10 +1882,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 /*
- * data   : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
  */
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy)
 {
        int ret = 0, failed = 1;
@@ -1983,10 +2006,7 @@ int cpufreq_update_policy(unsigned int cpu)
                goto no_policy;
        }
 
-       if (unlikely(lock_policy_rwsem_write(cpu))) {
-               ret = -EINVAL;
-               goto fail;
-       }
+       lock_policy_rwsem_write(cpu);
 
        pr_debug("updating policy for CPU %u\n", cpu);
        memcpy(&new_policy, policy, sizeof(*policy));
@@ -2011,11 +2031,10 @@ int cpufreq_update_policy(unsigned int cpu)
                }
        }
 
-       ret = __cpufreq_set_policy(policy, &new_policy);
+       ret = cpufreq_set_policy(policy, &new_policy);
 
        unlock_policy_rwsem_write(cpu);
 
-fail:
        cpufreq_cpu_put(policy);
 no_policy:
        return ret;
@@ -2095,7 +2114,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-               return -EBUSY;
+               return -EEXIST;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
index 88cd39f7b0e9573b4fc7e5ca13ac0126cf6ede9d..b5f2b8618949dc75d55202d7b76e56e1e0c52a5f 100644 (file)
@@ -191,7 +191,10 @@ struct common_dbs_data {
        struct attribute_group *attr_group_gov_sys; /* one governor - system */
        struct attribute_group *attr_group_gov_pol; /* one governor - policy */
 
-       /* Common data for platforms that don't set have_governor_per_policy */
+       /*
+        * Common data for platforms that don't set
+        * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+        */
        struct dbs_data *gdbs_data;
 
        struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
index cb8276dd19caee0a5e2756883148a2d70a7132f2..05fdc7e40257f96a83940d3fbd601d45f4e8e77a 100644 (file)
@@ -54,11 +54,6 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
 static int cris_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -76,42 +71,17 @@ static int cris_freq_target(struct cpufreq_policy *policy,
 
 static int cris_freq_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-
-       /* cpuinfo and default policy values */
-       policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-       policy->cur = cris_freq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
-       if (result)
-               return (result);
-
-       cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
-       return 0;
-}
-
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, cris_freq_table, 1000000);
 }
 
-
-static struct freq_attr *cris_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cris_freq_driver = {
        .get    = cris_freq_get_cpu_frequency,
-       .verify = cris_freq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = cris_freq_target,
        .init   = cris_freq_cpu_init,
-       .exit   = cris_freq_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "cris_freq",
-       .attr   = cris_freq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static int __init cris_freq_init(void)
index 72328f77dc53bdef993173a1832d3159ac3fd03c..fac2b26932dd7fc4ff121e6bc6edd98ed65b0c47 100644 (file)
@@ -54,11 +54,6 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
 static int cris_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
 {
@@ -75,40 +70,17 @@ static int cris_freq_target(struct cpufreq_policy *policy,
 
 static int cris_freq_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-
-       /* cpuinfo and default policy values */
-       policy->cpuinfo.transition_latency = 1000000;   /* 1ms */
-       policy->cur = cris_freq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
-       if (result)
-               return (result);
-
-       cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, cris_freq_table, 1000000);
 }
 
-static struct freq_attr *cris_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cris_freq_driver = {
        .get = cris_freq_get_cpu_frequency,
-       .verify = cris_freq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = cris_freq_target,
        .init = cris_freq_cpu_init,
-       .exit = cris_freq_cpu_exit,
+       .exit = cpufreq_generic_exit,
        .name = "cris_freq",
-       .attr = cris_freq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int __init cris_freq_init(void)
index 551dd655c6f2ac1d49fb1238ee20f38c213e3f36..972583baf9e8d605b14b830b40b2c459ae1414b5 100644 (file)
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
        if (policy->cpu)
                return -EINVAL;
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
        policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -138,47 +136,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
                        return result;
        }
 
-       policy->cur = davinci_getspeed(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (result) {
-               pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
-                               __func__);
-               return result;
-       }
-
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
        /*
         * Time measurement across the target() function yields ~1500-1800us
         * time taken with no drivers on notification list.
         * Setting the latency to 2000 us to accommodate addition of drivers
         * to pre/post change notification list.
         */
-       policy->cpuinfo.transition_latency = 2000 * 1000;
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
 }
 
-static int davinci_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static struct freq_attr *davinci_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver davinci_driver = {
        .flags          = CPUFREQ_STICKY,
        .verify         = davinci_verify_speed,
        .target         = davinci_target,
        .get            = davinci_getspeed,
        .init           = davinci_cpu_init,
-       .exit           = davinci_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "davinci",
-       .attr           = davinci_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init davinci_cpufreq_probe(struct platform_device *pdev)
index 26321cdc19464f1cd3b23f7bf6575d5d97fa6ffc..a60f7693c18e1b93809d53c5835c89583a8af379 100644 (file)
 static struct cpufreq_frequency_table *freq_table;
 static struct clk *armss_clk;
 
-static struct freq_attr *dbx500_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
@@ -84,43 +74,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
 
 static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int res;
-
-       /* get policy fields based on the table */
-       res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (!res)
-               cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-       else {
-               pr_err("dbx500-cpufreq: Failed to read policy table\n");
-               return res;
-       }
-
-       policy->min = policy->cpuinfo.min_freq;
-       policy->max = policy->cpuinfo.max_freq;
-       policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
-       /*
-        * FIXME : Need to take time measurement across the target()
-        *         function with no/some/all drivers in the notification
-        *         list.
-        */
-       policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
-       /* policy sharing between dual CPUs */
-       cpumask_setall(policy->cpus);
-
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, 20 * 1000);
 }
 
 static struct cpufreq_driver dbx500_cpufreq_driver = {
        .flags  = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
-       .verify = dbx500_cpufreq_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = dbx500_cpufreq_target,
        .get    = dbx500_cpufreq_getspeed,
        .init   = dbx500_cpufreq_init,
        .name   = "DBX500",
-       .attr   = dbx500_cpufreq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static int dbx500_cpufreq_probe(struct platform_device *pdev)
index 09f64cc830197fc1f80f605e41d73be6b0441435..2c11ce3c67bde69a9e199135349becdd065fa283 100644 (file)
@@ -198,12 +198,6 @@ static int eps_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int eps_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                       &eps_cpu[policy->cpu]->freq_table[0]);
-}
-
 static int eps_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int i;
@@ -401,15 +395,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
        }
 
        policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
-       policy->cur = fsb * current_multiplier;
 
-       ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+       ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
        if (ret) {
                kfree(centaur);
                return ret;
        }
 
-       cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
        return 0;
 }
 
@@ -424,19 +416,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct freq_attr *eps_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver eps_driver = {
-       .verify         = eps_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = eps_target,
        .init           = eps_cpu_init,
        .exit           = eps_cpu_exit,
        .get            = eps_get,
        .name           = "e_powersaver",
-       .attr           = eps_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 
index 823a400d98fd6bc5f8c627db8151df572ffc68b9..d91a645a27aea800062dde1358fa02f5155137d0 100644 (file)
@@ -165,19 +165,6 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
 };
 
 
-/**
- *     elanfreq_validatespeed: test if frequency range is valid
- *     @policy: the policy to validate
- *
- *     This function checks if a given frequency range in kHz is valid
- *     for the hardware supported by the driver.
- */
-
-static int elanfreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
-}
-
 static int elanfreq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -202,7 +189,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *c = &cpu_data(0);
        unsigned int i;
-       int result;
 
        /* capability check */
        if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -221,21 +207,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = elanfreq_get_cpu_frequency(0);
 
-       result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
-       return 0;
-}
-
-
-static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, elanfreq_table);
 }
 
 
@@ -261,20 +234,14 @@ __setup("elanfreq=", elanfreq_setup);
 #endif
 
 
-static struct freq_attr *elanfreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-
 static struct cpufreq_driver elanfreq_driver = {
        .get            = elanfreq_get_cpu_frequency,
-       .verify         = elanfreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = elanfreq_target,
        .init           = elanfreq_cpu_init,
-       .exit           = elanfreq_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "elanfreq",
-       .attr           = elanfreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id elan_id[] = {
index 0fac34439e3171dbc97b8f717a935b691657cba9..3e4af676f43d79fba9c704049858e5c7026f5dac 100644 (file)
@@ -31,12 +31,6 @@ static unsigned int locking_frequency;
 static bool frequency_locked;
 static DEFINE_MUTEX(cpufreq_lock);
 
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             exynos_info->freq_table);
-}
-
 static unsigned int exynos_getspeed(unsigned int cpu)
 {
        return clk_get_rate(exynos_info->cpu_clk) / 1000;
@@ -141,7 +135,7 @@ post_notify:
        if ((freqs.new < freqs.old) ||
           ((freqs.new > freqs.old) && safe_arm_volt)) {
                /* down the voltage after frequency change */
-               regulator_set_voltage(arm_regulator, arm_volt,
+               ret = regulator_set_voltage(arm_regulator, arm_volt,
                                arm_volt);
                if (ret) {
                        pr_err("%s: failed to set cpu voltage to %d\n",
@@ -247,38 +241,18 @@ static struct notifier_block exynos_cpufreq_nb = {
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
-
-       cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
-
-       /* set the transition latency value */
-       policy->cpuinfo.transition_latency = 100000;
-
-       cpumask_setall(policy->cpus);
-
-       return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+       return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
 }
 
-static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static struct freq_attr *exynos_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver exynos_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = exynos_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = exynos_target,
        .get            = exynos_getspeed,
        .init           = exynos_cpufreq_cpu_init,
-       .exit           = exynos_cpufreq_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "exynos_cpufreq",
-       .attr           = exynos_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 #ifdef CONFIG_PM
        .suspend        = exynos_cpufreq_suspend,
        .resume         = exynos_cpufreq_resume,
index add7fbec4fc9af5ca3060e925cfe662a25d21eb4..f2c75065ce198694428d631baef388ad8e9aa6c6 100644 (file)
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
 
 static void exynos4210_set_apll(unsigned int index)
 {
-       unsigned int tmp;
+       unsigned int tmp, freq = apll_freq_4210[index].freq;
 
-       /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
        clk_set_parent(moutcore, mout_mpll);
 
        do {
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index)
                tmp &= 0x7;
        } while (tmp != 0x2);
 
-       /* 2. Set APLL Lock time */
-       __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
-
-       /* 3. Change PLL PMS values */
-       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-       tmp |= apll_freq_4210[index].mps;
-       __raw_writel(tmp, EXYNOS4_APLL_CON0);
+       clk_set_rate(mout_apll, freq * 1000);
 
-       /* 4. wait_lock_time */
-       do {
-               tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
-       /* 5. MUX_CORE_SEL = APLL */
+       /* MUX_CORE_SEL = APLL */
        clk_set_parent(moutcore, mout_apll);
 
        do {
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index)
        } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
 }
 
-static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
-{
-       unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
-       unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
-
-       return (old_pm == new_pm) ? 0 : 1;
-}
-
 static void exynos4210_set_frequency(unsigned int old_index,
                                     unsigned int new_index)
 {
-       unsigned int tmp;
-
        if (old_index > new_index) {
-               if (!exynos4210_pms_change(old_index, new_index)) {
-                       /* 1. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-
-                       /* 2. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4210[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-                       /* 2. Change the apll m,p,s value */
-                       exynos4210_set_apll(new_index);
-               }
+               exynos4210_set_clkdiv(new_index);
+               exynos4210_set_apll(new_index);
        } else if (old_index < new_index) {
-               if (!exynos4210_pms_change(old_index, new_index)) {
-                       /* 1. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4210[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-                       /* 2. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the apll m,p,s value */
-                       exynos4210_set_apll(new_index);
-                       /* 2. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-               }
+               exynos4210_set_apll(new_index);
+               exynos4210_set_clkdiv(new_index);
        }
 }
 
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
        info->volt_table = exynos4210_volt_table;
        info->freq_table = exynos4210_freq_table;
        info->set_freq = exynos4210_set_frequency;
-       info->need_apll_change = exynos4210_pms_change;
 
        return 0;
 
index 08b7477b0aa23ea139faa00aef4d6eca601d173c..8683304ce62cc4dba0c947e2be6049e1b94ffcaf 100644 (file)
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
 
 static void exynos4x12_set_apll(unsigned int index)
 {
-       unsigned int tmp, pdiv;
+       unsigned int tmp, freq = apll_freq_4x12[index].freq;
 
-       /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
        clk_set_parent(moutcore, mout_mpll);
 
        do {
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index)
                tmp &= 0x7;
        } while (tmp != 0x2);
 
-       /* 2. Set APLL Lock time */
-       pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
+       clk_set_rate(mout_apll, freq * 1000);
 
-       __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
-
-       /* 3. Change PLL PMS values */
-       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-       tmp |= apll_freq_4x12[index].mps;
-       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-       /* 4. wait_lock_time */
-       do {
-               cpu_relax();
-               tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
-       /* 5. MUX_CORE_SEL = APLL */
+       /* MUX_CORE_SEL = APLL */
        clk_set_parent(moutcore, mout_apll);
 
        do {
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index)
        } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
 }
 
-static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
-{
-       unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
-       unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
-
-       return (old_pm == new_pm) ? 0 : 1;
-}
-
 static void exynos4x12_set_frequency(unsigned int old_index,
                                  unsigned int new_index)
 {
-       unsigned int tmp;
-
        if (old_index > new_index) {
-               if (!exynos4x12_pms_change(old_index, new_index)) {
-                       /* 1. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-                       /* 2. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4x12[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-                       /* 2. Change the apll m,p,s value */
-                       exynos4x12_set_apll(new_index);
-               }
+               exynos4x12_set_clkdiv(new_index);
+               exynos4x12_set_apll(new_index);
        } else if (old_index < new_index) {
-               if (!exynos4x12_pms_change(old_index, new_index)) {
-                       /* 1. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4x12[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-                       /* 2. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the apll m,p,s value */
-                       exynos4x12_set_apll(new_index);
-                       /* 2. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-               }
+               exynos4x12_set_apll(new_index);
+               exynos4x12_set_clkdiv(new_index);
        }
 }
 
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
        info->volt_table = exynos4x12_volt_table;
        info->freq_table = exynos4x12_freq_table;
        info->set_freq = exynos4x12_set_frequency;
-       info->need_apll_change = exynos4x12_pms_change;
 
        return 0;
 
index d514c152fd1a43041e8ff570fca7f9cc2b28f58d..8ae5e2925bf1e2e446bdb952a68f925d46da732e 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -118,12 +118,12 @@ static int init_div_table(void)
        struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
        unsigned int tmp, clk_div, ema_div, freq, volt_id;
        int i = 0;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
 
        rcu_read_lock();
        for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
 
-               opp = opp_find_freq_exact(dvfs_info->dev,
+               opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
                                        freq_tbl[i].frequency * 1000, true);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
@@ -142,7 +142,7 @@ static int init_div_table(void)
                                        << P0_7_CSCLKDEV_SHIFT;
 
                /* Calculate EMA */
-               volt_id = opp_get_voltage(opp);
+               volt_id = dev_pm_opp_get_voltage(opp);
                volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
                if (volt_id < PMIC_HIGH_VOLT) {
                        ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
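
These hunks follow the OPP API rename (struct opp becomes struct dev_pm_opp, opp_* becomes dev_pm_opp_*); the lookup pattern itself is unchanged and, roughly sketched (dev, freq_khz and volt are placeholders), still reads:

	struct dev_pm_opp *opp;
	unsigned long volt;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(dev, freq_khz * 1000, true);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);	/* only valid under rcu_read_lock() */
	rcu_read_unlock();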
@@ -209,12 +209,6 @@ static void exynos_enable_dvfs(void)
                                dvfs_info->base + XMU_DVFS_CTRL);
 }
 
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             dvfs_info->freq_table);
-}
-
 static unsigned int exynos_getspeed(unsigned int cpu)
 {
        return dvfs_info->cur_frequency;
@@ -324,30 +318,19 @@ static void exynos_sort_descend_freq_table(void)
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
-       if (ret) {
-               dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
-               return ret;
-       }
-
-       policy->cur = dvfs_info->cur_frequency;
-       policy->cpuinfo.transition_latency = dvfs_info->latency;
-       cpumask_setall(policy->cpus);
-
-       cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
-
-       return 0;
+       return cpufreq_generic_init(policy, dvfs_info->freq_table,
+                       dvfs_info->latency);
 }
 
 static struct cpufreq_driver exynos_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = exynos_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = exynos_target,
        .get            = exynos_getspeed,
        .init           = exynos_cpufreq_cpu_init,
+       .exit           = cpufreq_generic_exit,
        .name           = CPUFREQ_NAME,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct of_device_id exynos_cpufreq_match[] = {
@@ -399,13 +382,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                goto err_put_node;
        }
 
-       ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+                                           &dvfs_info->freq_table);
        if (ret) {
                dev_err(dvfs_info->dev,
                        "failed to init cpufreq table: %d\n", ret);
                goto err_put_node;
        }
-       dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+       dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
        exynos_sort_descend_freq_table();
 
        if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
@@ -454,17 +438,17 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 err_free_table:
-       opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
        of_node_put(np);
-       dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+       dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
        return ret;
 }
 
 static int exynos_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&exynos_driver);
-       opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
        return 0;
 }
 
index f111454a7aeace94454e3c8eff6e7e45360380d7..3458d27f63b409e03b866e6b20e541a7de8cc769 100644 (file)
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table)
 {
-       unsigned int next_larger = ~0;
-       unsigned int i;
-       unsigned int count = 0;
+       unsigned int next_larger = ~0, freq, i = 0;
+       bool found = false;
 
        pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
                                        policy->min, policy->max, policy->cpu);
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
-       for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
-               unsigned int freq = table[i].frequency;
+       for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
-               if ((freq >= policy->min) && (freq <= policy->max))
-                       count++;
-               else if ((next_larger > freq) && (freq > policy->max))
+               if ((freq >= policy->min) && (freq <= policy->max)) {
+                       found = true;
+                       break;
+               }
+
+               if ((next_larger > freq) && (freq > policy->max))
                        next_larger = freq;
        }
 
-       if (!count)
+       if (!found) {
                policy->max = next_larger;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+               cpufreq_verify_within_cpu_limits(policy);
+       }
 
        pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
                                policy->min, policy->max, policy->cpu);
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
 
+/*
+ * Generic routine to verify the policy & frequency table; it requires the
+ * driver to have called cpufreq_frequency_table_get_attr() beforehand.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *table =
+               cpufreq_frequency_get_table(policy->cpu);
+       if (!table)
+               return -ENODEV;
+
+       return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
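
A driver that has registered its table that way can then point its ->verify hook at the generic helper; a minimal sketch with hypothetical bar_* callbacks:

static struct cpufreq_driver bar_driver = {
	/* finds the table registered via cpufreq_frequency_table_get_attr() */
	.verify	= cpufreq_generic_frequency_table_verify,
	.target	= bar_target,
	.get	= bar_get,
	.init	= bar_cpu_init,
	.attr	= cpufreq_generic_attr,
};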
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
 };
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
+struct freq_attr *cpufreq_generic_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
 /*
  * if you use these, you must ensure that the frequency table is valid
  * all the time between get_attr and put_attr!
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+                                     struct cpufreq_frequency_table *table)
+{
+       int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+       if (!ret)
+               cpufreq_frequency_table_get_attr(table, policy->cpu);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
 void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
 {
        pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
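
cpufreq_table_validate_and_show() simply folds the cpuinfo validation and the get_attr registration into one call, so a typical ->init() in the converted drivers boils down to something like this sketch (bar_freq_table is hypothetical):

static int bar_cpu_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	/* validate the table and register it for scaling_available_frequencies */
	return cpufreq_table_validate_and_show(policy, bar_freq_table);
}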
index 70442c7b5e71aed7e57ce61b20893e42dc36a215..d83e8266a58e2eddf824696c80240ced024bab57 100644 (file)
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
 
 static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
 {
-       unsigned int maxfreq, curfreq;
+       unsigned int maxfreq;
 
        if (!policy || policy->cpu != 0)
                return -ENODEV;
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
                maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
 
        stock_freq = maxfreq;
-       curfreq = gx_get_cpuspeed(0);
 
        pr_debug("cpu max frequency is %d.\n", maxfreq);
-       pr_debug("cpu current frequency is %dkHz.\n", curfreq);
 
        /* setup basic struct for cpufreq API */
        policy->cpu = 0;
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
        else
                policy->min = maxfreq / POLICY_MIN_DIV;
        policy->max = maxfreq;
-       policy->cur = curfreq;
        policy->cpuinfo.min_freq = maxfreq / max_duration;
        policy->cpuinfo.max_freq = maxfreq;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
index 794123fcf3e3ebce87cdbcbf0a66b60f4b70db2c..bf8902a0866dd4d767cdab4ba45d84ca397033fe 100644 (file)
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
        struct device_node *np;
        int ret;
 
-       if (!of_machine_is_compatible("calxeda,highbank"))
+       if ((!of_machine_is_compatible("calxeda,highbank")) &&
+               (!of_machine_is_compatible("calxeda,ecx-2000")))
                return -ENODEV;
 
        cpu_dev = get_cpu_device(0);
index 3e14f03171759bcdbd8a0501bc1ff38fee86d4c0..90c6598415fd39687aa874a85d52730452f26c3d 100644 (file)
@@ -247,22 +247,6 @@ acpi_cpufreq_target (
 }
 
 
-static int
-acpi_cpufreq_verify (
-       struct cpufreq_policy   *policy)
-{
-       unsigned int result = 0;
-       struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
-       pr_debug("acpi_cpufreq_verify\n");
-
-       result = cpufreq_frequency_table_verify(policy,
-                       data->freq_table);
-
-       return (result);
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
        struct cpufreq_policy   *policy)
@@ -321,7 +305,6 @@ acpi_cpufreq_cpu_init (
                            data->acpi_data.states[i].transition_latency * 1000;
                }
        }
-       policy->cur = processor_get_freq(data, policy->cpu);
 
        /* table init */
        for (i = 0; i <= data->acpi_data.state_count; i++)
@@ -335,7 +318,7 @@ acpi_cpufreq_cpu_init (
                }
        }
 
-       result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+       result = cpufreq_table_validate_and_show(policy, data->freq_table);
        if (result) {
                goto err_freqfree;
        }
@@ -356,8 +339,6 @@ acpi_cpufreq_cpu_init (
                        (u32) data->acpi_data.states[i].status,
                        (u32) data->acpi_data.states[i].control);
 
-       cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
        /* the first call to ->target() should result in us actually
         * writing something to the appropriate registers. */
        data->resume = 1;
@@ -396,20 +377,14 @@ acpi_cpufreq_cpu_exit (
 }
 
 
-static struct freq_attr* acpi_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-
 static struct cpufreq_driver acpi_cpufreq_driver = {
-       .verify         = acpi_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = acpi_cpufreq_target,
        .get            = acpi_cpufreq_get,
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
        .name           = "acpi-cpufreq",
-       .attr           = acpi_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 
index 3e396543aea4f74bb6b2d54e5a839eedf43a2c1a..be23892282e3462947e7df176113070a2b1d8ca7 100644 (file)
@@ -7,12 +7,13 @@
  */
 
 #include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
@@ -34,11 +35,6 @@ static struct device *cpu_dev;
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int transition_latency;
 
-static int imx6q_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int imx6q_get_speed(unsigned int cpu)
 {
        return clk_get_rate(arm_clk) / 1000;
@@ -48,7 +44,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
 {
        struct cpufreq_freqs freqs;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq_hz, volt, volt_old;
        unsigned int index;
        int ret;
@@ -69,14 +65,14 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
                return 0;
 
        rcu_read_lock();
-       opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+       opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
                return PTR_ERR(opp);
        }
 
-       volt = opp_get_voltage(opp);
+       volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        volt_old = regulator_get_voltage(arm_reg);
 
@@ -158,51 +154,31 @@ post_notify:
 
 static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (ret) {
-               dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
-               return ret;
-       }
-
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->cur = clk_get_rate(arm_clk) / 1000;
-       cpumask_setall(policy->cpus);
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
-static struct freq_attr *imx6q_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver imx6q_cpufreq_driver = {
-       .verify = imx6q_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = imx6q_set_target,
        .get = imx6q_get_speed,
        .init = imx6q_cpufreq_init,
-       .exit = imx6q_cpufreq_exit,
+       .exit = cpufreq_generic_exit,
        .name = "imx6q-cpufreq",
-       .attr = imx6q_cpufreq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int imx6q_cpufreq_probe(struct platform_device *pdev)
 {
        struct device_node *np;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long min_volt, max_volt;
        int num, ret;
 
-       cpu_dev = &pdev->dev;
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu0 device\n");
+               return -ENODEV;
+       }
 
        np = of_node_get(cpu_dev->of_node);
        if (!np) {
@@ -232,14 +208,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        }
 
        /* We expect an OPP table supplied by platform */
-       num = opp_get_opp_count(cpu_dev);
+       num = dev_pm_opp_get_opp_count(cpu_dev);
        if (num < 0) {
                ret = num;
                dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
                goto put_node;
        }
 
-       ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto put_node;
@@ -254,12 +230,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
         * same order.
         */
        rcu_read_lock();
-       opp = opp_find_freq_exact(cpu_dev,
+       opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                  freq_table[0].frequency * 1000, true);
-       min_volt = opp_get_voltage(opp);
-       opp = opp_find_freq_exact(cpu_dev,
+       min_volt = dev_pm_opp_get_voltage(opp);
+       opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                  freq_table[--num].frequency * 1000, true);
-       max_volt = opp_get_voltage(opp);
+       max_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
        if (ret > 0)
@@ -287,7 +263,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 free_freq_table:
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 put_node:
        of_node_put(np);
        return ret;
@@ -296,7 +272,7 @@ put_node:
 static int imx6q_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&imx6q_cpufreq_driver);
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 
        return 0;
 }
index f7c99df0880b4d396fee72860b904b5764f943f3..babf3e40e9fa5030be4420f7038681954ebc5bb8 100644 (file)
@@ -59,9 +59,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
 {
        struct icst_vco vco;
 
-       cpufreq_verify_within_limits(policy, 
-                                    policy->cpuinfo.min_freq, 
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
        policy->max = icst_hz(&cclk_params, vco) / 1000;
@@ -69,10 +67,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
        vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
        policy->min = icst_hz(&cclk_params, vco) / 1000;
 
-       cpufreq_verify_within_limits(policy, 
-                                    policy->cpuinfo.min_freq, 
-                                    policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -186,10 +181,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
 {
 
        /* set default policy and cpuinfo */
-       policy->cpuinfo.max_freq = 160000;
-       policy->cpuinfo.min_freq = 12000;
+       policy->max = policy->cpuinfo.max_freq = 160000;
+       policy->min = policy->cpuinfo.min_freq = 12000;
        policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
-       policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
 
        return 0;
 }
index 9733f29ed148f840a35171df38e32cbd1ce7bca8..67a87e01c1d96f6709de25dff2b9cbb590d70b36 100644 (file)
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-       int core_pct_busy;
+       int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
        int freq;
@@ -68,7 +68,7 @@ struct _pid {
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
-       int last_err;
+       int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-       signed int err, result;
+       signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;
 
-       err = pid->setpoint - busy;
-       fp_error = int_tofp(err);
+       fp_error = int_tofp(pid->setpoint) - busy;
 
-       if (abs(err) <= pid->deadband)
+       if (abs(fp_error) <= int_tofp(pid->deadband))
                return 0;
 
        pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;
 
-       dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-       pid->last_err = err;
+       dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+       pid->last_err = fp_error;
 
        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
 
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
        int max_perf = cpu->pstate.turbo_pstate;
+       int max_perf_adj;
        int min_perf;
        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;
 
-       max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-       *max = clamp_t(int, max_perf,
+       max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+       *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
        int max_perf, min_perf;
+       u64 val;
 
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 
@@ -394,8 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
        trace_cpu_frequency(pstate * 100000, cpu->cpu);
 
        cpu->pstate.current_pstate = pstate;
-       wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+       val = pstate << 8;
+       if (limits.no_turbo)
+               val |= (u64)1 << 32;
 
+       wrmsrl(MSR_IA32_PERF_CTL, val);
 }
 
 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -432,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
        u64 core_pct;
-       core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-       sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+       core_pct = div64_u64(int_tofp(sample->aperf * 100),
+                            sample->mperf);
+       sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
        sample->core_pct_busy = core_pct;
 }
@@ -465,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
        mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-       int32_t busy_scaled;
        int32_t core_busy, max_pstate, current_pstate;
 
-       core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+       core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
-       busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-       return fp_toint(busy_scaled);
+       return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-       int busy_scaled;
+       int32_t busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;
@@ -611,9 +613,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
-       cpufreq_verify_within_limits(policy,
-                               policy->cpuinfo.min_freq,
-                               policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
                (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
@@ -634,8 +634,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-       int rc, min_pstate, max_pstate;
        struct cpudata *cpu;
+       int rc;
 
        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
@@ -649,9 +649,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
-       intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
-       policy->min = min_pstate * 100000;
-       policy->max = max_pstate * 100000;
+       policy->min = cpu->pstate.min_pstate * 100000;
+       policy->max = cpu->pstate.turbo_pstate * 100000;
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
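
Note: the intel_pstate hunks above widen core_pct_busy, last_err and the PID error term to int32_t and keep them in fixed point end to end, so the deadband check and the derivative term no longer drop the fractional part. A stand-alone user-space sketch of that arithmetic; FRAC_BITS = 8 and the helper definitions are assumptions modelled on the driver's int_tofp()/mul_fp() usage, not quoted from this commit:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8				/* assumed fractional width */
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

int main(void)
{
	int32_t setpoint = 97;				/* percent */
	int32_t busy = (94 << FRAC_BITS) + 128;		/* 94.5% in fixed point */
	int32_t fp_error = (int32_t)int_tofp(setpoint) - busy;
	int32_t p_gain = (int32_t)(0.20 * (1 << FRAC_BITS));

	/* the 0.5% fraction survives; pure-int math would have rounded it away */
	printf("error  = %.2f%%\n", fp_error / 256.0);	/* 2.50 */
	printf("p-term = %.3f\n", mul_fp(p_gain, fp_error) / 256.0);
	return 0;
}
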
index ba10658a93949fe9adca0863c7c8926a11fe1676..fff8653c8e9b7087a71a0a885853d7c4cdfd30c0 100644 (file)
@@ -102,11 +102,6 @@ static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
-}
-
 static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -125,40 +120,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
 /* Module init and exit code */
 static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-
-       /* cpuinfo and default policy values */
-       policy->cpuinfo.transition_latency = 5000; /* 5uS */
-       policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
 }
 
-static struct freq_attr *kirkwood_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver kirkwood_cpufreq_driver = {
        .get    = kirkwood_cpufreq_get_cpu_frequency,
-       .verify = kirkwood_cpufreq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = kirkwood_cpufreq_target,
        .init   = kirkwood_cpufreq_cpu_init,
-       .exit   = kirkwood_cpufreq_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "kirkwood-cpufreq",
-       .attr   = kirkwood_cpufreq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static int kirkwood_cpufreq_probe(struct platform_device *pdev)
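
Note: kirkwood (above) and several drivers below replace their init/exit/verify boilerplate with new common helpers. Their presumed shapes, inferred from what the removed driver code used to do (the real definitions live in the cpufreq core, outside these hunks):

/* Validate the table, expose it via sysfs, record the latency, and mark
 * all CPUs as sharing the policy (the drivers converted here either have
 * one CPU or share clock/voltage across cores). */
int cpufreq_generic_init(struct cpufreq_policy *policy,
			 struct cpufreq_frequency_table *table,
			 unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = transition_latency;
	cpumask_setall(policy->cpus);

	return 0;
}

/* Counterpart of the removed per-driver exit callbacks. */
int cpufreq_generic_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}
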
index 4ada1cccb0523632e72c016e9a511dd38b08d33d..14df4974fb458a165b3dacaaeb15e843d1af6c82 100644 (file)
@@ -625,12 +625,6 @@ static void longhaul_setup_voltagescaling(void)
 }
 
 
-static int longhaul_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, longhaul_table);
-}
-
-
 static int longhaul_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
 {
@@ -919,36 +913,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                longhaul_setup_voltagescaling();
 
        policy->cpuinfo.transition_latency = 200000;    /* nsec */
-       policy->cur = calc_speed(longhaul_get_cpu_mult());
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
-       if (ret)
-               return ret;
-
-       cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
 
-       return 0;
+       return cpufreq_table_validate_and_show(policy, longhaul_table);
 }
 
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static struct freq_attr *longhaul_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver longhaul_driver = {
-       .verify = longhaul_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = longhaul_target,
        .get    = longhaul_get,
        .init   = longhaul_cpu_init,
-       .exit   = longhaul_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "longhaul",
-       .attr   = longhaul_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id longhaul_id[] = {
index 5aa031612d5393ff08d196f83b8701ac8c8f5e12..074971b12635af04b7214f510c70e7e8c37ebcbf 100644 (file)
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
                return -EINVAL;
 
        policy->cpu = 0;
-       cpufreq_verify_within_limits(policy,
-               policy->cpuinfo.min_freq,
-               policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
            (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
index 7bc3c44d34e2f0ec5493ad9fdf0c8467ccd8a037..2c8ec8e064490be7d56b4e591960b2e11a4bf813 100644 (file)
@@ -131,40 +131,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       policy->cur = loongson2_cpufreq_get(policy->cpu);
-
-       cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
-                                        policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy,
-                                           &loongson2_clockmod_table[0]);
-}
-
-static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             &loongson2_clockmod_table[0]);
+       return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
 }
 
 static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
 {
+       cpufreq_frequency_table_put_attr(policy->cpu);
        clk_put(cpuclk);
        return 0;
 }
 
-static struct freq_attr *loongson2_table_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver loongson2_cpufreq_driver = {
        .name = "loongson2",
        .init = loongson2_cpufreq_cpu_init,
-       .verify = loongson2_cpufreq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = loongson2_cpufreq_target,
        .get = loongson2_cpufreq_get,
        .exit = loongson2_cpufreq_exit,
-       .attr = loongson2_table_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static struct platform_device_id platform_device_ids[] = {
index 6168d77b296d78b7d1a452bae6f3ae34c7e036c3..eb1e1766baede24438b948a6f9974d5f6e0877be 100644 (file)
@@ -64,11 +64,6 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
        {0,                     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr *maple_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 /* Power mode data is an array of the 32 bits PCR values to use for
  * the various frequencies, retrieved from the device-tree
  */
@@ -135,11 +130,6 @@ static int maple_scom_query_freq(void)
  * Common interface to the cpufreq core
  */
 
-static int maple_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
-}
-
 static int maple_cpufreq_target(struct cpufreq_policy *policy,
        unsigned int target_freq, unsigned int relation)
 {
@@ -175,27 +165,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
 
 static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       policy->cpuinfo.transition_latency = 12000;
-       policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
-       /* secondary CPUs are tied to the primary one by the
-        * cpufreq core if in the secondary policy we tell it that
-        * it actually must be one policy together with all others. */
-       cpumask_setall(policy->cpus);
-       cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy,
-               maple_cpu_freqs);
+       return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
 }
 
-
 static struct cpufreq_driver maple_cpufreq_driver = {
        .name           = "maple",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = maple_cpufreq_cpu_init,
-       .verify         = maple_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = maple_cpufreq_target,
        .get            = maple_cpufreq_get_speed,
-       .attr           = maple_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init maple_cpufreq_init(void)
index f31fcfcad514330ca856bcfd015aba3927de8241..ac552d090463d729d4ef0b98aebe893c554ae02c 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -40,13 +40,6 @@ static struct clk *mpu_clk;
 static struct device *mpu_dev;
 static struct regulator *mpu_reg;
 
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
-       if (!freq_table)
-               return -EINVAL;
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int omap_getspeed(unsigned int cpu)
 {
        unsigned long rate;
@@ -65,7 +58,7 @@ static int omap_target(struct cpufreq_policy *policy,
        unsigned int i;
        int r, ret = 0;
        struct cpufreq_freqs freqs;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq, volt = 0, volt_old = 0, tol = 0;
 
        if (!freq_table) {
@@ -105,14 +98,14 @@ static int omap_target(struct cpufreq_policy *policy,
 
        if (mpu_reg) {
                rcu_read_lock();
-               opp = opp_find_freq_ceil(mpu_dev, &freq);
+               opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
                                __func__, freqs.new);
                        return -EINVAL;
                }
-               volt = opp_get_voltage(opp);
+               volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                tol = volt * OPP_TOLERANCE / 100;
                volt_old = regulator_get_voltage(mpu_reg);
@@ -162,86 +155,57 @@ done:
 static inline void freq_table_free(void)
 {
        if (atomic_dec_and_test(&freq_table_users))
-               opp_free_cpufreq_table(mpu_dev, &freq_table);
+               dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
 }
 
 static int omap_cpu_init(struct cpufreq_policy *policy)
 {
-       int result = 0;
+       int result;
 
        mpu_clk = clk_get(NULL, "cpufreq_ck");
        if (IS_ERR(mpu_clk))
                return PTR_ERR(mpu_clk);
 
-       if (policy->cpu >= NR_CPUS) {
-               result = -EINVAL;
-               goto fail_ck;
-       }
-
-       policy->cur = omap_getspeed(policy->cpu);
-
-       if (!freq_table)
-               result = opp_init_cpufreq_table(mpu_dev, &freq_table);
-
-       if (result) {
-               dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+       if (!freq_table) {
+               result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+               if (result) {
+                       dev_err(mpu_dev,
+                               "%s: cpu%d: failed creating freq table[%d]\n",
                                __func__, policy->cpu, result);
-               goto fail_ck;
+                       goto fail;
+               }
        }
 
        atomic_inc_return(&freq_table_users);
 
-       result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (result)
-               goto fail_table;
-
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-       policy->cur = omap_getspeed(policy->cpu);
-
-       /*
-        * On OMAP SMP configuartion, both processors share the voltage
-        * and clock. So both CPUs needs to be scaled together and hence
-        * needs software co-ordination. Use cpufreq affected_cpus
-        * interface to handle this scenario. Additional is_smp() check
-        * is to keep SMP_ON_UP build working.
-        */
-       if (is_smp())
-               cpumask_setall(policy->cpus);
-
        /* FIXME: what's the actual transition time? */
-       policy->cpuinfo.transition_latency = 300 * 1000;
-
-       return 0;
+       result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+       if (!result)
+               return 0;
 
-fail_table:
        freq_table_free();
-fail_ck:
+fail:
        clk_put(mpu_clk);
        return result;
 }
 
 static int omap_cpu_exit(struct cpufreq_policy *policy)
 {
+       cpufreq_frequency_table_put_attr(policy->cpu);
        freq_table_free();
        clk_put(mpu_clk);
        return 0;
 }
 
-static struct freq_attr *omap_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver omap_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = omap_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = omap_target,
        .get            = omap_getspeed,
        .init           = omap_cpu_init,
        .exit           = omap_cpu_exit,
        .name           = "omap",
-       .attr           = omap_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int omap_cpufreq_probe(struct platform_device *pdev)
index 2f0a2a65c37f67eeae9789ed12afa20627fd3503..6164c1cca504881d80ecb64d3bfb10e756a17349 100644 (file)
@@ -140,12 +140,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 }
 
 
-static int cpufreq_p4_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
-}
-
-
 static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
        if (c->x86 == 0x06) {
@@ -230,25 +224,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
                else
                        p4clockmod_table[i].frequency = (stock_freq * i)/8;
        }
-       cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
 
        /* cpuinfo and default policy values */
 
        /* the transition latency is set to be 1 higher than the maximum
         * transition latency of the ondemand governor */
        policy->cpuinfo.transition_latency = 10000001;
-       policy->cur = stock_freq;
 
-       return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+       return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
 }
 
 
-static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
 static unsigned int cpufreq_p4_get(unsigned int cpu)
 {
        u32 l, h;
@@ -267,19 +253,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
        return stock_freq;
 }
 
-static struct freq_attr *p4clockmod_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver p4clockmod_driver = {
-       .verify         = cpufreq_p4_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = cpufreq_p4_target,
        .init           = cpufreq_p4_cpu_init,
-       .exit           = cpufreq_p4_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .get            = cpufreq_p4_get,
        .name           = "p4-clockmod",
-       .attr           = p4clockmod_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id cpufreq_p4_id[] = {
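
Note: here and in the longhaul, powernow, ppc-corenet, ppc_cbe, pxa, sc520 and sh hunks, the cpufreq_frequency_table_cpuinfo() plus cpufreq_frequency_table_get_attr() pair becomes a single cpufreq_table_validate_and_show() call. A sketch of what that helper presumably fuses, mirroring the two calls it replaces:

/* Derive cpuinfo.min_freq/max_freq from the table and, on success,
 * register the table so scaling_available_frequencies is populated. */
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	int ret = cpufreq_frequency_table_cpuinfo(policy, table);

	if (!ret)
		cpufreq_frequency_table_get_attr(table, policy->cpu);

	return ret;
}
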
index 534e43a60d1f9a80b2a952abe254fb01e06c0f12..1cca332728c3c36e2a40a5bc248f1cb39651c741 100644 (file)
@@ -69,11 +69,6 @@ static struct cpufreq_frequency_table pas_freqs[] = {
        {0,     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr *pas_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 /*
  * hardware specific functions
  */
@@ -209,22 +204,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
                pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
        }
 
-       policy->cpuinfo.transition_latency = get_gizmo_latency();
-
        cur_astate = get_cur_astate(policy->cpu);
        pr_debug("current astate is at %d\n",cur_astate);
 
        policy->cur = pas_freqs[cur_astate].frequency;
-       cpumask_copy(policy->cpus, cpu_online_mask);
-
        ppc_proc_freq = policy->cur * 1000ul;
 
-       cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
-
-       /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
-        * are set correctly
-        */
-       return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+       return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
 out_unmap_sdcpwr:
        iounmap(sdcpwr_mapbase);
@@ -253,11 +239,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static int pas_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, pas_freqs);
-}
-
 static int pas_cpufreq_target(struct cpufreq_policy *policy,
                              unsigned int target_freq,
                              unsigned int relation)
@@ -300,9 +281,9 @@ static struct cpufreq_driver pas_cpufreq_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = pas_cpufreq_cpu_init,
        .exit           = pas_cpufreq_cpu_exit,
-       .verify         = pas_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = pas_cpufreq_target,
-       .attr           = pas_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 /*
index d81c4e5ea0ada8c49afac790f1107bc5ce8f11e6..e2b4f40ff69acaaff51f6461caafd4c8a23bdd16 100644 (file)
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
 
 static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
 {
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -396,15 +395,14 @@ static int __init pcc_cpufreq_probe(void)
        struct pcc_memory_resource *mem_resource;
        struct pcc_register_resource *reg_resource;
        union acpi_object *out_obj, *member;
-       acpi_handle handle, osc_handle, pcch_handle;
+       acpi_handle handle, osc_handle;
        int ret = 0;
 
        status = acpi_get_handle(NULL, "\\_SB", &handle);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       status = acpi_get_handle(handle, "PCCH", &pcch_handle);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(handle, "PCCH"))
                return -ENODEV;
 
        status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -560,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
                ioread32(&pcch_hdr->nominal) * 1000;
        policy->min = policy->cpuinfo.min_freq =
                ioread32(&pcch_hdr->minimum_frequency) * 1000;
-       policy->cur = pcc_get_freq(cpu);
-
-       if (!policy->cur) {
-               pr_debug("init: Unable to get current CPU frequency\n");
-               result = -EINVAL;
-               goto out;
-       }
 
        pr_debug("init: policy->max is %d, policy->min is %d\n",
                policy->max, policy->min);
index a096cd3fa23d1aaef19b3ae27ecd87037f01ba6f..6eac1e2300785ed4c3a9dc1d00ea362014061c50 100644 (file)
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
        {0,                     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr* pmac_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static inline void local_delay(unsigned long ms)
 {
        if (no_schedule)
@@ -378,11 +373,6 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
        return cur_freq;
 }
 
-static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
-}
-
 static int pmac_cpufreq_target(        struct cpufreq_policy *policy,
                                        unsigned int target_freq,
                                        unsigned int relation)
@@ -402,14 +392,7 @@ static int pmac_cpufreq_target(    struct cpufreq_policy *policy,
 
 static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       if (policy->cpu != 0)
-               return -ENODEV;
-
-       policy->cpuinfo.transition_latency      = transition_latency;
-       policy->cur = cur_freq;
-
-       cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
-       return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+       return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
 }
 
 static u32 read_gpio(struct device_node *np)
@@ -469,14 +452,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver pmac_cpufreq_driver = {
-       .verify         = pmac_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = pmac_cpufreq_target,
        .get            = pmac_cpufreq_get_speed,
        .init           = pmac_cpufreq_cpu_init,
        .suspend        = pmac_cpufreq_suspend,
        .resume         = pmac_cpufreq_resume,
        .flags          = CPUFREQ_PM_NO_WARN,
-       .attr           = pmac_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
        .name           = "powermac",
 };
 
index 3a51ad7e47c8c67c95da5f3691d7f89e397c34a6..5261b92d768bb31fa89d60bb2692768d412882d8 100644 (file)
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = {
        {0,                     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr* g5_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 /* Power mode data is an array of the 32 bits PCR values to use for
  * the various frequencies, retrieved from the device-tree
  */
@@ -142,7 +137,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
                pmf_call_one(pfunc_vdnap0_complete, &args);
                if (done)
                        break;
-               msleep(1);
+               usleep_range(1000, 1000);
        }
        if (done == 0)
                printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -241,7 +236,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
                if (pfunc_cpu1_volt_low)
                        pmf_call_one(pfunc_cpu1_volt_low, NULL);
        }
-       msleep(10); /* should be faster , to fix */
+       usleep_range(10000, 10000); /* should be faster , to fix */
 }
 
 /*
@@ -286,7 +281,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
                pmf_call_one(pfunc_slewing_done, &args);
                if (done)
                        break;
-               msleep(1);
+               usleep_range(500, 500);
        }
        if (done == 0)
                printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -317,11 +312,6 @@ static int g5_pfunc_query_freq(void)
  * Common interface to the cpufreq core
  */
 
-static int g5_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
-}
-
 static int g5_cpufreq_target(struct cpufreq_policy *policy,
        unsigned int target_freq, unsigned int relation)
 {
@@ -357,27 +347,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
 
 static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
-       /* secondary CPUs are tied to the primary one by the
-        * cpufreq core if in the secondary policy we tell it that
-        * it actually must be one policy together with all others. */
-       cpumask_copy(policy->cpus, cpu_online_mask);
-       cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy,
-               g5_cpu_freqs);
+       return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
 }
 
-
 static struct cpufreq_driver g5_cpufreq_driver = {
        .name           = "powermac",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = g5_cpufreq_cpu_init,
-       .verify         = g5_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = g5_cpufreq_target,
        .get            = g5_cpufreq_get_speed,
-       .attr           = g5_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 
@@ -397,7 +377,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
        /* Check supported platforms */
        if (of_machine_is_compatible("PowerMac8,1") ||
            of_machine_is_compatible("PowerMac8,2") ||
-           of_machine_is_compatible("PowerMac9,1"))
+           of_machine_is_compatible("PowerMac9,1") ||
+           of_machine_is_compatible("PowerMac12,1"))
                use_volts_smu = 1;
        else if (of_machine_is_compatible("PowerMac11,2"))
                use_volts_vdnap = 1;
@@ -647,8 +628,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
        g5_cpu_freqs[0].frequency = max_freq;
        g5_cpu_freqs[1].frequency = min_freq;
 
+       /* Based on a measurement on Xserve G5, rounded up. */
+       transition_latency = 10 * NSEC_PER_MSEC;
+
        /* Set callbacks */
-       transition_latency = CPUFREQ_ETERNAL;
        g5_switch_volt = g5_pfunc_switch_volt;
        g5_switch_freq = g5_pfunc_switch_freq;
        g5_query_freq = g5_pfunc_query_freq;
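
Note: the msleep() to usleep_range() swaps in the pmac64 slew-polling loops above matter because msleep() is jiffy based: with HZ=100 a nominal 1 ms sleep cannot complete before the next 10 ms tick, so every poll iteration oversleeps by an order of magnitude, whereas usleep_range() uses hrtimers and honours sub-millisecond bounds. A sketch of the bounded-poll idiom the hunks move to; the function and names are placeholders, not from this commit:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll a completion flag with sub-jiffy granularity.  The min/max window
 * lets the scheduler coalesce wakeups instead of pinning an exact deadline. */
static int example_wait_done(bool (*done)(void), int max_loops)
{
	while (max_loops--) {
		if (done())
			return 0;
		usleep_range(500, 1000);	/* 0.5-1 ms per iteration */
	}
	return -ETIMEDOUT;
}
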
index 85f1c8c25ddc5d20a361e219d023548b356b835b..eda17024a34adb0e11cd9cb4553d4d0ca7d18164 100644 (file)
@@ -104,19 +104,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
 }
 
 
-/**
- * powernow_k6_verify - verifies a new CPUfreq policy
- * @policy: new policy
- *
- * Policy must be within lowest and highest possible CPU Frequency,
- * and at least one possible state must be within min and max.
- */
-static int powernow_k6_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
-}
-
-
 /**
  * powernow_k6_setpolicy - sets a new CPUFreq policy
  * @policy: new policy
@@ -145,7 +132,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
 static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int i, f;
-       int result;
 
        if (policy->cpu != 0)
                return -ENODEV;
@@ -165,15 +151,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = 200000;
-       policy->cur = busfreq * max_multiplier;
-
-       result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
 
-       return 0;
+       return cpufreq_table_validate_and_show(policy, clock_ratio);
 }
 
 
@@ -195,19 +174,14 @@ static unsigned int powernow_k6_get(unsigned int cpu)
        return ret;
 }
 
-static struct freq_attr *powernow_k6_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver powernow_k6_driver = {
-       .verify         = powernow_k6_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = powernow_k6_target,
        .init           = powernow_k6_cpu_init,
        .exit           = powernow_k6_cpu_exit,
        .get            = powernow_k6_get,
        .name           = "powernow-k6",
-       .attr           = powernow_k6_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id powernow_k6_ids[] = {
index 14ce480be8ab2503515cb1d61b8f30b5892d85d2..44d345bad6fb1d9b93345ea5b80e24c7cfc0b281 100644 (file)
@@ -549,11 +549,6 @@ static int powernow_target(struct cpufreq_policy *policy,
 }
 
 
-static int powernow_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, powernow_table);
-}
-
 /*
  * We use the fact that the bus frequency is somehow
  * a multiple of 100000/3 khz, then we compute sgtc according
@@ -678,11 +673,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency =
                cpufreq_scale(2000000UL, fsb, latency);
 
-       policy->cur = powernow_get(0);
-
-       cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
+       return cpufreq_table_validate_and_show(policy, powernow_table);
 }
 
 static int powernow_cpu_exit(struct cpufreq_policy *policy)
@@ -701,13 +692,8 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct freq_attr *powernow_table_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver powernow_driver = {
-       .verify         = powernow_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = powernow_target,
        .get            = powernow_get,
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
@@ -716,7 +702,7 @@ static struct cpufreq_driver powernow_driver = {
        .init           = powernow_cpu_init,
        .exit           = powernow_cpu_exit,
        .name           = "powernow-k7",
-       .attr           = powernow_table_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init powernow_init(void)
index 2344a9ed17f3c38017701e075c945df8ae0f4bdf..298beb742ebb1e62ef743c7e3392bd0f980bc3ba 100644 (file)
@@ -1053,17 +1053,6 @@ static int powernowk8_target(struct cpufreq_policy *pol,
        return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 
-/* Driver entry point to verify the policy and range of frequencies */
-static int powernowk8_verify(struct cpufreq_policy *pol)
-{
-       struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
-
-       if (!data)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(pol, data->powernow_table);
-}
-
 struct init_on_cpu {
        struct powernow_k8_data *data;
        int rc;
@@ -1152,11 +1141,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
        cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
        data->available_cores = pol->cpus;
 
-       pol->cur = find_khz_freq_from_fid(data->currfid);
-       pr_debug("policy current frequency %d kHz\n", pol->cur);
-
        /* min/max the cpu is capable of */
-       if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
+       if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
                printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
                powernow_k8_cpu_exit_acpi(data);
                kfree(data->powernow_table);
@@ -1164,8 +1150,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
                return -EINVAL;
        }
 
-       cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
        pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
                 data->currfid, data->currvid);
 
@@ -1227,20 +1211,15 @@ out:
        return khz;
 }
 
-static struct freq_attr *powernow_k8_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cpufreq_amd64_driver = {
-       .verify         = powernowk8_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = powernowk8_target,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = powernowk8_cpu_init,
        .exit           = powernowk8_cpu_exit,
        .get            = powernowk8_get,
        .name           = "powernow-k8",
-       .attr           = powernow_k8_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static void __request_acpi_cpufreq(void)
index 60e81d524ea8231666210ecca2c97407c0a5a279..a0f562ca292dd6124226329a1202ba00435c5a6a 100644 (file)
@@ -202,7 +202,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        table[i].frequency = CPUFREQ_TABLE_END;
 
        /* set the min and max frequency properly */
-       ret = cpufreq_frequency_table_cpuinfo(policy, table);
+       ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("invalid frequency table: %d\n", ret);
                goto err_nomem1;
@@ -217,9 +217,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
                per_cpu(cpu_data, i) = data;
 
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = corenet_cpufreq_get_speed(policy->cpu);
-
-       cpufreq_frequency_table_get_attr(table, cpu);
        of_node_put(np);
 
        return 0;
@@ -253,14 +250,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       struct cpufreq_frequency_table *table =
-               per_cpu(cpu_data, policy->cpu)->table;
-
-       return cpufreq_frequency_table_verify(policy, table);
-}
-
 static int corenet_cpufreq_target(struct cpufreq_policy *policy,
                unsigned int target_freq, unsigned int relation)
 {
@@ -293,20 +282,15 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static struct freq_attr *corenet_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
        .name           = "ppc_cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = corenet_cpufreq_cpu_init,
        .exit           = __exit_p(corenet_cpufreq_cpu_exit),
-       .verify         = corenet_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = corenet_cpufreq_target,
        .get            = corenet_cpufreq_get_speed,
-       .attr           = corenet_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct of_device_id node_matches[] __initdata = {
index 2e448f0bbdc583465672e7ec40804f1d01467224..38540d1f59390b1306b286bcb36fb34a3980bbcb 100644 (file)
@@ -123,22 +123,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
        cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
-       cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
        /* this ensures that policy->cpuinfo_min
         * and policy->cpuinfo_max are set correctly */
-       return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, cbe_freqs);
+       return cpufreq_table_validate_and_show(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_target(struct cpufreq_policy *policy,
@@ -176,10 +163,10 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy,
 }
 
 static struct cpufreq_driver cbe_cpufreq_driver = {
-       .verify         = cbe_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = cbe_cpufreq_target,
        .init           = cbe_cpufreq_cpu_init,
-       .exit           = cbe_cpufreq_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "cbe-cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
 };
index 8749eaf1879338ad331f587c9a716a501ab0c701..29aca574317bd02bb3932bb890da9726ccd8268d 100644 (file)
@@ -262,23 +262,6 @@ static u32 mdrefr_dri(unsigned int freq)
        return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
 }
 
-/* find a valid frequency point */
-static int pxa_verify_policy(struct cpufreq_policy *policy)
-{
-       struct cpufreq_frequency_table *pxa_freqs_table;
-       pxa_freqs_t *pxa_freqs;
-       int ret;
-
-       find_freq_tables(&pxa_freqs_table, &pxa_freqs);
-       ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
-
-       if (freq_debug)
-               pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
-                        policy->min, policy->max);
-
-       return ret;
-}
-
 static unsigned int pxa_cpufreq_get(unsigned int cpu)
 {
        return get_clk_frequency_khz(0);
@@ -414,8 +397,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 
        /* set default policy and cpuinfo */
        policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
-       policy->cur = get_clk_frequency_khz(0);    /* current freq */
-       policy->min = policy->max = policy->cur;
 
        /* Generate pxa25x the run cpufreq_frequency_table struct */
        for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
@@ -453,10 +434,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
                find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
                pr_info("PXA255 cpufreq using %s frequency table\n",
                        pxa255_turbo_table ? "turbo" : "run");
-               cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+
+               cpufreq_table_validate_and_show(policy, pxa255_freq_table);
+       }
+       else if (cpu_is_pxa27x()) {
+               cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
        }
-       else if (cpu_is_pxa27x())
-               cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
 
        printk(KERN_INFO "PXA CPU frequency change support initialized\n");
 
@@ -464,9 +447,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver pxa_cpufreq_driver = {
-       .verify = pxa_verify_policy,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = pxa_set_target,
        .init   = pxa_cpufreq_init,
+       .exit   = cpufreq_generic_exit,
        .get    = pxa_cpufreq_get,
        .name   = "PXA2xx",
 };
index d26306fb00d2e8a749ed044c816533f101defd11..47fbee49d6e594f2dfd6ca1e55ddebbe0b3a2f90 100644 (file)
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
        pxa3xx_freqs_num = num;
        pxa3xx_freqs_table = table;
 
-       return cpufreq_frequency_table_cpuinfo(policy, table);
+       return cpufreq_table_validate_and_show(policy, table);
 }
 
 static void __update_core_freq(struct pxa3xx_freq_info *info)
@@ -150,11 +150,6 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info)
                cpu_relax();
 }
 
-static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
-}
-
 static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
 {
        return pxa3xx_get_clk_frequency_khz(0);
@@ -206,11 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
        int ret = -EINVAL;
 
        /* set default policy and cpuinfo */
-       policy->cpuinfo.min_freq = 104000;
-       policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+       policy->min = policy->cpuinfo.min_freq = 104000;
+       policy->max = policy->cpuinfo.max_freq =
+               (cpu_is_pxa320()) ? 806000 : 624000;
        policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
-       policy->max = pxa3xx_get_clk_frequency_khz(0);
-       policy->cur = policy->min = policy->max;
 
        if (cpu_is_pxa300() || cpu_is_pxa310())
                ret = setup_freqs_table(policy, pxa300_freqs,
@@ -230,9 +224,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver pxa3xx_cpufreq_driver = {
-       .verify         = pxa3xx_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = pxa3xx_cpufreq_set,
        .init           = pxa3xx_cpufreq_init,
+       .exit           = cpufreq_generic_exit,
        .get            = pxa3xx_cpufreq_get,
        .name           = "pxa3xx-cpufreq",
 };
index 22dcb81ef9d0e9069b8fc4d49249713d59d41dd9..26a35d1371574f2e4abef7dd05beea6becb8f2a2 100644 (file)
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = {
        { 0, CPUFREQ_TABLE_END },
 };
 
-static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-       struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
-}
-
 static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
 {
        struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
@@ -486,20 +476,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
                freq++;
        }
 
-       policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
-
        /* Datasheet says PLL stabalisation time must be at least 300us,
         * so but add some fudge. (reference in LOCKCON0 register description)
         */
-       policy->cpuinfo.transition_latency = (500 * 1000) +
-                                            s3c_freq->regulator_latency;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
+       ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+                       (500 * 1000) + s3c_freq->regulator_latency);
        if (ret)
                goto err_freq_table;
 
-       cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
-
        register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
 
        return 0;
@@ -518,19 +502,14 @@ err_hclk:
        return ret;
 }
 
-static struct freq_attr *s3c2416_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver s3c2416_cpufreq_driver = {
        .flags          = 0,
-       .verify         = s3c2416_cpufreq_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s3c2416_cpufreq_set_target,
        .get            = s3c2416_cpufreq_get_speed,
        .init           = s3c2416_cpufreq_driver_init,
        .name           = "s3c2416",
-       .attr           = s3c2416_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init s3c2416_cpufreq_init(void)
index b0f343fcb7eefdfe40b942b202398372a618df44..485088253358d1a5712c9f73cfea9036f65aa156 100644 (file)
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
-       printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
-
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       policy->cur = s3c_cpufreq_get(0);
-       policy->min = policy->cpuinfo.min_freq = 0;
-       policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
-       /* feed the latency information from the cpu driver */
-       policy->cpuinfo.transition_latency = cpu_cur.info->latency;
-
-       if (ftab)
-               cpufreq_frequency_table_cpuinfo(policy, ftab);
-
-       return 0;
+       return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
 }
 
 static int __init s3c_cpufreq_initclks(void)
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void)
        return 0;
 }
 
-static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static struct cpufreq_frequency_table suspend_pll;
 static unsigned int suspend_freq;
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver s3c24xx_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = s3c_cpufreq_verify,
        .target         = s3c_cpufreq_target,
        .get            = s3c_cpufreq_get,
        .init           = s3c_cpufreq_init,
index 8a72b0c555f846da21d25d02d9f94d427a7796b5..461617332033de80fb7c1ff774bfd15106670830 100644 (file)
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
 };
 #endif
 
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
 static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
 {
        if (cpu != 0)
@@ -166,7 +158,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
                if (freq->frequency == CPUFREQ_ENTRY_INVALID)
                        continue;
 
-               dvfs = &s3c64xx_dvfs_table[freq->index];
+               dvfs = &s3c64xx_dvfs_table[freq->driver_data];
                found = 0;
 
                for (i = 0; i < count; i++) {
@@ -243,15 +235,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
                freq++;
        }
 
-       policy->cur = clk_get_rate(armclk) / 1000;
-
        /* Datasheet says PLL stabalisation time (if we were to use
         * the PLLs, which we don't currently) is ~300us worst case,
         * but add some fudge.
         */
-       policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+       ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+                       (500 * 1000) + regulator_latency);
        if (ret != 0) {
                pr_err("Failed to configure frequency table: %d\n",
                       ret);
@@ -264,7 +253,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver s3c64xx_cpufreq_driver = {
        .flags          = 0,
-       .verify         = s3c64xx_cpufreq_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s3c64xx_cpufreq_set_target,
        .get            = s3c64xx_cpufreq_get_speed,
        .init           = s3c64xx_cpufreq_driver_init,
index 5c77570737937c298577303904baa60823fdce60..600b4f472e28432ad9ac6b0bab2b1a3d5091104f 100644 (file)
@@ -174,14 +174,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
        __raw_writel(tmp1, reg);
 }
 
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
-       if (policy->cpu)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
 static unsigned int s5pv210_getspeed(unsigned int cpu)
 {
        if (cpu)
@@ -551,13 +543,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
        s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
        s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
-       policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
-       cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
-       policy->cpuinfo.transition_latency = 40000;
-
-       return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+       return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
 
 out_dmc1:
        clk_put(dmc0_clk);
@@ -605,7 +591,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
 
 static struct cpufreq_driver s5pv210_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = s5pv210_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s5pv210_target,
        .get            = s5pv210_getspeed,
        .init           = s5pv210_cpu_init,
index cff18e87ca58721cc9ae13e432816d712d7d0c2d..b282cea47e628d67551c2366b0d494dda781cff7 100644 (file)
@@ -218,18 +218,12 @@ static int sa1100_target(struct cpufreq_policy *policy,
 
 static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
 {
-       if (policy->cpu != 0)
-               return -EINVAL;
-       policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-       policy->cpuinfo.min_freq = 59000;
-       policy->cpuinfo.max_freq = 287000;
-       policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       return 0;
+       return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 static struct cpufreq_driver sa1100_driver __refdata = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = sa11x0_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = sa1100_target,
        .get            = sa11x0_getspeed,
        .init           = sa1100_cpu_init,
index 39c90b6f42865eb8f8950ce0f7d7ce84d463a9ee..bca04c0b4a734369ef5281a6acebb575a2940da1 100644 (file)
@@ -332,20 +332,14 @@ static int sa1110_target(struct cpufreq_policy *policy,
 
 static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
 {
-       if (policy->cpu != 0)
-               return -EINVAL;
-       policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-       policy->cpuinfo.min_freq = 59000;
-       policy->cpuinfo.max_freq = 287000;
-       policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       return 0;
+       return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 /* sa1110_driver needs __refdata because it must remain after init registers
  * it with cpufreq_register_driver() */
 static struct cpufreq_driver sa1110_driver __refdata = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = sa11x0_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = sa1110_target,
        .get            = sa11x0_getspeed,
        .init           = sa1110_cpu_init,
index d6f6c6f4efa76ac6f82a97ddeff3292a06cfd18c..9047ab1ca014b9e4b735abc60f244a54d1a74733 100644 (file)
@@ -78,11 +78,6 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
 static int sc520_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -106,7 +101,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *c = &cpu_data(0);
-       int result;
 
        /* capability check */
        if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,39 +109,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-       policy->cur = sc520_freq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
-       return 0;
-}
-
 
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, sc520_freq_table);
 }
 
 
-static struct freq_attr *sc520_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-
 static struct cpufreq_driver sc520_freq_driver = {
        .get    = sc520_freq_get_cpu_frequency,
-       .verify = sc520_freq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = sc520_freq_target,
        .init   = sc520_freq_cpu_init,
-       .exit   = sc520_freq_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "sc520_freq",
-       .attr   = sc520_freq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id sc520_ids[] = {
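
For drivers that still set their own transition latency, the sc520 conversion above uses cpufreq_table_validate_and_show() together with the generic exit and attr helpers, instead of cpufreq_frequency_table_cpuinfo()/_get_attr() and a private freq_attr array. A sketch with hypothetical bar_* names, assuming the helper semantics implied by these hunks:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table bar_freq_table[] = {
	{ .frequency = 100000 },
	{ .frequency = 133000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int bar_cpu_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 1000000;	/* 1 ms */

	/* Validates the table, fills in the policy limits and records the
	 * table so cpufreq_generic_attr can expose the available
	 * frequencies in sysfs. */
	return cpufreq_table_validate_and_show(policy, bar_freq_table);
}

static struct cpufreq_driver bar_driver = {
	/* .target callback omitted in this sketch */
	.verify	= cpufreq_generic_frequency_table_verify,
	.init	= bar_cpu_init,
	.exit	= cpufreq_generic_exit,	/* releases the per-CPU table again */
	.attr	= cpufreq_generic_attr,
	.name	= "bar-freq",
};
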
index ffc6d24b0cfbed764db6b6c8e2015330e6fb9fbd..387af12503a64e43f15d8fd0fa1d6e108a741f62 100644 (file)
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
        if (freq_table)
                return cpufreq_frequency_table_verify(policy, freq_table);
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
        policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return PTR_ERR(cpuclk);
        }
 
-       policy->cur = sh_cpufreq_get(cpu);
-
        freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
        if (freq_table) {
                int result;
 
-               result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-               if (!result)
-                       cpufreq_frequency_table_get_attr(freq_table, cpu);
+               result = cpufreq_table_validate_and_show(policy, freq_table);
+               if (result)
+                       return result;
        } else {
                dev_notice(dev, "no frequency table found, falling back "
                           "to rate rounding.\n");
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct freq_attr *sh_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver sh_cpufreq_driver = {
        .name           = "sh",
        .get            = sh_cpufreq_get,
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
        .verify         = sh_cpufreq_verify,
        .init           = sh_cpufreq_cpu_init,
        .exit           = sh_cpufreq_cpu_exit,
-       .attr           = sh_freq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init sh_cpufreq_module_init(void)
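
The sh_cpufreq_verify() hunk above replaces the open-coded cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq) calls with cpufreq_verify_within_cpu_limits(). Judging from these call sites, the new helper is just shorthand for the old two-argument form; a sketch of the assumed behaviour:

#include <linux/cpufreq.h>

/* Assumed equivalent of cpufreq_verify_within_cpu_limits(policy): clamp
 * the requested policy to the limits recorded in policy->cpuinfo. */
static inline int demo_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	return cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
					    policy->cpuinfo.max_freq);
}
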
index cf5bc2ca16fa11f4155d757c6d5219b6691a6b81..291688c1da9acb5883fe3c2339ebbdf216a81696 100644 (file)
@@ -295,12 +295,6 @@ static int us2e_freq_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             &us2e_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
@@ -324,13 +318,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = 0;
        policy->cur = clock_tick;
 
-       return cpufreq_frequency_table_cpuinfo(policy, table);
+       return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us2e_driver)
+       if (cpufreq_us2e_driver) {
+               cpufreq_frequency_table_put_attr(policy->cpu);
                us2e_set_cpu_divider_index(policy, 0);
+       }
 
        return 0;
 }
@@ -361,7 +357,7 @@ static int __init us2e_freq_init(void)
                        goto err_out;
 
                driver->init = us2e_freq_cpu_init;
-               driver->verify = us2e_freq_verify;
+               driver->verify = cpufreq_generic_frequency_table_verify;
                driver->target = us2e_freq_target;
                driver->get = us2e_freq_get;
                driver->exit = us2e_freq_cpu_exit;
index ac76b489979d48cac1278525ab861f46b69f8c86..9b3dbd31362efaa3f5b3a1dcffe5a34c16f42f49 100644 (file)
@@ -156,12 +156,6 @@ static int us3_freq_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             &us3_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
@@ -181,13 +175,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = 0;
        policy->cur = clock_tick;
 
-       return cpufreq_frequency_table_cpuinfo(policy, table);
+       return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us3_driver)
+       if (cpufreq_us3_driver) {
+               cpufreq_frequency_table_put_attr(policy->cpu);
                us3_set_cpu_divider_index(policy, 0);
+       }
 
        return 0;
 }
@@ -222,7 +218,7 @@ static int __init us3_freq_init(void)
                        goto err_out;
 
                driver->init = us3_freq_cpu_init;
-               driver->verify = us3_freq_verify;
+               driver->verify = cpufreq_generic_frequency_table_verify;
                driver->target = us3_freq_target;
                driver->get = us3_freq_get;
                driver->exit = us3_freq_cpu_exit;
index 19e364fa59559cd6bf3e0ec77d9e5bb7c391aafa..8841366a2068e2c41898daf982997820a10ed725 100644 (file)
@@ -30,11 +30,6 @@ static struct {
        u32 cnt;
 } spear_cpufreq;
 
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
 static unsigned int spear_cpufreq_get(unsigned int cpu)
 {
        return clk_get_rate(spear_cpufreq.clk) / 1000;
@@ -113,7 +108,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
                unsigned int target_freq, unsigned int relation)
 {
        struct cpufreq_freqs freqs;
-       unsigned long newfreq;
+       long newfreq;
        struct clk *srcclk;
        int index, ret, mult = 1;
 
@@ -176,43 +171,19 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
 
 static int spear_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
-       if (ret) {
-               pr_err("cpufreq_frequency_table_cpuinfo() failed");
-               return ret;
-       }
-
-       cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
-       policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
-       policy->cur = spear_cpufreq_get(0);
-
-       cpumask_setall(policy->cpus);
-
-       return 0;
-}
-
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+                       spear_cpufreq.transition_latency);
 }
 
-static struct freq_attr *spear_cpufreq_attr[] = {
-        &cpufreq_freq_attr_scaling_available_freqs,
-        NULL,
-};
-
 static struct cpufreq_driver spear_cpufreq_driver = {
        .name           = "cpufreq-spear",
        .flags          = CPUFREQ_STICKY,
-       .verify         = spear_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = spear_cpufreq_target,
        .get            = spear_cpufreq_get,
        .init           = spear_cpufreq_init,
-       .exit           = spear_cpufreq_exit,
-       .attr           = spear_cpufreq_attr,
+       .exit           = cpufreq_generic_exit,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int spear_cpufreq_driver_init(void)
index f897d510584285bea376474ca6a3198eeef7d74f..25e45f89acac1b550c227c969e9fe9ebf659b077 100644 (file)
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
-       unsigned freq;
        unsigned l, h;
-       int ret;
        int i;
 
        /* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       if (centrino_cpu_init_table(policy)) {
+       if (centrino_cpu_init_table(policy))
                return -ENODEV;
-       }
 
        /* Check to see if Enhanced SpeedStep is enabled, and try to
           enable it if not. */
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                }
        }
 
-       freq = get_cur_freq(policy->cpu);
        policy->cpuinfo.transition_latency = 10000;
                                                /* 10uS transition latency */
-       policy->cur = freq;
-
-       pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
 
-       ret = cpufreq_frequency_table_cpuinfo(policy,
+       return cpufreq_table_validate_and_show(policy,
                per_cpu(centrino_model, policy->cpu)->op_points);
-       if (ret)
-               return (ret);
-
-       cpufreq_frequency_table_get_attr(
-               per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
-       return 0;
 }
 
 static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -427,19 +413,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-/**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                       per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
 /**
  * centrino_setpolicy - set a new CPUFreq policy
  * @policy: new policy
@@ -561,20 +534,15 @@ out:
        return retval;
 }
 
-static struct freq_attr* centrino_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver centrino_driver = {
        .name           = "centrino", /* should be speedstep-centrino,
                                         but there's a 16 char limit */
        .init           = centrino_cpu_init,
        .exit           = centrino_cpu_exit,
-       .verify         = centrino_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = centrino_target,
        .get            = get_cur_freq,
-       .attr           = centrino_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 /*
index 5355abb69afc972d88c9f509924d3e779fa0cdac..1a8b01bd0feca15998092d539a274cce543216ad 100644 (file)
@@ -289,18 +289,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
 }
 
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
 struct get_freqs {
        struct cpufreq_policy *policy;
        int ret;
@@ -320,8 +308,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
 
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-       unsigned int policy_cpu, speed;
+       unsigned int policy_cpu;
        struct get_freqs gf;
 
        /* only run on CPU to be set, or on its sibling */
@@ -336,49 +323,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
        if (gf.ret)
                return gf.ret;
 
-       /* get current speed setting */
-       speed = speedstep_get(policy_cpu);
-       if (!speed)
-               return -EIO;
-
-       pr_debug("currently at %s speed setting - %i MHz\n",
-               (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-               ? "low" : "high",
-               (speed / 1000));
-
-       /* cpuinfo and default policy values */
-       policy->cur = speed;
-
-       result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-       return 0;
-}
-
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
-static struct freq_attr *speedstep_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 
 static struct cpufreq_driver speedstep_driver = {
        .name   = "speedstep-ich",
-       .verify = speedstep_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = speedstep_target,
        .init   = speedstep_cpu_init,
-       .exit   = speedstep_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .get    = speedstep_get,
-       .attr   = speedstep_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
index abfba4f731ebdc4212696732a765b56083bc8252..a02b649c9647ef04432a811abb0ade749eb1a823 100644 (file)
@@ -264,23 +264,9 @@ static int speedstep_target(struct cpufreq_policy *policy,
 }
 
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
        int result;
-       unsigned int speed, state;
        unsigned int *low, *high;
 
        /* capability check */
@@ -316,32 +302,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
                        pr_debug("workaround worked.\n");
        }
 
-       /* get current speed setting */
-       state = speedstep_get_state();
-       speed = speedstep_freqs[state].frequency;
-
-       pr_debug("currently at %s speed setting - %i MHz\n",
-               (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-               ? "low" : "high",
-               (speed / 1000));
-
-       /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = speed;
-
-       result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-       return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +324,15 @@ static int speedstep_resume(struct cpufreq_policy *policy)
        return result;
 }
 
-static struct freq_attr *speedstep_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver speedstep_driver = {
        .name           = "speedstep-smi",
-       .verify         = speedstep_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = speedstep_target,
        .init           = speedstep_cpu_init,
-       .exit           = speedstep_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .get            = speedstep_get,
        .resume         = speedstep_resume,
-       .attr           = speedstep_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
index a7b876fdc1d8a5b672346233d7f7b7ce0a7c962f..32483ef63d5305c064a97ff5aa1280ed1203ff87 100644 (file)
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS];
 static DEFINE_MUTEX(tegra_cpu_lock);
 static bool is_suspended;
 
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int tegra_getspeed(unsigned int cpu)
 {
        unsigned long rate;
@@ -209,21 +204,23 @@ static struct notifier_block tegra_cpu_pm_notifier = {
 
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
+       int ret;
+
        if (policy->cpu >= NUM_CPUS)
                return -EINVAL;
 
        clk_prepare_enable(emc_clk);
        clk_prepare_enable(cpu_clk);
 
-       cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-       policy->cur = tegra_getspeed(policy->cpu);
-       target_cpu_speed[policy->cpu] = policy->cur;
+       target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
 
        /* FIXME: what's the actual transition time? */
-       policy->cpuinfo.transition_latency = 300 * 1000;
-
-       cpumask_copy(policy->cpus, cpu_possible_mask);
+       ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+       if (ret) {
+               clk_disable_unprepare(cpu_clk);
+               clk_disable_unprepare(emc_clk);
+               return ret;
+       }
 
        if (policy->cpu == 0)
                register_pm_notifier(&tegra_cpu_pm_notifier);
@@ -233,24 +230,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_cpuinfo(policy, freq_table);
+       cpufreq_frequency_table_put_attr(policy->cpu);
+       clk_disable_unprepare(cpu_clk);
        clk_disable_unprepare(emc_clk);
        return 0;
 }
 
-static struct freq_attr *tegra_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver tegra_cpufreq_driver = {
-       .verify         = tegra_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = tegra_target,
        .get            = tegra_getspeed,
        .init           = tegra_cpu_init,
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
-       .attr           = tegra_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init tegra_cpufreq_init(void)
index b225f04d8ae5c55bcfee26f644812d3d1100cc04..653ae2955b555ad63607d84a4ebeb27be205e644 100644 (file)
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
        if (policy->cpu)
                return -EINVAL;
 
-       cpufreq_verify_within_limits(policy,
-                       policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
 {
        if (policy->cpu != 0)
                return -EINVAL;
-       policy->cur = ucv2_getspeed(0);
        policy->min = policy->cpuinfo.min_freq = 250000;
        policy->max = policy->cpuinfo.max_freq = 1000000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
index 8e366032230893dc35c0d03f55138a20e0a33f07..f23bd75426cdfcac5da246ef6f23adef0a15b0da 100644 (file)
@@ -2,6 +2,17 @@
 # ARM CPU Idle drivers
 #
 
+config ARM_BIG_LITTLE_CPUIDLE
+       bool "Support for ARM big.LITTLE processors"
+       depends on ARCH_VEXPRESS_TC2_PM
+       select ARM_CPU_SUSPEND
+       select CPU_IDLE_MULTIPLE_DRIVERS
+       help
+         Select this option to enable the CPU idle driver for big.LITTLE
+         based ARM systems. The driver manages CPU coordination through
+         MCPM and defines different C-states for little and big cores
+         through the multiple CPU idle drivers infrastructure.

+
 config ARM_HIGHBANK_CPUIDLE
        bool "CPU Idle Driver for Calxeda processors"
        depends on ARCH_HIGHBANK
@@ -27,13 +38,9 @@ config ARM_U8500_CPUIDLE
        help
          Select this to enable cpuidle for ST-E u8500 processors
 
-config CPU_IDLE_BIG_LITTLE
-       bool "Support for ARM big.LITTLE processors"
-       depends on ARCH_VEXPRESS_TC2_PM
-       select ARM_CPU_SUSPEND
-       select CPU_IDLE_MULTIPLE_DRIVERS
+config ARM_AT91_CPUIDLE
+       bool "Cpu Idle Driver for the AT91 processors"
+       default y
+       depends on ARCH_AT91
        help
-         Select this option to enable CPU idle driver for big.LITTLE based
-         ARM systems. Driver manages CPUs coordination through MCPM and
-         define different C-states for little and big cores through the
-         multiple CPU idle drivers infrastructure.
+         Select this to enable cpuidle for AT91 processors
index cea5ef58876d8202521545008afb9026565fbe7e..527be28e5c1e4e785032a83027899fdf47103b21 100644 (file)
@@ -7,8 +7,9 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 
 ##################################################################################
 # ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE)   += cpuidle-big_little.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE)     += cpuidle-calxeda.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE)     += cpuidle-kirkwood.o
 obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)         += cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
-obj-$(CONFIG_CPU_IDLE_BIG_LITTLE)      += cpuidle-big_little.o
+obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
similarity index 79%
rename from arch/arm/mach-at91/cpuidle.c
rename to drivers/cpuidle/cpuidle-at91.c
index 4ec6a6d9b9be7e7e0e853ad17a57f2ae2d7b986b..a0774370c6bc41acd38c410beedd55e4592558d0 100644 (file)
 #include <linux/export.h>
 #include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
-#include <mach/cpu.h>
-
-#include "pm.h"
 
 #define AT91_MAX_STATES        2
 
+static void (*at91_standby)(void);
+
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                               int index)
 {
-       if (cpu_is_at91rm9200())
-               at91rm9200_standby();
-       else if (cpu_is_at91sam9g45())
-               at91sam9g45_standby();
-       else if (cpu_is_at91sam9263())
-               at91sam9263_standby();
-       else
-               at91sam9_standby();
-
+       at91_standby();
        return index;
 }
 
@@ -60,9 +51,19 @@ static struct cpuidle_driver at91_idle_driver = {
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int __init at91_init_cpuidle(void)
+static int at91_cpuidle_probe(struct platform_device *dev)
 {
+       at91_standby = (void *)(dev->dev.platform_data);
+       
        return cpuidle_register(&at91_idle_driver, NULL);
 }
 
-device_initcall(at91_init_cpuidle);
+static struct platform_driver at91_cpuidle_driver = {
+       .driver = {
+               .name = "cpuidle-at91",
+               .owner = THIS_MODULE,
+       },
+       .probe = at91_cpuidle_probe,
+};
+
+module_platform_driver(at91_cpuidle_driver);
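
With the move to drivers/cpuidle, the AT91 driver no longer probes the SoC type itself; the SoC-specific standby routine is expected to arrive through platform_data when a "cpuidle-at91" device is registered. A sketch of that registration side, which is not part of this hunk and is assumed to live in the mach-at91 power-management code:

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device at91_cpuidle_device = {
	.name = "cpuidle-at91",
	.id   = -1,
};

/* Hypothetical helper: the SoC setup code hands its standby callback
 * to the generic driver via platform_data before registering. */
static void __init at91_cpuidle_device_init(void (*standby)(void))
{
	at91_cpuidle_device.dev.platform_data = (void *)standby;
	platform_device_register(&at91_cpuidle_device);
}
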
index e0564652af35114f2d73227b68bf781758975629..5e35804b1a952393dd84b20aacaf96442eac4d62 100644 (file)
@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
        .state_count = 2,
 };
 
-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
+static int dbx500_cpuidle_probe(struct platform_device *pdev)
 {
        /* Configure wake up reasons */
        prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
index 38e03a18359156077e17d8f5b35671de47bf6d8e..aded759280282b08e7fb3e9025f130240b9f6074 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/cpu_pm.h>
 #include <linux/cpuidle.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
@@ -70,14 +70,19 @@ static struct cpuidle_driver zynq_idle_driver = {
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int __init zynq_cpuidle_init(void)
+static int zynq_cpuidle_probe(struct platform_device *pdev)
 {
-       if (!of_machine_is_compatible("xlnx,zynq-7000"))
-               return -ENODEV;
-
        pr_info("Xilinx Zynq CpuIdle Driver started\n");
 
        return cpuidle_register(&zynq_idle_driver, NULL);
 }
 
-device_initcall(zynq_cpuidle_init);
+static struct platform_driver zynq_cpuidle_driver = {
+       .driver = {
+               .name = "cpuidle-zynq",
+               .owner = THIS_MODULE,
+       },
+       .probe = zynq_cpuidle_probe,
+};
+
+module_platform_driver(zynq_cpuidle_driver);
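
Both cpuidle conversions above drop device_initcall() in favour of module_platform_driver(), which generates the usual register/unregister pair. Assuming the standard definition of that macro, the zynq case expands to roughly:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init zynq_cpuidle_driver_init(void)
{
	return platform_driver_register(&zynq_cpuidle_driver);
}
module_init(zynq_cpuidle_driver_init);

static void __exit zynq_cpuidle_driver_exit(void)
{
	platform_driver_unregister(&zynq_cpuidle_driver);
}
module_exit(zynq_cpuidle_driver_exit);

The of_machine_is_compatible("xlnx,zynq-7000") check that used to gate the initcall is dropped, presumably because the driver now only binds once the platform code creates a matching "cpuidle-zynq" device.
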
index 21180d6cad6e27f2f316a04b1e98fb2e61ac90d3..214357e12dc0b5469bb3c9daae5904ef3a618845 100644 (file)
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
 
 static int support_aes = 1;
 
-static void dev_release(struct device *dev)
-{
-       return;
-}
-
 #define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
-       .name = DRIVER_NAME,
-       .id   = 0,
-       .num_resources = 0,
-       .dev  = {
-               .coherent_dma_mask = DMA_BIT_MASK(32),
-               .release = dev_release,
-       }
-};
 
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
 
 static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 {
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
 
 static int setup_crypt_desc(void)
 {
+       struct device *dev = &pdev->dev;
        BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
        crypt_virt = dma_alloc_coherent(dev,
                        NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
 
 static void one_packet(dma_addr_t phys)
 {
+       struct device *dev = &pdev->dev;
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx;
        int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
        tasklet_schedule(&crypto_done_tasklet);
 }
 
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
 {
        int ret = -ENODEV;
        u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
        return ret;
 }
 
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
 {
        qmgr_disable_irq(RECV_QID);
        tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct buffer_desc src_hook;
+       struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
 
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
        unsigned int cryptlen;
        struct buffer_desc *buf, src_hook;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
+       struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
 
@@ -1418,20 +1408,30 @@ static struct ixp_alg ixp4xx_algos[] = {
 } };
 
 #define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+       .name           = DRIVER_NAME,
+       .id             = 0,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
+
 static int __init ixp_module_init(void)
 {
        int num = ARRAY_SIZE(ixp4xx_algos);
-       int i,err ;
+       int i, err ;
 
-       if (platform_device_register(&pseudo_dev))
-               return -ENODEV;
+       pdev = platform_device_register_full(&ixp_dev_info);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       dev = &pdev->dev;
 
        spin_lock_init(&desc_lock);
        spin_lock_init(&emerg_lock);
 
-       err = init_ixp_crypto();
+       err = init_ixp_crypto(&pdev->dev);
        if (err) {
-               platform_device_unregister(&pseudo_dev);
+               platform_device_unregister(pdev);
                return err;
        }
        for (i=0; i< num; i++) {
@@ -1495,8 +1495,8 @@ static void __exit ixp_module_exit(void)
                if (ixp4xx_algos[i].registered)
                        crypto_unregister_alg(&ixp4xx_algos[i].crypto);
        }
-       release_ixp_crypto();
-       platform_device_unregister(&pseudo_dev);
+       release_ixp_crypto(&pdev->dev);
+       platform_device_unregister(pdev);
 }
 
 module_init(ixp_module_init);
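
The ixp4xx_crypto changes replace a file-scope struct platform_device (with a dummy release() callback) by platform_device_register_full(), letting the driver core allocate the device and set up the 32-bit DMA mask. A minimal sketch of the idiom, with illustrative demo_* names:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device *demo_pdev;

static const struct platform_device_info demo_dev_info __initconst = {
	.name     = "demo-crypto",	/* hypothetical device name */
	.id       = 0,
	.dma_mask = DMA_BIT_MASK(32),	/* core wires up dev->dma_mask */
};

static int __init demo_register(void)
{
	demo_pdev = platform_device_register_full(&demo_dev_info);
	if (IS_ERR(demo_pdev))
		return PTR_ERR(demo_pdev);

	return 0;
}
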
index c99c00d35d34f73d7ceca6cbf1e68b163fcc502a..2e23b12c350b8759c71ae1d5a438c9aaf2089166 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/workqueue.h>
 #include <linux/platform_device.h>
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d,
 {
        struct devfreq *df = to_devfreq(d);
        struct device *dev = df->dev.parent;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        ssize_t count = 0;
        unsigned long freq = 0;
 
        rcu_read_lock();
        do {
-               opp = opp_find_freq_ceil(dev, &freq);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        break;
 
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
-                                   u32 flags)
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+                                          unsigned long *freq,
+                                          u32 flags)
 {
-       struct opp *opp;
+       struct dev_pm_opp *opp;
 
        if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
                /* The freq is an upper bound. opp should be lower */
-               opp = opp_find_freq_floor(dev, freq);
+               opp = dev_pm_opp_find_freq_floor(dev, freq);
 
                /* If not available, use the closest opp */
                if (opp == ERR_PTR(-ERANGE))
-                       opp = opp_find_freq_ceil(dev, freq);
+                       opp = dev_pm_opp_find_freq_ceil(dev, freq);
        } else {
                /* The freq is an lower bound. opp should be higher */
-               opp = opp_find_freq_ceil(dev, freq);
+               opp = dev_pm_opp_find_freq_ceil(dev, freq);
 
                /* If not available, use the closest opp */
                if (opp == ERR_PTR(-ERANGE))
-                       opp = opp_find_freq_floor(dev, freq);
+                       opp = dev_pm_opp_find_freq_floor(dev, freq);
        }
 
        return opp;
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
        int ret = 0;
 
        rcu_read_lock();
-       nh = opp_get_notifier(dev);
+       nh = dev_pm_opp_get_notifier(dev);
        if (IS_ERR(nh))
                ret = PTR_ERR(nh);
        rcu_read_unlock();
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
        int ret = 0;
 
        rcu_read_lock();
-       nh = opp_get_notifier(dev);
+       nh = dev_pm_opp_get_notifier(dev);
        if (IS_ERR(nh))
                ret = PTR_ERR(nh);
        rcu_read_unlock();
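
The devfreq changes above are a mechanical rename: the OPP library types and functions gain a dev_pm_ prefix (struct opp becomes struct dev_pm_opp, opp_find_freq_ceil() becomes dev_pm_opp_find_freq_ceil(), and so on), with the linux/opp.h include replaced by linux/pm_opp.h. The RCU locking protocol around lookups is unchanged; a short sketch of the renamed API as used in these hunks:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Look up the OPP at or above *freq and return its exact rate/voltage.
 * The opp pointer is only valid inside the RCU read-side section. */
static int demo_get_opp(struct device *dev, unsigned long *freq,
			unsigned long *volt)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*freq = dev_pm_opp_get_freq(opp);
	*volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}
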
index c5f86d8caca34c6a7a3ea2341da5c242ddf41c83..cede6f71cd63feb5afebde508199b9be9a507778 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/suspend.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data *data = platform_get_drvdata(pdev);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq;
        unsigned long old_freq = data->curr_oppinfo.rate;
        struct busfreq_opp_info new_oppinfo;
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
-       new_oppinfo.rate = opp_get_freq(opp);
-       new_oppinfo.volt = opp_get_voltage(opp);
+       new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+       new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        freq = new_oppinfo.rate;
 
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data)
                exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
 
        for (i = LV_0; i < EX4210_LV_NUM; i++) {
-               err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+               err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
                              exynos4210_busclk_table[i].volt);
                if (err) {
                        dev_err(data->dev, "Cannot add opp entries.\n");
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data)
        }
 
        for (i = 0; i < EX4x12_LV_NUM; i++) {
-               ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+               ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
                              exynos4x12_mifclk_table[i].volt);
                if (ret) {
                        dev_err(data->dev, "Fail to add opp entries.\n");
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
 {
        struct busfreq_data *data = container_of(this, struct busfreq_data,
                                                 pm_notifier);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct busfreq_opp_info new_oppinfo;
        unsigned long maxfreq = ULONG_MAX;
        int err = 0;
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
                data->disabled = true;
 
                rcu_read_lock();
-               opp = opp_find_freq_floor(data->dev, &maxfreq);
+               opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        dev_err(data->dev, "%s: unable to find a min freq\n",
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
                        mutex_unlock(&data->lock);
                        return PTR_ERR(opp);
                }
-               new_oppinfo.rate = opp_get_freq(opp);
-               new_oppinfo.volt = opp_get_voltage(opp);
+               new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+               new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
 
                err = exynos4_bus_setvolt(data, &new_oppinfo,
@@ -1020,7 +1020,7 @@ unlock:
 static int exynos4_busfreq_probe(struct platform_device *pdev)
 {
        struct busfreq_data *data;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct device *dev = &pdev->dev;
        int err = 0;
 
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
        }
 
        rcu_read_lock();
-       opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+       opp = dev_pm_opp_find_freq_floor(dev,
+                                        &exynos4_devfreq_profile.initial_freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                dev_err(dev, "Invalid initial frequency %lu kHz.\n",
                        exynos4_devfreq_profile.initial_freq);
                return PTR_ERR(opp);
        }
-       data->curr_oppinfo.rate = opp_get_freq(opp);
-       data->curr_oppinfo.volt = opp_get_voltage(opp);
+       data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
+       data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
 
        platform_set_drvdata(pdev, data);
index 574b16b59be5df13352ebee4f952e592b9a6f52f..9e3752dac99e5115bbe2df05694c06e8ca7fe2a8 100644 (file)
 #include <linux/module.h>
 #include <linux/devfreq.h>
 #include <linux/io.h>
-#include <linux/opp.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
-#include <linux/opp.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
+#include <linux/pm_opp.h>
 #include <linux/pm_qos.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of_address.h>
@@ -132,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data_int *data = platform_get_drvdata(pdev);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long old_freq, freq;
        unsigned long volt;
 
@@ -144,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
                return PTR_ERR(opp);
        }
 
-       freq = opp_get_freq(opp);
-       volt = opp_get_voltage(opp);
+       freq = dev_pm_opp_get_freq(opp);
+       volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
 
        old_freq = data->curr_freq;
@@ -246,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data)
        int i, err = 0;
 
        for (i = LV_0; i < _LV_END; i++) {
-               err = opp_add(data->dev, exynos5_int_opp_table[i].clk,
+               err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
                                exynos5_int_opp_table[i].volt);
                if (err) {
                        dev_err(data->dev, "Cannot add opp entries.\n");
@@ -262,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
 {
        struct busfreq_data_int *data = container_of(this,
                                        struct busfreq_data_int, pm_notifier);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long maxfreq = ULONG_MAX;
        unsigned long freq;
        unsigned long volt;
@@ -276,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
                data->disabled = true;
 
                rcu_read_lock();
-               opp = opp_find_freq_floor(data->dev, &maxfreq);
+               opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        err = PTR_ERR(opp);
                        goto unlock;
                }
-               freq = opp_get_freq(opp);
-               volt = opp_get_voltage(opp);
+               freq = dev_pm_opp_get_freq(opp);
+               volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
 
                err = exynos5_int_setvolt(data, volt);
@@ -316,7 +315,7 @@ unlock:
 static int exynos5_busfreq_int_probe(struct platform_device *pdev)
 {
        struct busfreq_data_int *data;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct device *dev = &pdev->dev;
        struct device_node *np;
        unsigned long initial_freq;
@@ -368,7 +367,7 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
        }
 
        rcu_read_lock();
-       opp = opp_find_freq_floor(dev,
+       opp = dev_pm_opp_find_freq_floor(dev,
                        &exynos5_devfreq_int_profile.initial_freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
@@ -377,8 +376,8 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
                err = PTR_ERR(opp);
                goto err_opp_add;
        }
-       initial_freq = opp_get_freq(opp);
-       initial_volt = opp_get_voltage(opp);
+       initial_freq = dev_pm_opp_get_freq(opp);
+       initial_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        data->curr_freq = initial_freq;
 
index 526ec77c7ba032b9af1c6772484028051a4950ef..f238cfd33847ec3c5333158ea39dac72245c8eee 100644 (file)
@@ -198,6 +198,7 @@ config TI_EDMA
        depends on ARCH_DAVINCI || ARCH_OMAP
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
+       select TI_PRIV_EDMA
        default n
        help
          Enable support for the TI EDMA controller. This DMA
index fce46c5bf1c74e3d76accde7570ffa2d423eb9f1..8c56d7856cb20fb28b6b3461b71397e78ca4edd8 100644 (file)
@@ -1252,7 +1252,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
        size_t bytes = 0;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        /*
@@ -1267,7 +1267,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 
        spin_lock_irqsave(&plchan->vc.lock, flags);
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS) {
+       if (ret != DMA_COMPLETE) {
                vd = vchan_find_desc(&plchan->vc, cookie);
                if (vd) {
                        /* On the issued list, so hasn't been processed yet */
@@ -2055,6 +2055,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        if (ret)
                return ret;
 
+       /* Ensure that we can do DMA */
+       ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto out_no_pl08x;
+
        /* Create the driver state holder */
        pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
        if (!pl08x) {
@@ -2133,8 +2138,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
        writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
 
-       ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-                         DRIVER_NAME, pl08x);
+       ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
        if (ret) {
                dev_err(&adev->dev, "%s failed to request interrupt %d\n",
                        __func__, adev->irq[0]);
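
The pl08x hunks above also show the dmaengine-wide rename of DMA_SUCCESS to DMA_COMPLETE that runs through the rest of this section. The tx_status pattern itself is unchanged; a sketch of the common shape, with the residue calculation left driver-specific:

#include <linux/dmaengine.h>

#include "dmaengine.h"	/* drivers/dma private helpers: dma_cookie_status() */

static enum dma_status demo_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
		return ret;

	/* Still in flight: report how many bytes remain (0 here as a
	 * placeholder for the driver-specific calculation). */
	dma_set_residue(txstate, 0);

	return ret;
}
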
index c787f38a186a008a6cf8fa4af1dc9d19cab8f836..1ef74579447d5c0cc201cab4425748be75368eef 100644 (file)
@@ -1102,7 +1102,7 @@ atc_tx_status(struct dma_chan *chan,
        int bytes = 0;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
        /*
         * There's no point calculating the residue if there's
index 31011d2a26fcfa0510b64c1114c1f87c05762ec3..3c6716e0b78eee2592a8c977550ce036ab4318c5 100644 (file)
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
 
-       err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+       err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
                               "coh901318", base);
        if (err)
                return err;
index 7c82b92f9b16f18bf1ce4e6bedb988f5d5b70e22..278b3058919a9f9de2fbc113d221d0f835c249a2 100644 (file)
@@ -353,7 +353,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
 
        /* lock */
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (txstate && ret == DMA_SUCCESS)
+       if (txstate && ret == DMA_COMPLETE)
                txstate->residue = c->residue;
        /* unlock */
 
@@ -674,14 +674,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
        }
 }
 
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
 {
        struct cppi41_channel *cchan;
        int i;
        int ret;
        u32 n_chans;
 
-       ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+       ret = of_property_read_u32(dev->of_node, "#dma-channels",
                        &n_chans);
        if (ret)
                return ret;
@@ -719,7 +719,7 @@ err:
        return -ENOMEM;
 }
 
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
 {
        unsigned int mem_decs;
        int i;
@@ -731,7 +731,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
                cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
                cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
 
-               dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+               dma_free_coherent(dev, mem_decs, cdd->cd,
                                cdd->descs_phys);
        }
 }
@@ -741,19 +741,19 @@ static void disable_sched(struct cppi41_dd *cdd)
        cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
        disable_sched(cdd);
 
-       purge_descs(pdev, cdd);
+       purge_descs(dev, cdd);
 
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-       dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+       dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
                        cdd->scratch_phys);
 }
 
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
 {
        unsigned int desc_size;
        unsigned int mem_decs;
@@ -777,7 +777,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
                reg |= ilog2(ALLOC_DECS_NUM) - 5;
 
                BUILD_BUG_ON(DESCS_AREAS != 1);
-               cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+               cdd->cd = dma_alloc_coherent(dev, mem_decs,
                                &cdd->descs_phys, GFP_KERNEL);
                if (!cdd->cd)
                        return -ENOMEM;
@@ -813,12 +813,12 @@ static void init_sched(struct cppi41_dd *cdd)
        cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
        int ret;
 
        BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
-       cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+       cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
                        &cdd->scratch_phys, GFP_KERNEL);
        if (!cdd->qmgr_scratch)
                return -ENOMEM;
@@ -827,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
        cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
 
-       ret = init_descs(pdev, cdd);
+       ret = init_descs(dev, cdd);
        if (ret)
                goto err_td;
 
@@ -835,7 +835,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
        init_sched(cdd);
        return 0;
 err_td:
-       deinit_cpii41(pdev, cdd);
+       deinit_cppi41(dev, cdd);
        return ret;
 }
 
@@ -914,11 +914,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
 
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
 {
        const struct of_device_id *of_id;
 
-       of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+       of_id = of_match_node(cppi41_dma_ids, dev->of_node);
        if (!of_id)
                return NULL;
        return of_id->data;
@@ -927,11 +927,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
 static int cppi41_dma_probe(struct platform_device *pdev)
 {
        struct cppi41_dd *cdd;
+       struct device *dev = &pdev->dev;
        const struct cppi_glue_infos *glue_info;
        int irq;
        int ret;
 
-       glue_info = get_glue_info(pdev);
+       glue_info = get_glue_info(dev);
        if (!glue_info)
                return -EINVAL;
 
@@ -946,14 +947,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
        cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
        cdd->ddev.device_control = cppi41_dma_control;
-       cdd->ddev.dev = &pdev->dev;
+       cdd->ddev.dev = dev;
        INIT_LIST_HEAD(&cdd->ddev.channels);
        cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
 
-       cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
-       cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
-       cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
-       cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+       cdd->usbss_mem = of_iomap(dev->of_node, 0);
+       cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+       cdd->sched_mem = of_iomap(dev->of_node, 2);
+       cdd->qmgr_mem = of_iomap(dev->of_node, 3);
 
        if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
                        !cdd->qmgr_mem) {
@@ -961,8 +962,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
                goto err_remap;
        }
 
-       pm_runtime_enable(&pdev->dev);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
        if (ret)
                goto err_get_sync;
 
@@ -970,22 +971,22 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        cdd->queues_tx = glue_info->queues_tx;
        cdd->td_queue = glue_info->td_queue;
 
-       ret = init_cppi41(pdev, cdd);
+       ret = init_cppi41(dev, cdd);
        if (ret)
                goto err_init_cppi;
 
-       ret = cppi41_add_chans(pdev, cdd);
+       ret = cppi41_add_chans(dev, cdd);
        if (ret)
                goto err_chans;
 
-       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       irq = irq_of_parse_and_map(dev->of_node, 0);
        if (!irq)
                goto err_irq;
 
        cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
        ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
-                       dev_name(&pdev->dev), cdd);
+                       dev_name(dev), cdd);
        if (ret)
                goto err_irq;
        cdd->irq = irq;
@@ -994,7 +995,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        if (ret)
                goto err_dma_reg;
 
-       ret = of_dma_controller_register(pdev->dev.of_node,
+       ret = of_dma_controller_register(dev->of_node,
                        cppi41_dma_xlate, &cpp41_dma_info);
        if (ret)
                goto err_of;
@@ -1009,11 +1010,11 @@ err_irq:
        cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
        cleanup_chans(cdd);
 err_chans:
-       deinit_cpii41(pdev, cdd);
+       deinit_cppi41(dev, cdd);
 err_init_cppi:
-       pm_runtime_put(&pdev->dev);
+       pm_runtime_put(dev);
 err_get_sync:
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_disable(dev);
        iounmap(cdd->usbss_mem);
        iounmap(cdd->ctrl_mem);
        iounmap(cdd->sched_mem);
@@ -1033,7 +1034,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
        cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
        free_irq(cdd->irq, cdd);
        cleanup_chans(cdd);
-       deinit_cpii41(pdev, cdd);
+       deinit_cppi41(&pdev->dev, cdd);
        iounmap(cdd->usbss_mem);
        iounmap(cdd->ctrl_mem);
        iounmap(cdd->sched_mem);
@@ -1044,12 +1045,41 @@ static int cppi41_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+       cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+       disable_sched(cdd);
+
+       return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       int i;
+
+       for (i = 0; i < DESCS_AREAS; i++)
+               cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+       init_sched(cdd);
+       cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
 static struct platform_driver cpp41_dma_driver = {
        .probe  = cppi41_dma_probe,
        .remove = cppi41_dma_remove,
        .driver = {
                .name = "cppi41-dma-engine",
                .owner = THIS_MODULE,
+               .pm = &cppi41_pm_ops,
                .of_match_table = of_match_ptr(cppi41_dma_ids),
        },
 };
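
cppi41 also gains system sleep support above via SIMPLE_DEV_PM_OPS(). Assuming CONFIG_PM_SLEEP=y and the usual definition of that macro, the generated ops are roughly equivalent to:

#include <linux/pm.h>

/* Illustrative expansion only; the real cppi41_pm_ops comes from the
 * SIMPLE_DEV_PM_OPS() line in the hunk above. */
static const struct dev_pm_ops cppi41_pm_ops_expanded = {
	.suspend  = cppi41_suspend,
	.resume   = cppi41_resume,
	.freeze   = cppi41_suspend,
	.thaw     = cppi41_resume,
	.poweroff = cppi41_suspend,
	.restore  = cppi41_resume,
};
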
index b0c0c8268d42bb023ac94ca8f420e22a5a197c37..94c380f0753860c4c4002c19f4217332fa2460f0 100644 (file)
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
        unsigned long flags;
 
        status = dma_cookie_status(c, cookie, state);
-       if (status == DMA_SUCCESS || !state)
+       if (status == DMA_COMPLETE || !state)
                return status;
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
index 9162ac80c18f303ac9a509eb97298eba33d4753b..81d876528c70d60152628b04bb41b08d860ec565 100644 (file)
@@ -1062,7 +1062,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
        if (!tx)
-               return DMA_SUCCESS;
+               return DMA_COMPLETE;
 
        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
index 92f796cdc6ab1dc12c6b895fb2d6e16e5dd92264..59e287f56dfca28bb0ff40fc79b6c0e612af58ac 100644 (file)
@@ -740,7 +740,7 @@ static int dmatest_func(void *data)
                                          len, 0);
                        failed_tests++;
                        continue;
-               } else if (status != DMA_SUCCESS) {
+               } else if (status != DMA_COMPLETE) {
                        enum dmatest_error_type type = (status == DMA_ERROR) ?
                                DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
                        thread_result_add(info, result, type,
index 89eb89f222846e0ff5d20cfc5e14619fc05d6600..2c29331571e4a142f2f2b965a15ebbc26657bb2a 100644 (file)
@@ -1098,13 +1098,13 @@ dwc_tx_status(struct dma_chan *chan,
        enum dma_status         ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS)
+       if (ret != DMA_COMPLETE)
                dma_set_residue(txstate, dwc_get_residue(dwc));
 
        if (dwc->paused && ret == DMA_IN_PROGRESS)
index e35d97590311329fe1f7bd93be5cc4b845f3a7c2..453822cc4f9d3a9a9c7b62626c7362c13ee8b931 100644 (file)
@@ -191,11 +191,9 @@ static int dw_probe(struct platform_device *pdev)
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);
 
-       /* Apply default dma_mask if needed */
-       if (!dev->dma_mask) {
-               dev->dma_mask = &dev->coherent_dma_mask;
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       }
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        pdata = dev_get_platdata(dev);
        if (!pdata)
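
dma_coerce_mask_and_coherent() replaces the removed open-coded sequence. Roughly, it forces dev->dma_mask to track the coherent mask and then validates and sets both; the sketch below restates that behaviour under a hypothetical name and is not a copy of the kernel helper.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* rough restatement of dma_coerce_mask_and_coherent() (illustrative only) */
static int coerce_mask_sketch(struct device *dev, u64 mask)
{
        /* make the streaming mask share storage with the coherent mask ... */
        dev->dma_mask = &dev->coherent_dma_mask;
        /* ... then validate and set both masks in one go */
        return dma_set_mask_and_coherent(dev, mask);
}
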
index ff50ff4c6a57148c3a015a6ba3ebb1c81f69c6f8..57c3f3e5321257a11e26b9e2277213b3c65f64a2 100644 (file)
 #define EDMA_CHANS     64
 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */
 
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG              16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be at least the number of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires at least 17 slots, so we set up the default to 20.
+ */
+#define MAX_NR_SG              20
 #define EDMA_MAX_SLOTS         MAX_NR_SG
 #define EDMA_DESCRIPTORS       16
 
@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        return ret;
 }
 
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and set up.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: Device FIFO width, used as the A count
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+       dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+       enum dma_slave_buswidth dev_width, unsigned int dma_length,
+       enum dma_transfer_direction direction)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct device *dev = chan->device->dev;
+       int acnt, bcnt, ccnt, cidx;
+       int src_bidx, dst_bidx, src_cidx, dst_cidx;
+       int absync;
+
+       acnt = dev_width;
+       /*
+        * If the maxburst is equal to the fifo width, use
+        * A-synced transfers. This allows for large contiguous
+        * buffer transfers using only one PaRAM set.
+        */
+       if (burst == 1) {
+               /*
+                * For the A-sync case, bcnt and ccnt are the remainder
+                * and quotient respectively of the division of:
+                * (dma_length / acnt) by (SZ_64K - 1). This is so
+                * that in case bcnt overflows, we have ccnt to use.
+                * Note: bcntrld is used only in the A-sync case, and it
+                * only applies for sg_dma_len(sg) >= SZ_64K.
+                * In that case, the approach taken is: bcnt for the
+                * first frame will be the remainder below. Then for
+                * every successive frame, bcnt will be SZ_64K-1. This
+                * is assured as bcntrld = 0xffff at the end of this function.
+                */
+               absync = false;
+               ccnt = dma_length / acnt / (SZ_64K - 1);
+               bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+               /*
+                * If bcnt is non-zero, we have a remainder and hence an
+                * extra frame to transfer, so increment ccnt.
+                */
+               if (bcnt)
+                       ccnt++;
+               else
+                       bcnt = SZ_64K - 1;
+               cidx = acnt;
+       } else {
+               /*
+                * If maxburst is greater than the fifo address_width,
+                * use AB-synced transfers where A count is the fifo
+                * address_width and B count is the maxburst. In this
+                * case, we are limited to transfers of C count frames
+                * of (address_width * maxburst) where C count is limited
+                * to SZ_64K-1. This places an upper bound on the length
+                * of an SG segment that can be handled.
+                */
+               absync = true;
+               bcnt = burst;
+               ccnt = dma_length / (acnt * bcnt);
+               if (ccnt > (SZ_64K - 1)) {
+                       dev_err(dev, "Exceeded max SG segment size\n");
+                       return -EINVAL;
+               }
+               cidx = acnt * bcnt;
+       }
+
+       if (direction == DMA_MEM_TO_DEV) {
+               src_bidx = acnt;
+               src_cidx = cidx;
+               dst_bidx = 0;
+               dst_cidx = 0;
+       } else if (direction == DMA_DEV_TO_MEM)  {
+               src_bidx = 0;
+               src_cidx = 0;
+               dst_bidx = acnt;
+               dst_cidx = cidx;
+       } else {
+               dev_err(dev, "%s: direction not implemented yet\n", __func__);
+               return -EINVAL;
+       }
+
+       pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+       /* Configure A or AB synchronized transfers */
+       if (absync)
+               pset->opt |= SYNCDIM;
+
+       pset->src = src_addr;
+       pset->dst = dst_addr;
+
+       pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+       pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+       pset->a_b_cnt = bcnt << 16 | acnt;
+       pset->ccnt = ccnt;
+       /*
+        * The only time the (bcntrld) auto reload is required is in the
+        * A-sync case, and then only a reload value of SZ_64K-1 is
+        * needed. 'link' is initially set to NULL here and is populated
+        * later by edma_execute.
+        */
+       pset->link_bcntrld = 0xffffffff;
+       return absync;
+}
+
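
The A-sync branch of edma_config_pset() splits (dma_length / acnt) into bcnt/ccnt so a transfer longer than SZ_64K-1 frames still fits in one PaRAM set. Below is a stand-alone, user-space check of that arithmetic with hypothetical sizes; it is illustrative only and not driver code.

#include <stdio.h>

#define SZ_64K 0x10000

int main(void)
{
        unsigned int dma_length = 200000;       /* bytes, hypothetical */
        unsigned int acnt = 4;                  /* dev_width = 4 bytes */
        unsigned int ccnt = dma_length / acnt / (SZ_64K - 1);
        unsigned int bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);

        if (bcnt)
                ccnt++;                 /* partial frame becomes one more frame */
        else
                bcnt = SZ_64K - 1;      /* length was an exact multiple */

        /* prints acnt=4 bcnt=50000 ccnt=1 for these inputs */
        printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
        return 0;
}
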
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        struct edma_desc *edesc;
-       dma_addr_t dev_addr;
+       dma_addr_t src_addr = 0, dst_addr = 0;
        enum dma_slave_buswidth dev_width;
        u32 burst;
        struct scatterlist *sg;
-       int acnt, bcnt, ccnt, src, dst, cidx;
-       int src_bidx, dst_bidx, src_cidx, dst_cidx;
-       int i, nslots;
+       int i, nslots, ret;
 
        if (unlikely(!echan || !sgl || !sg_len))
                return NULL;
 
        if (direction == DMA_DEV_TO_MEM) {
-               dev_addr = echan->cfg.src_addr;
+               src_addr = echan->cfg.src_addr;
                dev_width = echan->cfg.src_addr_width;
                burst = echan->cfg.src_maxburst;
        } else if (direction == DMA_MEM_TO_DEV) {
-               dev_addr = echan->cfg.dst_addr;
+               dst_addr = echan->cfg.dst_addr;
                dev_width = echan->cfg.dst_addr_width;
                burst = echan->cfg.dst_maxburst;
        } else {
@@ -306,6 +421,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                                                EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
                                dev_err(dev, "Failed to allocate slot\n");
+                               kfree(edesc);
                                return NULL;
                        }
                }
@@ -313,63 +429,19 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
        /* Configure PaRAM sets for each SG */
        for_each_sg(sgl, sg, sg_len, i) {
+               /* Get address for each SG */
+               if (direction == DMA_DEV_TO_MEM)
+                       dst_addr = sg_dma_address(sg);
+               else
+                       src_addr = sg_dma_address(sg);
 
-               acnt = dev_width;
+               ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+                                      dst_addr, burst, dev_width,
+                                      sg_dma_len(sg), direction);
+               if (ret < 0) {
+                       kfree(edesc);
+                       return NULL;
+               }
 
-               /*
-                * If the maxburst is equal to the fifo width, use
-                * A-synced transfers. This allows for large contiguous
-                * buffer transfers using only one PaRAM set.
-                */
-               if (burst == 1) {
-                       edesc->absync = false;
-                       ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-                       bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-                       if (bcnt)
-                               ccnt++;
-                       else
-                               bcnt = SZ_64K - 1;
-                       cidx = acnt;
-               /*
-                * If maxburst is greater than the fifo address_width,
-                * use AB-synced transfers where A count is the fifo
-                * address_width and B count is the maxburst. In this
-                * case, we are limited to transfers of C count frames
-                * of (address_width * maxburst) where C count is limited
-                * to SZ_64K-1. This places an upper bound on the length
-                * of an SG segment that can be handled.
-                */
-               } else {
-                       edesc->absync = true;
-                       bcnt = burst;
-                       ccnt = sg_dma_len(sg) / (acnt * bcnt);
-                       if (ccnt > (SZ_64K - 1)) {
-                               dev_err(dev, "Exceeded max SG segment size\n");
-                               return NULL;
-                       }
-                       cidx = acnt * bcnt;
-               }
-
-               if (direction == DMA_MEM_TO_DEV) {
-                       src = sg_dma_address(sg);
-                       dst = dev_addr;
-                       src_bidx = acnt;
-                       src_cidx = cidx;
-                       dst_bidx = 0;
-                       dst_cidx = 0;
-               } else {
-                       src = dev_addr;
-                       dst = sg_dma_address(sg);
-                       src_bidx = 0;
-                       src_cidx = 0;
-                       dst_bidx = acnt;
-                       dst_cidx = cidx;
-               }
-
-               edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-               /* Configure A or AB synchronized transfers */
-               if (edesc->absync)
-                       edesc->pset[i].opt |= SYNCDIM;
+               edesc->absync = ret;
 
                /* If this is the last in a current SG set of transactions,
                   enable interrupts so that next set is processed */
@@ -379,17 +451,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                /* If this is the last set, enable completion interrupt flag */
                if (i == sg_len - 1)
                        edesc->pset[i].opt |= TCINTEN;
-
-               edesc->pset[i].src = src;
-               edesc->pset[i].dst = dst;
-
-               edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-               edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-               edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-               edesc->pset[i].ccnt = ccnt;
-               edesc->pset[i].link_bcntrld = 0xffffffff;
-
        }
 
        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -576,7 +637,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
        unsigned long flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS || !txstate)
+       if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
        spin_lock_irqsave(&echan->vchan.lock, flags);
@@ -631,6 +692,10 @@ static int edma_probe(struct platform_device *pdev)
        struct edma_cc *ecc;
        int ret;
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
        if (!ecc) {
                dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -702,11 +767,13 @@ static struct platform_device *pdev0, *pdev1;
 static const struct platform_device_info edma_dev_info0 = {
        .name = "edma-dma-engine",
        .id = 0,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct platform_device_info edma_dev_info1 = {
        .name = "edma-dma-engine",
        .id = 1,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static int edma_init(void)
@@ -720,8 +787,6 @@ static int edma_init(void)
                        ret = PTR_ERR(pdev0);
                        goto out;
                }
-               pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
-               pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
 
        if (EDMA_CTLRS == 2) {
@@ -731,8 +796,6 @@ static int edma_init(void)
                        platform_device_unregister(pdev0);
                        ret = PTR_ERR(pdev1);
                }
-               pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
-               pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
 
 out:
@@ -749,6 +812,6 @@ static void __exit edma_exit(void)
 }
 module_exit(edma_exit);
 
-MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
+MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
 MODULE_DESCRIPTION("TI EDMA DMA engine driver");
 MODULE_LICENSE("GPL v2");
index 78f8ca5fccee91e7a422c419df4ee0ab427d4ad4..2af4028cc23e7abb1c5e39a41b54012b588c9490 100644 (file)
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        int chno = imxdmac->channel;
        struct imxdma_desc *desc;
+       unsigned long flags;
 
-       spin_lock(&imxdma->lock);
+       spin_lock_irqsave(&imxdma->lock, flags);
        if (list_empty(&imxdmac->ld_active)) {
-               spin_unlock(&imxdma->lock);
+               spin_unlock_irqrestore(&imxdma->lock, flags);
                goto out;
        }
 
        desc = list_first_entry(&imxdmac->ld_active,
                                struct imxdma_desc,
                                node);
-       spin_unlock(&imxdma->lock);
+       spin_unlock_irqrestore(&imxdma->lock, flags);
 
        if (desc->sg) {
                u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 {
        struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
-       unsigned long flags;
        int slot = -1;
        int i;
 
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
        switch (d->type) {
        case IMXDMA_DESC_INTERLEAVED:
                /* Try to get a free 2D slot */
-               spin_lock_irqsave(&imxdma->lock, flags);
                for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
                        if ((imxdma->slots_2d[i].count > 0) &&
                        ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
                        slot = i;
                        break;
                }
-               if (slot < 0) {
-                       spin_unlock_irqrestore(&imxdma->lock, flags);
+               if (slot < 0)
                        return -EBUSY;
-               }
 
                imxdma->slots_2d[slot].xsr = d->x;
                imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 
                imxdmac->slot_2d = slot;
                imxdmac->enabled_2d = true;
-               spin_unlock_irqrestore(&imxdma->lock, flags);
 
                if (slot == IMX_DMA_2D_SLOT_A) {
                        d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
        struct imxdma_channel *imxdmac = (void *)data;
        struct imxdma_engine *imxdma = imxdmac->imxdma;
        struct imxdma_desc *desc;
+       unsigned long flags;
 
-       spin_lock(&imxdma->lock);
+       spin_lock_irqsave(&imxdma->lock, flags);
 
        if (list_empty(&imxdmac->ld_active)) {
                /* Someone might have called terminate all */
-               goto out;
+               spin_unlock_irqrestore(&imxdma->lock, flags);
+               return;
        }
        desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
 
-       if (desc->desc.callback)
-               desc->desc.callback(desc->desc.callback_param);
-
        /* If we are dealing with a cyclic descriptor, keep it on ld_active
         * and dont mark the descriptor as complete.
         * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
                                 __func__, imxdmac->channel);
        }
 out:
-       spin_unlock(&imxdma->lock);
+       spin_unlock_irqrestore(&imxdma->lock, flags);
+
+       if (desc->desc.callback)
+               desc->desc.callback(desc->desc.callback_param);
+
 }
 
 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
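
The tasklet rework above still takes the completed descriptor under imxdma->lock, but now invokes the client callback only after dropping it, since dmaengine callbacks may prepare and submit new descriptors and retake the channel lock. A minimal sketch of that ordering with generic, hypothetical names:

#include <linux/spinlock.h>
#include <linux/dmaengine.h>

struct foo_chan {                               /* hypothetical channel */
        spinlock_t lock;
        struct dma_async_tx_descriptor *done;   /* last completed descriptor */
};

static void foo_tasklet(unsigned long data)
{
        struct foo_chan *c = (struct foo_chan *)data;
        struct dma_async_tx_descriptor *txd;
        unsigned long flags;

        /* take the completed descriptor under the lock ... */
        spin_lock_irqsave(&c->lock, flags);
        txd = c->done;
        c->done = NULL;
        spin_unlock_irqrestore(&c->lock, flags);

        /* ... but run the client callback outside it, since the callback
         * may submit new work and retake c->lock */
        if (txd && txd->callback)
                txd->callback(txd->callback_param);
}
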
@@ -772,7 +771,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
                desc->desc.tx_submit = imxdma_tx_submit;
                /* txd.flags will be overwritten in prep funcs */
                desc->desc.flags = DMA_CTRL_ACK;
-               desc->status = DMA_SUCCESS;
+               desc->status = DMA_COMPLETE;
 
                list_add_tail(&desc->node, &imxdmac->ld_free);
                imxdmac->descs_allocated++;
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
        kfree(imxdmac->sg_list);
 
        imxdmac->sg_list = kcalloc(periods + 1,
-                       sizeof(struct scatterlist), GFP_KERNEL);
+                       sizeof(struct scatterlist), GFP_ATOMIC);
        if (!imxdmac->sg_list)
                return NULL;
 
index fc43603cf0bbeca883aa260d880c86898b49e793..e43c040dfe0bd6d8bba59bfe3e8e3aa608d3a95e 100644 (file)
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        if (error)
                sdmac->status = DMA_ERROR;
        else
-               sdmac->status = DMA_SUCCESS;
+               sdmac->status = DMA_COMPLETE;
 
        dma_cookie_complete(&sdmac->desc);
        if (sdmac->desc.callback)
@@ -1432,6 +1432,10 @@ static int __init sdma_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;
index a975ebebea8aaf9b8950497eefdcaf3793d930d2..1aab8130efa1c75ae51906938447c91d12a66ddc 100644 (file)
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                callback_txd(param_txd);
        }
        if (midc->raw_tfr) {
-               desc->status = DMA_SUCCESS;
+               desc->status = DMA_COMPLETE;
                if (desc->lli != NULL) {
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS) {
+       if (ret != DMA_COMPLETE) {
                spin_lock_bh(&midc->lock);
                midc_scan_descriptors(to_middma_device(chan->device), midc);
                spin_unlock_bh(&midc->lock);
index 5ff6fc1819dc6a2e90c035956b23e23c56f9bb5d..a0f0fce5a84e1aefc27dadeea28cbca528cb8f85 100644 (file)
@@ -733,7 +733,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(c, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        device->cleanup_fn((unsigned long) c);
@@ -859,7 +859,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 
        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL)
-                                       != DMA_SUCCESS) {
+                                       != DMA_COMPLETE) {
                dev_err(dev, "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
index d8ececaf1b57082cc5709aca68714542657b5566..806b4ce5e38c79d84a5d759b108798dbd8b3b22a 100644 (file)
@@ -807,7 +807,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(c, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        ioat3_cleanup(ioat);
@@ -1468,7 +1468,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
@@ -1530,7 +1530,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
@@ -1577,7 +1577,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
index dd8b44a56e5d0f7090b8dd65bce87a73a60c90ef..408fe6be15f4f394a1a3467e7822d92031af4ec3 100644 (file)
@@ -864,7 +864,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
        int ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +983,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
        msleep(1);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-                       DMA_SUCCESS) {
+                       DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
@@ -1083,7 +1083,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
        iop_adma_issue_pending(dma_chan);
        msleep(8);
 
-       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test zero sum timed out, disabling\n");
                err = -ENODEV;
@@ -1158,7 +1158,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
        iop_adma_issue_pending(dma_chan);
        msleep(8);
 
-       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test non-zero sum timed out, disabling\n");
                err = -ENODEV;
@@ -1254,7 +1254,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dev, "Self-test pq timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
@@ -1291,7 +1291,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
@@ -1323,7 +1323,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
index a2c330f5f9521302ed04998dfb38eecb60b2f894..e26075408e9b95a365dfd188cad786593604412f 100644 (file)
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        size_t bytes = 0;
 
        ret = dma_cookie_status(&c->vc.chan, cookie, state);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        spin_lock_irqsave(&c->vc.lock, flags);
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
 
        irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, irq,
-                       k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+                       k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;
 
index ff8d7827f8cbe80e2c66d78db1f50ad6fdd91b5a..dcb1e05149a7664c6e65a214d783080d540aaafd 100644 (file)
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
                 * move the descriptors to a temporary list so we can drop
                 * the lock during the entire cleanup operation
                 */
-               list_del(&desc->node);
-               list_add(&desc->node, &chain_cleanup);
+               list_move(&desc->node, &chain_cleanup);
 
                /*
                 * Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
 
        if (irq) {
                ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+                       mmp_pdma_chan_handler, 0, "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
                ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+                       mmp_pdma_int_handler, 0, "pdma", pdev);
                if (ret)
                        return ret;
        }
index 38cb517fb2ebd82034b02e2ab7d5cabfc9992cbd..2b4026d1f31d52d524d202e7eb7c1752dcc0b04b 100644 (file)
 #define TDCR_BURSTSZ_16B       (0x3 << 6)
 #define TDCR_BURSTSZ_32B       (0x6 << 6)
 #define TDCR_BURSTSZ_64B       (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B            (0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B            (0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B            (0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B            (0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B   (0x3 << 6)
 #define TDCR_BURSTSZ_SQU_32B   (0x7 << 6)
 #define TDCR_BURSTSZ_128B      (0x5 << 6)
 #define TDCR_DSTDIR_MSK                (0x3 << 4)      /* Dst Direction */
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
        /* disable irq */
        writel(0, tdmac->reg_base + TDIMR);
 
-       tdmac->status = DMA_SUCCESS;
+       tdmac->status = DMA_COMPLETE;
 }
 
 static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
                        return -EINVAL;
                }
        } else if (tdmac->type == PXA910_SQU) {
-               tdcr |= TDCR_BURSTSZ_SQU_32B;
                tdcr |= TDCR_SSPMOD;
+
+               switch (tdmac->burst_sz) {
+               case 1:
+                       tdcr |= TDCR_BURSTSZ_SQU_1B;
+                       break;
+               case 2:
+                       tdcr |= TDCR_BURSTSZ_SQU_2B;
+                       break;
+               case 4:
+                       tdcr |= TDCR_BURSTSZ_SQU_4B;
+                       break;
+               case 8:
+                       tdcr |= TDCR_BURSTSZ_SQU_8B;
+                       break;
+               case 16:
+                       tdcr |= TDCR_BURSTSZ_SQU_16B;
+                       break;
+               case 32:
+                       tdcr |= TDCR_BURSTSZ_SQU_32B;
+                       break;
+               default:
+                       dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+                       return -EINVAL;
+               }
        }
 
        writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
 
        if (tdmac->irq) {
                ret = devm_request_irq(tdmac->dev, tdmac->irq,
-                       mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+                       mmp_tdma_chan_handler, 0, "tdma", tdmac);
                if (ret)
                        return ret;
        }
@@ -370,7 +398,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
        int num_periods = buf_len / period_len;
        int i = 0, buf = 0;
 
-       if (tdmac->status != DMA_SUCCESS)
+       if (tdmac->status != DMA_COMPLETE)
                return NULL;
 
        if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -504,7 +532,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
        tdmac->idx         = idx;
        tdmac->type        = type;
        tdmac->reg_base    = (unsigned long)tdev->base + idx * 4;
-       tdmac->status = DMA_SUCCESS;
+       tdmac->status = DMA_COMPLETE;
        tdev->tdmac[tdmac->idx] = tdmac;
        tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
 
@@ -559,7 +587,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        if (irq_num != chan_num) {
                irq = platform_get_irq(pdev, 0);
                ret = devm_request_irq(&pdev->dev, irq,
-                       mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+                       mmp_tdma_int_handler, 0, "tdma", tdev);
                if (ret)
                        return ret;
        }
index 536dcb8ba5fdfe69ed5f726fc6b5897f00266698..8d5bce9e867e4fc40225fe642019aa9cb47fb6c0 100644 (file)
@@ -749,7 +749,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS) {
+       if (ret == DMA_COMPLETE) {
                mv_xor_clean_completed_slots(mv_chan);
                return ret;
        }
@@ -874,7 +874,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
        msleep(1);
 
        if (mv_xor_status(dma_chan, cookie, NULL) !=
-           DMA_SUCCESS) {
+           DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
@@ -968,7 +968,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
        msleep(8);
 
        if (mv_xor_status(dma_chan, cookie, NULL) !=
-           DMA_SUCCESS) {
+           DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
index ccd13df841db790ff9eabc9cbc9df79f5f8bb9af..7ab7cecc48a4abb14a21f59834f577b1f95e913a 100644 (file)
@@ -224,7 +224,7 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-       mxs_chan->status = DMA_SUCCESS;
+       mxs_chan->status = DMA_COMPLETE;
 }
 
 static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -312,12 +312,12 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
                        if (mxs_chan->flags & MXS_DMA_SG_LOOP)
                                mxs_chan->status = DMA_IN_PROGRESS;
                        else
-                               mxs_chan->status = DMA_SUCCESS;
+                               mxs_chan->status = DMA_COMPLETE;
                }
 
                stat1 &= ~(1 << channel);
 
-               if (mxs_chan->status == DMA_SUCCESS)
+               if (mxs_chan->status == DMA_COMPLETE)
                        dma_cookie_complete(&mxs_chan->desc);
 
                /* schedule tasklet on this channel */
index ec3fc4fd9160e8aeddf16054cd405f35b43bfef7..2f66cf4e54fe367754378c3c8be213fe20bb8f64 100644 (file)
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        unsigned long flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS || !txstate)
+       if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
        spin_lock_irqsave(&c->vc.lock, flags);
index a562d24d20bf55179436d16086ca90f63d1b1894..a4568297341b33e558dc568578079788091ac785 100644 (file)
@@ -2903,6 +2903,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        pdat = dev_get_platdata(&adev->dev);
 
+       ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        /* Allocate a new DMAC and its Channels */
        pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
        if (!pdmac) {
@@ -2922,16 +2926,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        amba_set_drvdata(adev, pdmac);
 
-       irq = adev->irq[0];
-       ret = request_irq(irq, pl330_irq_handler, 0,
-                       dev_name(&adev->dev), pi);
-       if (ret)
-               return ret;
+       for (i = 0; i < AMBA_NR_IRQS; i++) {
+               irq = adev->irq[i];
+               if (irq) {
+                       ret = devm_request_irq(&adev->dev, irq,
+                                              pl330_irq_handler, 0,
+                                              dev_name(&adev->dev), pi);
+                       if (ret)
+                               return ret;
+               } else {
+                       break;
+               }
+       }
 
        pi->pcfg.periph_id = adev->periphid;
        ret = pl330_add(pi);
        if (ret)
-               goto probe_err1;
+               return ret;
 
        INIT_LIST_HEAD(&pdmac->desc_pool);
        spin_lock_init(&pdmac->pool_lock);
@@ -3044,8 +3055,6 @@ probe_err3:
        }
 probe_err2:
        pl330_del(pi);
-probe_err1:
-       free_irq(irq, pi);
 
        return ret;
 }
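
Switching to devm_request_irq() ties each IRQ to the device's lifetime, which is why the explicit free_irq() calls disappear from the error path here and from pl330_remove() below. A minimal sketch of the managed pattern with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;

        /* released automatically when the device is unbound; no free_irq()
         * needed in the error path or in remove() */
        return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
                                dev_name(&pdev->dev), pdev);
}
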
@@ -3055,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev)
        struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
        struct dma_pl330_chan *pch, *_p;
        struct pl330_info *pi;
-       int irq;
 
        if (!pdmac)
                return 0;
@@ -3082,9 +3090,6 @@ static int pl330_remove(struct amba_device *adev)
 
        pl330_del(pi);
 
-       irq = adev->irq[0];
-       free_irq(irq, pi);
-
        return 0;
 }
 
index 370ff8265630cf05cdb64a571e9acb7d9064e08e..60e02ae38b04a46f4327ea76c0590b427808196d 100644 (file)
@@ -3891,7 +3891,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
 
        ppc440spe_chan = to_ppc440spe_adma_chan(chan);
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        ppc440spe_adma_slot_cleanup(ppc440spe_chan);
index 461a91ab70bb4feca82cd27c1582f81c2905bcde..ab26d46bbe1598434625979abeb488d5199992d9 100644 (file)
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
        enum dma_status ret;
 
        ret = dma_cookie_status(&c->vc.chan, cookie, state);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        if (!state)
index 45a520281ce10c7e1c8bf2ef6d18e6f253f72310..ebad84591a6e22bd1f28866c3f0b1946eba3dff0 100644 (file)
@@ -93,6 +93,7 @@ struct hpb_dmae_chan {
        void __iomem *base;
        const struct hpb_dmae_slave_config *cfg;
        char dev_id[16];                /* unique name per DMAC of channel */
+       dma_addr_t slave_addr;
 };
 
 struct hpb_dmae_device {
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
                hpb_chan->xfer_mode = XFER_DOUBLE;
        } else {
                dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
-               shdma_free_irq(&hpb_chan->shdma_chan);
                return -EINVAL;
        }
 
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
        return 0;
 }
 
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
+                             dma_addr_t slave_addr, bool try)
 {
        struct hpb_dmae_chan *chan = to_chan(schan);
        const struct hpb_dmae_slave_config *sc =
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
        if (try)
                return 0;
        chan->cfg = sc;
+       chan->slave_addr = slave_addr ? : sc->addr;
        return hpb_dmae_alloc_chan_resources(chan, sc);
 }
 
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
 {
        struct hpb_dmae_chan *chan = to_chan(schan);
 
-       return chan->cfg->addr;
+       return chan->slave_addr;
 }
 
 static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
        shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
                BUG_ON(!schan);
 
-               shdma_free_irq(schan);
                shdma_chan_remove(schan);
        }
        dma_dev->chancnt = 0;
index d94ab592cc1bb21b92c851debe381609b599fa48..2e7b394def8058e4d1216ad8a07c785db4d87048 100644 (file)
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
         * If we don't find cookie on the queue, it has been aborted and we have
         * to report error
         */
-       if (status != DMA_SUCCESS) {
+       if (status != DMA_COMPLETE) {
                struct shdma_desc *sdesc;
                status = DMA_ERROR;
                list_for_each_entry(sdesc, &schan->ld_queue, node)
index 1069e8869f20762928ecbbe509b2ed294f82ae35..0d765c0e21ec9de8cb4e25ff66d5fd53ee78d95d 100644 (file)
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
        const struct sh_dmae_pdata *pdata;
-       unsigned long irqflags = IRQF_DISABLED,
+       unsigned long irqflags = 0,
                chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
                                else
-                                       chan_flag[irq_cnt] = IRQF_DISABLED;
+                                       chan_flag[irq_cnt] = 0;
                                dev_dbg(&pdev->dev,
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
index 82d2b97ad942f96f2064c0ac58b11141fb85b54c..b8c031b7de4e045d22cfa0e7d495380bc94ed75f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
        }
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS)
+       if (ret != DMA_COMPLETE)
                dma_set_residue(txstate, stedma40_residue(chan));
 
        if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
            src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
            dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
            dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
-           ((src_addr_width > 1) && (src_addr_width & 1)) ||
-           ((dst_addr_width > 1) && (dst_addr_width & 1)))
+           !is_power_of_2(src_addr_width) ||
+           !is_power_of_2(dst_addr_width))
                return -EINVAL;
 
        cfg->src_info.data_width = src_addr_width;
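
The d40 runtime-config check now requires power-of-two bus widths via is_power_of_2() (hence the new <linux/log2.h> include) instead of merely rejecting odd widths greater than one. A small user-space illustration of the difference, using a local is_pow2() stand-in for the kernel helper:

#include <stdio.h>

static int is_pow2(unsigned int n)      /* stand-in for is_power_of_2() */
{
        return n && !(n & (n - 1));
}

int main(void)
{
        unsigned int w[] = { 1, 2, 3, 4, 6, 8 };
        unsigned int i;

        for (i = 0; i < sizeof(w) / sizeof(w[0]); i++) {
                int old_ok = !((w[i] > 1) && (w[i] & 1));       /* old test */
                int new_ok = is_pow2(w[i]);                     /* new test */
                /* width 6 passes the old test but fails the new one */
                printf("width %u: old %s, new %s\n", w[i],
                       old_ok ? "ok" : "reject", new_ok ? "ok" : "reject");
        }
        return 0;
}
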
index 5d4986e5f5fa6b21423084b688bd0a8afbba0c2e..73654e33f13b98c66ebce532646056ecdce79c61 100644 (file)
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
 
        list_del(&sgreq->node);
        if (sgreq->last_sg) {
-               dma_desc->dma_status = DMA_SUCCESS;
+               dma_desc->dma_status = DMA_COMPLETE;
                dma_cookie_complete(&dma_desc->txd);
                if (!dma_desc->cb_count)
                        list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
        unsigned int residual;
 
        ret = dma_cookie_status(dc, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
        return &dma_desc->txd;
 }
 
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
        struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
index 71e8e775189e0df5568d474ea00157a2675f9260..c2829b481bf2c6b2d576e16aab730c06df375173 100644 (file)
@@ -962,8 +962,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
-               return DMA_SUCCESS;
+       if (ret == DMA_COMPLETE)
+               return DMA_COMPLETE;
 
        spin_lock_bh(&dc->lock);
        txx9dmac_scan_descriptors(dc);
index ff080ee201973bd3c76513d11d36273d70971e59..1b5e8e46226d5f3d6bebdb770d8b29de9f04fc43 100644 (file)
@@ -545,12 +545,15 @@ static int dcdbas_probe(struct platform_device *dev)
        host_control_action = HC_ACTION_NONE;
        host_control_smi_type = HC_SMITYPE_NONE;
 
+       dcdbas_pdev = dev;
+
        /*
         * BIOS SMI calls require buffer addresses be in 32-bit address space.
         * This is done by setting the DMA mask below.
         */
-       dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
+       error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+       if (error)
+               return error;
 
        error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
        if (error)
@@ -581,6 +584,14 @@ static struct platform_driver dcdbas_driver = {
        .remove         = dcdbas_remove,
 };
 
+static const struct platform_device_info dcdbas_dev_info __initdata = {
+       .name           = DRIVER_NAME,
+       .id             = -1,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
 /**
  * dcdbas_init: initialize driver
  */
@@ -592,20 +603,14 @@ static int __init dcdbas_init(void)
        if (error)
                return error;
 
-       dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1);
-       if (!dcdbas_pdev) {
-               error = -ENOMEM;
+       dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+       if (IS_ERR(dcdbas_pdev_reg)) {
+               error = PTR_ERR(dcdbas_pdev_reg);
                goto err_unregister_driver;
        }
 
-       error = platform_device_add(dcdbas_pdev);
-       if (error)
-               goto err_free_device;
-
        return 0;
 
- err_free_device:
-       platform_device_put(dcdbas_pdev);
  err_unregister_driver:
        platform_driver_unregister(&dcdbas_driver);
        return error;
@@ -628,8 +633,9 @@ static void __exit dcdbas_exit(void)
         * all sysfs attributes belonging to this module have been
         * released.
         */
-       smi_data_buf_free();
-       platform_device_unregister(dcdbas_pdev);
+       if (dcdbas_pdev)
+               smi_data_buf_free();
+       platform_device_unregister(dcdbas_pdev_reg);
        platform_driver_unregister(&dcdbas_driver);
 }
 
index 6eb535ffeddc2ea7787f648d1d35734f24c305d5..e5a67b24587ac0efe1848dc08a6b465ef744f553 100644 (file)
@@ -764,6 +764,13 @@ static __init int gsmi_system_valid(void)
 static struct kobject *gsmi_kobj;
 static struct efivars efivars;
 
+static const struct platform_device_info gsmi_dev_info = {
+       .name           = "gsmi",
+       .id             = -1,
+       /* SMI callbacks require 32bit addresses */
+       .dma_mask       = DMA_BIT_MASK(32),
+};
+
 static __init int gsmi_init(void)
 {
        unsigned long flags;
@@ -776,7 +783,7 @@ static __init int gsmi_init(void)
        gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
 
        /* register device */
-       gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
+       gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
        if (IS_ERR(gsmi_dev.pdev)) {
                printk(KERN_ERR "gsmi: unable to register platform device\n");
                return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@ static __init int gsmi_init(void)
        /* SMI access needs to be serialized */
        spin_lock_init(&gsmi_dev.lock);
 
-       /* SMI callbacks require 32bit addresses */
-       gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       gsmi_dev.pdev->dev.dma_mask =
-               &gsmi_dev.pdev->dev.coherent_dma_mask;
        ret = -ENOMEM;
        gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
                                             GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
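
Both the dcdbas and gsmi conversions move the DMA mask into a struct platform_device_info and let platform_device_register_full() allocate and wire it up, replacing the alloc/add sequence plus the manual mask assignments. A sketch with a hypothetical "foo" device:

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>

static const struct platform_device_info foo_dev_info = {
        .name           = "foo",
        .id             = -1,
        .dma_mask       = DMA_BIT_MASK(32),     /* applied by the core */
};

static int __init foo_init(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_full(&foo_dev_info);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        /* pdev->dev.dma_mask and coherent_dma_mask are already populated */
        return 0;
}
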
index 2d9ca6055e5e0bde239247fabe41c960d4dd3020..41b5913ddabe6e0a8b1f417004d86628234b77c5 100644 (file)
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
        struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        u32 base, pin, mask;
-       unsigned long reg, pending;
+       unsigned long reg, ena, pending;
        unsigned virq;
 
        /* check from GPIO controller which pin triggered the interrupt */
        for (base = 0; base < lg->chip.ngpio; base += 32) {
                reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+               ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
 
-               while ((pending = inl(reg))) {
+               while ((pending = (inl(reg) & inl(ena)))) {
                        pin = __ffs(pending);
                        mask = BIT(pin);
                        /* Clear before handling so we don't lose an edge */
index 0ff43552d472bf09d5ab0206437e62ae0392c6f1..89675f862308f95d41d962c3faae99b80a458869 100644 (file)
@@ -63,6 +63,7 @@ struct gpio_bank {
        struct gpio_chip chip;
        struct clk *dbck;
        u32 mod_usage;
+       u32 irq_usage;
        u32 dbck_enable_mask;
        bool dbck_enabled;
        struct device *dev;
@@ -86,6 +87,9 @@ struct gpio_bank {
 #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
 #define GPIO_MOD_CTRL_BIT      BIT(0)
 
+#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
+#define LINE_USED(line, offset) (line & (1 << offset))
+
 static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
        return bank->chip.base + gpio_irq;
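
BANK_USED() and LINE_USED() split usage tracking into two bitmasks, mod_usage for gpio_request() users and the new irq_usage for IRQ users, so the bank's runtime PM reference is dropped only when both are empty. A tiny user-space illustration of that bookkeeping (hypothetical offsets, simplified macro):

#include <stdio.h>

#define LINE_USED(line, offset) ((line) & (1u << (offset)))

int main(void)
{
        unsigned int mod_usage = 0, irq_usage = 0;

        mod_usage |= 1u << 3;   /* gpio_request() on line 3 */
        irq_usage |= 1u << 5;   /* request_irq() on line 5 */

        /* bank stays powered while either mask is non-zero */
        printf("bank used: %d\n", mod_usage || irq_usage);

        irq_usage &= ~(1u << 5);        /* IRQ shut down */
        mod_usage &= ~(1u << 3);        /* GPIO freed */
        printf("bank used: %d, line 3 used: %d\n",
               mod_usage || irq_usage, !!LINE_USED(mod_usage, 3));
        return 0;
}
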
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
        return 0;
 }
 
+static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
+{
+       if (bank->regs->pinctrl) {
+               void __iomem *reg = bank->base + bank->regs->pinctrl;
+
+               /* Claim the pin for MPU */
+               __raw_writel(__raw_readl(reg) | (1 << offset), reg);
+       }
+
+       if (bank->regs->ctrl && !BANK_USED(bank)) {
+               void __iomem *reg = bank->base + bank->regs->ctrl;
+               u32 ctrl;
+
+               ctrl = __raw_readl(reg);
+               /* Module is enabled, clocks are not gated */
+               ctrl &= ~GPIO_MOD_CTRL_BIT;
+               __raw_writel(ctrl, reg);
+               bank->context.ctrl = ctrl;
+       }
+}
+
+static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
+{
+       void __iomem *base = bank->base;
+
+       if (bank->regs->wkup_en &&
+           !LINE_USED(bank->mod_usage, offset) &&
+           !LINE_USED(bank->irq_usage, offset)) {
+               /* Disable wake-up during idle for dynamic tick */
+               _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
+               bank->context.wake_en =
+                       __raw_readl(bank->base + bank->regs->wkup_en);
+       }
+
+       if (bank->regs->ctrl && !BANK_USED(bank)) {
+               void __iomem *reg = bank->base + bank->regs->ctrl;
+               u32 ctrl;
+
+               ctrl = __raw_readl(reg);
+               /* Module is disabled, clocks are gated */
+               ctrl |= GPIO_MOD_CTRL_BIT;
+               __raw_writel(ctrl, reg);
+               bank->context.ctrl = ctrl;
+       }
+}
+
+static int gpio_is_input(struct gpio_bank *bank, int mask)
+{
+       void __iomem *reg = bank->base + bank->regs->direction;
+
+       return __raw_readl(reg) & mask;
+}
+
 static int gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
        unsigned gpio = 0;
        int retval;
        unsigned long flags;
+       unsigned offset;
 
-       if (WARN_ON(!bank->mod_usage))
-               return -EINVAL;
+       if (!BANK_USED(bank))
+               pm_runtime_get_sync(bank->dev);
 
 #ifdef CONFIG_ARCH_OMAP1
        if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
                return -EINVAL;
 
        spin_lock_irqsave(&bank->lock, flags);
-       retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
+       offset = GPIO_INDEX(bank, gpio);
+       retval = _set_gpio_triggering(bank, offset, type);
+       if (!LINE_USED(bank->mod_usage, offset)) {
+               _enable_gpio_module(bank, offset);
+               _set_gpio_direction(bank, offset, 1);
+       } else if (!gpio_is_input(bank, 1 << offset)) {
+               spin_unlock_irqrestore(&bank->lock, flags);
+               return -EINVAL;
+       }
+
+       bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
        spin_unlock_irqrestore(&bank->lock, flags);
 
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
         * If this is the first gpio_request for the bank,
         * enable the bank module.
         */
-       if (!bank->mod_usage)
+       if (!BANK_USED(bank))
                pm_runtime_get_sync(bank->dev);
 
        spin_lock_irqsave(&bank->lock, flags);
        /* Set trigger to none. You need to enable the desired trigger with
-        * request_irq() or set_irq_type().
+        * request_irq() or set_irq_type(). Only do this if the IRQ line has
+        * not already been requested.
         */
-       _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-
-       if (bank->regs->pinctrl) {
-               void __iomem *reg = bank->base + bank->regs->pinctrl;
-
-               /* Claim the pin for MPU */
-               __raw_writel(__raw_readl(reg) | (1 << offset), reg);
-       }
-
-       if (bank->regs->ctrl && !bank->mod_usage) {
-               void __iomem *reg = bank->base + bank->regs->ctrl;
-               u32 ctrl;
-
-               ctrl = __raw_readl(reg);
-               /* Module is enabled, clocks are not gated */
-               ctrl &= ~GPIO_MOD_CTRL_BIT;
-               __raw_writel(ctrl, reg);
-               bank->context.ctrl = ctrl;
+       if (!LINE_USED(bank->irq_usage, offset)) {
+               _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+               _enable_gpio_module(bank, offset);
        }
-
        bank->mod_usage |= 1 << offset;
-
        spin_unlock_irqrestore(&bank->lock, flags);
 
        return 0;
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
        struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
-       void __iomem *base = bank->base;
        unsigned long flags;
 
        spin_lock_irqsave(&bank->lock, flags);
-
-       if (bank->regs->wkup_en) {
-               /* Disable wake-up during idle for dynamic tick */
-               _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
-               bank->context.wake_en =
-                       __raw_readl(bank->base + bank->regs->wkup_en);
-       }
-
        bank->mod_usage &= ~(1 << offset);
-
-       if (bank->regs->ctrl && !bank->mod_usage) {
-               void __iomem *reg = bank->base + bank->regs->ctrl;
-               u32 ctrl;
-
-               ctrl = __raw_readl(reg);
-               /* Module is disabled, clocks are gated */
-               ctrl |= GPIO_MOD_CTRL_BIT;
-               __raw_writel(ctrl, reg);
-               bank->context.ctrl = ctrl;
-       }
-
+       _disable_gpio_module(bank, offset);
        _reset_gpio(bank, bank->chip.base + offset);
        spin_unlock_irqrestore(&bank->lock, flags);
 
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
         * If this is the last gpio to be freed in the bank,
         * disable the bank module.
         */
-       if (!bank->mod_usage)
+       if (!BANK_USED(bank))
                pm_runtime_put(bank->dev);
 }
 
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d)
        struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
        unsigned int gpio = irq_to_gpio(bank, d->hwirq);
        unsigned long flags;
+       unsigned offset = GPIO_INDEX(bank, gpio);
 
        spin_lock_irqsave(&bank->lock, flags);
+       bank->irq_usage &= ~(1 << offset);
+       _disable_gpio_module(bank, offset);
        _reset_gpio(bank, gpio);
        spin_unlock_irqrestore(&bank->lock, flags);
+
+       /*
+        * If this is the last IRQ to be freed in the bank,
+        * disable the bank module.
+        */
+       if (!BANK_USED(bank))
+               pm_runtime_put(bank->dev);
 }
 
 static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
        return 0;
 }
 
-static int gpio_is_input(struct gpio_bank *bank, int mask)
-{
-       void __iomem *reg = bank->base + bank->regs->direction;
-
-       return __raw_readl(reg) & mask;
-}
-
 static int gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct gpio_bank *bank;
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
 {
        struct gpio_bank *bank;
        unsigned long flags;
+       int retval = 0;
 
        bank = container_of(chip, struct gpio_bank, chip);
        spin_lock_irqsave(&bank->lock, flags);
+
+       if (LINE_USED(bank->irq_usage, offset)) {
+               retval = -EINVAL;
+               goto exit;
+       }
+
        bank->set_dataout(bank, offset, value);
        _set_gpio_direction(bank, offset, 0);
+
+exit:
        spin_unlock_irqrestore(&bank->lock, flags);
-       return 0;
+       return retval;
 }
 
 static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
        struct gpio_bank *bank;
 
        list_for_each_entry(bank, &omap_gpio_list, node) {
-               if (!bank->mod_usage || !bank->loses_context)
+               if (!BANK_USED(bank) || !bank->loses_context)
                        continue;
 
                bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void)
        struct gpio_bank *bank;
 
        list_for_each_entry(bank, &omap_gpio_list, node) {
-               if (!bank->mod_usage || !bank->loses_context)
+               if (!BANK_USED(bank) || !bank->loses_context)
                        continue;
 
                pm_runtime_get_sync(bank->dev);
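
Editorial note on the OMAP GPIO hunks above: the driver now tracks IRQ users in a separate bank->irq_usage bitmap and gates runtime PM on BANK_USED() rather than on mod_usage alone, so a line requested only as an interrupt still keeps the bank module clocked. A minimal standalone sketch of how such bitmap helpers behave; the macro names mirror the patch, but the bodies here are illustrative guesses, not copied from the driver:

#include <stdio.h>

/* Illustrative definitions; the real driver keeps two bitmaps per bank. */
#define LINE_USED(bitmap, offset)  ((bitmap) & (1u << (offset)))
#define BANK_USED(b)               ((b)->mod_usage || (b)->irq_usage)

struct gpio_bank { unsigned int mod_usage, irq_usage; };

int main(void)
{
	struct gpio_bank bank = { 0, 0 };

	bank.irq_usage |= 1u << 3;		/* line 3 requested as an IRQ only */
	printf("line 3 used as IRQ: %d\n", !!LINE_USED(bank.irq_usage, 3));
	printf("bank in use: %d\n", !!BANK_USED(&bank));

	bank.irq_usage &= ~(1u << 3);		/* last user gone -> pm_runtime_put */
	printf("bank in use: %d\n", !!BANK_USED(&bank));
	return 0;
}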
index e3745eb075708092d0a56e04f7bb2a10b4d274dd..6038966ab045529b071242424e49b12237db316a 100644 (file)
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
        if (pdata) {
                p->config = *pdata;
        } else if (IS_ENABLED(CONFIG_OF) && np) {
-               ret = of_parse_phandle_with_args(np, "gpio-ranges",
-                               "#gpio-range-cells", 0, &args);
-               p->config.number_of_pins = ret == 0 && args.args_count == 3
-                                        ? args.args[2]
+               ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
+                                                      &args);
+               p->config.number_of_pins = ret == 0 ? args.args[2]
                                         : RCAR_MAX_GPIO_PER_BANK;
                p->config.gpio_base = -1;
        }
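
The gpio-rcar change switches to of_parse_phandle_with_fixed_args(), which is intended for phandle lists whose argument count is fixed by the binding (three cells here) instead of being advertised through a "#...-cells" property in the target node. A hedged sketch of a caller, with error handling trimmed; the helper name and signature are taken from the patch, the surrounding function is invented for illustration:

#include <linux/of.h>

static int example_count_pins(struct device_node *np)
{
	struct of_phandle_args args;
	int ret;

	/* Three fixed cells per entry, take entry index 0. */
	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
	if (ret)
		return -1;			/* caller falls back to a default */

	of_node_put(args.np);			/* drop the reference the parser took */
	return args.args[2];			/* third cell: pins in the range */
}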
index 8ea3b33d4b40bb4b96ad7edd83f6dac7e219aa03..a90be34e4d5c24569dc64a320e59d0a9474e0908 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/module.h>
-
+#include <linux/io.h>
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 
index 5c1ef2b3ef188253e0031b0ceec2e65239220df5..f2beb728ed8f32cf8eeb1b6135fcf652796ab294 100644 (file)
@@ -73,15 +73,8 @@ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
 {
        struct acpi_gpio_evt_pin *evt_pin = data;
-       struct acpi_object_list args;
-       union acpi_object arg;
 
-       arg.type = ACPI_TYPE_INTEGER;
-       arg.integer.value = evt_pin->pin;
-       args.count = 1;
-       args.pointer = &arg;
-
-       acpi_evaluate_object(evt_pin->evt_handle, NULL, &args, NULL);
+       acpi_execute_simple_method(evt_pin->evt_handle, NULL, evt_pin->pin);
 
        return IRQ_HANDLED;
 }
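
acpi_execute_simple_method() is a convenience wrapper for evaluating a control method that takes a single integer argument; the hunk above replaces the hand-built acpi_object_list with it. A hedged before/after sketch, where handle and pin are assumed to come from the surrounding driver context:

#include <linux/acpi.h>

static void notify_evt(acpi_handle handle, u64 pin)
{
	/* Before: build an argument list for one integer by hand. */
	union acpi_object arg = {
		.integer = { .type = ACPI_TYPE_INTEGER, .value = pin },
	};
	struct acpi_object_list args = { .count = 1, .pointer = &arg };

	acpi_evaluate_object(handle, NULL, &args, NULL);

	/* After: the helper wraps exactly that pattern in one call. */
	acpi_execute_simple_method(handle, NULL, pin);
}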
index 86ef3461ec0647b42f1e1ff94a9f47225151f7f6..0dee0e0c247ae5fa2f121df234d09979dc00df74 100644 (file)
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
  */
 static int desc_to_gpio(const struct gpio_desc *desc)
 {
-       return desc->chip->base + gpio_chip_hwgpio(desc);
+       return desc - &gpio_desc[0];
 }
 
 
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        int                     status = -EPROBE_DEFER;
        unsigned long           flags;
 
-       if (!desc || !desc->chip) {
+       if (!desc) {
                pr_warn("%s: invalid GPIO\n", __func__);
                return -EINVAL;
        }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        spin_lock_irqsave(&gpio_lock, flags);
 
        chip = desc->chip;
+       if (chip == NULL)
+               goto done;
 
        if (!try_module_get(chip->owner))
                goto done;
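
The gpiolib hunks make desc_to_gpio() independent of desc->chip, so it keeps working for descriptors whose chip has gone away; with a single static descriptor array the global GPIO number is simply the array index, and gpiod_request() now checks for a missing chip under the lock instead. A small standalone illustration of that pointer arithmetic (array size and names are made up):

#include <stdio.h>

struct gpio_desc { void *chip; };

/* With one static descriptor array, the global number is the index,
 * valid even before (or after) a chip is bound to the descriptor. */
static struct gpio_desc gpio_desc[16];

static int desc_to_gpio(const struct gpio_desc *desc)
{
	return desc - &gpio_desc[0];
}

int main(void)
{
	printf("gpio number: %d\n", desc_to_gpio(&gpio_desc[5]));	/* prints 5 */
	return 0;
}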
index b4fb86d89850a31c3cc9424f4b4148831c59922f..224ff965bcf7de624c3b62b6db153bda090ea4a9 100644 (file)
 
 #include <drm/drmP.h>
 
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
 /**
  * Free a handle from the context bitmap.
  *
  * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
        mutex_lock(&dev->struct_mutex);
        idr_remove(&dev->ctx_idr, ctx_handle);
        mutex_unlock(&dev->struct_mutex);
 }
 
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-                                 struct drm_file *file_priv)
-{
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       mutex_lock(&dev->ctxlist_mutex);
-       if (!list_empty(&dev->ctxlist)) {
-               struct drm_ctx_list *pos, *n;
-
-               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-                       if (pos->tag == file_priv &&
-                           pos->handle != DRM_KERNEL_CONTEXT) {
-                               if (dev->driver->context_dtor)
-                                       dev->driver->context_dtor(dev,
-                                                                 pos->handle);
-
-                               drm_ctxbitmap_free(dev, pos->handle);
-
-                               list_del(&pos->head);
-                               kfree(pos);
-                               --dev->ctx_count;
-                       }
-               }
-       }
-       mutex_unlock(&dev->ctxlist_mutex);
-}
-
 /**
  * Context bitmap allocation.
  *
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
 {
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
        idr_init(&dev->ctx_idr);
+       return 0;
 }
 
 /**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
  * Free all idr members using drm_ctx_sarea_free helper function
  * while holding the drm_device::struct_mutex lock.
  */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
        mutex_lock(&dev->struct_mutex);
        idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map;
        struct drm_map_list *_entry;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        mutex_lock(&dev->struct_mutex);
 
        map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list = NULL;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
        struct drm_ctx ctx;
        int i;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        if (res->count >= DRM_RESERVED_CONTEXTS) {
                memset(&ctx, 0, sizeof(ctx));
                for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
        struct drm_ctx_list *ctx_entry;
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        ctx->handle = drm_ctxbitmap_next(dev);
        if (ctx->handle == DRM_KERNEL_CONTEXT) {
                /* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        /* This is 0, because we don't handle any context flags */
        ctx->flags = 0;
 
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        DRM_DEBUG("%d\n", ctx->handle);
        return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        DRM_DEBUG("%d\n", ctx->handle);
        drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        DRM_DEBUG("%d\n", ctx->handle);
        if (ctx->handle != DRM_KERNEL_CONTEXT) {
                if (dev->driver->context_dtor)
index e572dd20bdee037fed5cdce356191ea84fd41e09..05ad9ba0a67e8ab4c9e79aade5b8ead441b7f3a8 100644 (file)
@@ -402,9 +402,16 @@ long drm_ioctl(struct file *filp,
                cmd = ioctl->cmd_drv;
        }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+               u32 drv_size;
+
                ioctl = &drm_ioctls[nr];
-               cmd = ioctl->cmd;
+
+               drv_size = _IOC_SIZE(ioctl->cmd);
                usize = asize = _IOC_SIZE(cmd);
+               if (drv_size > asize)
+                       asize = drv_size;
+
+               cmd = ioctl->cmd;
        } else
                goto err_i1;
 
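In the drm_ioctl() hunk, usize stays the size encoded in the command userspace passed, while asize is bumped to the driver table's size when that is larger, so the kernel-side copy has room for fields an older binary did not know about and those trailing bytes read back as zero. A standalone sketch of the sizing rule; the struct and names are invented for illustration:

#include <stdio.h>
#include <string.h>

struct new_ioctl_arg { int old_field; int new_field; };	/* grew by one field */

static void dispatch(const void *ubuf, size_t usize, size_t drv_size)
{
	size_t asize = usize > drv_size ? usize : drv_size;
	unsigned char kbuf[sizeof(struct new_ioctl_arg)] = { 0 };	/* stands in for kzalloc(asize) */
	struct new_ioctl_arg arg;

	memcpy(kbuf, ubuf, usize);		/* bytes beyond usize stay zeroed */
	memcpy(&arg, kbuf, sizeof(arg));
	printf("old=%d new=%d (allocated %zu bytes)\n", arg.old_field, arg.new_field, asize);
}

int main(void)
{
	int old_only = 7;			/* old userspace passes just the first field */

	dispatch(&old_only, sizeof(old_only), sizeof(struct new_ioctl_arg));
	return 0;
}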
index 1688ff500513142d6d5072efb8061f1ae231bb3d..830f7501cb4d4f12fd914ec1bac6b0bbf7bae98d 100644 (file)
@@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
                        /* Speaker Allocation Data Block */
                        if (dbl == 3) {
                                *sadb = kmalloc(dbl, GFP_KERNEL);
+                               if (!*sadb)
+                                       return -ENOMEM;
                                memcpy(*sadb, &db[1], dbl);
                                count = dbl;
                                break;
index 0cfb60f5476655edc097ca71c648d11544f3357e..d18b88b755c34f876dc2d4b9f0a202709dc87f5d 100644 (file)
@@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
                goto fail;
        }
 
-       if (!client->driver) {
+       if (!client->dev.driver) {
                err = -ENODEV;
                goto fail_unregister;
        }
 
-       module = client->driver->driver.owner;
+       module = client->dev.driver->owner;
        if (!try_module_get(module)) {
                err = -ENODEV;
                goto fail_unregister;
@@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
 
        encoder->bus_priv = client;
 
-       encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+       encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
 
        err = encoder_drv->encoder_init(client, dev, encoder);
        if (err)
@@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
 {
        struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
        struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
-       struct module *module = client->driver->driver.owner;
+       struct module *module = client->dev.driver->owner;
 
        i2c_unregister_device(client);
        encoder->bus_priv = NULL;
index f6f6cc7fc133292e9fe3375502466b0abd1b0fa9..3d13ca6e257f0f2c6bd366a49f8b7f3622e592dd 100644 (file)
@@ -407,14 +407,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
        struct drm_connector *connector;
        int i, j;
 
-       /*
-        * fbdev->blank can be called from irq context in case of a panic.
-        * Since we already have our own special panic handler which will
-        * restore the fbdev console mode completely, just bail out early.
-        */
-       if (oops_in_progress)
-               return;
-
        /*
         * fbdev->blank can be called from irq context in case of a panic.
         * Since we already have our own special panic handler which will
index 4be8e09a32ef730db4740f6f69e52f91ca829112..3f84277d7036b3f843120fb4145dc081010d3d0d 100644 (file)
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);
 
-       drm_legacy_ctxbitmap_release(dev, file_priv);
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
 
        mutex_lock(&dev->struct_mutex);
 
index e7eb0276f7f1968e6c71997eab517fd3a6746fac..39d864576be4a4d5f5957cdbf1e3112b1322a018 100644 (file)
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
                        goto error_out_unreg;
        }
 
-       drm_legacy_ctxbitmap_init(dev);
+
+
+       retcode = drm_ctxbitmap_init(dev);
+       if (retcode) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto error_out_unreg;
+       }
 
        if (driver->driver_features & DRIVER_GEM) {
                retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
                drm_rmmap(dev, r_list->map);
        drm_ht_remove(&dev->map_hash);
 
-       drm_legacy_ctxbitmap_cleanup(dev);
+       drm_ctxbitmap_cleanup(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_put_minor(&dev->control);
index 4752f223e5b28a2fb793eb5b83be0fefb89a7ac2..45b6ef595965b7cb7391125d8b3a4caf2703b2b5 100644 (file)
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
 
 config DRM_EXYNOS_FIMC
        bool "Exynos DRM FIMC"
-       depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+       depends on DRM_EXYNOS_IPP && MFD_SYSCON
        help
          Choose this option if you want to use Exynos FIMC for DRM.
 
index 3445a0f3a6b29dc6110d0e3a950a23de92a4098c..9c8088462c26f970d42b2fb40dae86490e51d3f6 100644 (file)
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
                        return -ENOMEM;
                }
 
-               buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+               buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+                                       buf->size,
                                        &buf->dma_addr, GFP_KERNEL,
                                        &buf->dma_attrs);
                if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
        }
 
        buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-       if (!buf->sgt) {
+       if (IS_ERR(buf->sgt)) {
                DRM_ERROR("failed to get sg table.\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(buf->sgt);
                goto err_free_attrs;
        }
 
index bb82ef78ca851101458a76178656a6f5646137b1..81192d00b39ec55f72afc68aa3ce0aab448a3e5d 100644 (file)
@@ -286,7 +286,11 @@ static struct drm_driver exynos_drm_driver = {
 
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       int ret;
+
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        return drm_platform_init(&exynos_drm_driver, pdev);
 }
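
Assigning pdev->dev.coherent_dma_mask directly skips the check that the chosen mask is actually usable on this platform; dma_set_coherent_mask() performs that validation and returns an error the probe can propagate, as the hunk above does. A hedged sketch with the probe body trimmed to the relevant call:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;		/* mask not supported on this platform */

	/* ... rest of probe ... */
	return 0;
}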
index 78e868bcf1ecc364638e4ce3343cc66f260599f0..e7c2f2d07f193b0393052be2e1bf32921b804da9 100644 (file)
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
                if (is_drm_iommu_supported(dev)) {
                        unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
 
-                       buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+                       buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+                                       nr_pages, VM_MAP,
                                        pgprot_writecombine(PAGE_KERNEL));
                } else {
                        phys_addr_t dma_addr = buffer->dma_addr;
                        if (dma_addr)
-                               buffer->kvaddr = phys_to_virt(dma_addr);
+                               buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
                        else
                                buffer->kvaddr = (void __iomem *)NULL;
                }
index 92babac362ec0b85b6f1e52f05a6402411b43d1e..2db731f00930d7b28d85e124744423c8a37dd8b4 100644 (file)
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
+       gt->npage = gt->gem.size / PAGE_SIZE;
        gt->pages = pages;
 
        return 0;
index b1f8fc69023fd97f9ee2ac28b3aaf35760aa3323..60e84043aa348fe3ec4293507edeb6b9477e3dc2 100644 (file)
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
                reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
                break;
        case DRM_MODE_DPMS_OFF:
-               /* disable audio and video ports */
-               reg_write(encoder, REG_ENA_AP, 0x00);
+               /* disable video ports */
                reg_write(encoder, REG_ENA_VP_0, 0x00);
                reg_write(encoder, REG_ENA_VP_1, 0x00);
                reg_write(encoder, REG_ENA_VP_2, 0x00);
index c27a21034a5e56e6fca41d6ff710c9abef59bf72..d5c784d486714d4cc5b9e1a45c8b17fe13d51871 100644 (file)
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
-       if (!HAS_PCH_SPLIT(dev)) {
-               ret = vga_client_register(dev->pdev, dev, NULL,
-                                         i915_vga_set_decode);
-               if (ret && ret != -ENODEV)
-                       goto out;
-       }
+       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+       if (ret && ret != -ENODEV)
+               goto out;
 
        intel_register_dsm_handler();
 
@@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
         */
        intel_fbdev_initial_config(dev);
 
-       /*
-        * Must do this after fbcon init so that
-        * vgacon_save_screen() works during the handover.
-        */
-       i915_disable_vga_mem(dev);
-
        /* Only enable hotplug handling once the fbdev is fully set up. */
        dev_priv->enable_hotplug_processing = true;
 
index 69d8ed5416c31b2e80538fabec553e333fd741ea..2ad27880cd047bc93cf119784895040eeafc3bae 100644 (file)
@@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
                intel_modeset_suspend_hw(dev);
        }
 
+       i915_gem_suspend_gtt_mappings(dev);
+
        i915_save_state(dev);
 
        intel_opregion_fini(dev);
@@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
-       }
+       } else if (drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_check_and_clear_faults(dev);
 
        __i915_drm_thaw(dev);
 
index 35874b3a86dcc917c9bb68e1cb4879d81a0fc76b..ab0f2c0a440c6a4543a59d81282c4ab21316b2cc 100644 (file)
@@ -497,10 +497,12 @@ struct i915_address_space {
 
        /* FIXME: Need a more generic return type */
        gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-                                    enum i915_cache_level level);
+                                    enum i915_cache_level level,
+                                    bool valid); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
                            unsigned int first_entry,
-                           unsigned int num_entries);
+                           unsigned int num_entries,
+                           bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               unsigned int first_entry,
@@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);
 
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
index 8507c6d1e642d872c48f41834a5dbeca53d99c6b..cdfb9da0e4ce944529a329ce29f92d391e19973a 100644 (file)
@@ -1392,14 +1392,11 @@ out:
                if (i915_terminally_wedged(&dev_priv->gpu_error))
                        return VM_FAULT_SIGBUS;
        case -EAGAIN:
-               /* Give the error handler a chance to run and move the
-                * objects off the GPU active list. Next time we service the
-                * fault, we should be able to transition the page into the
-                * GTT without touching the GPU (and so avoid further
-                * EIO/EGAIN). If the GPU is wedged, then there is no issue
-                * with coherency, just lost writes.
+               /*
+                * EAGAIN means the gpu is hung and we'll wait for the error
+                * handler to reset everything when re-faulting in
+                * i915_mutex_lock_interruptible.
                 */
-               set_need_resched();
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
@@ -4803,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
-                       return SHRINK_STOP;
+                       return 0;
 
                if (dev_priv->mm.shrinker_no_lock_stealing)
-                       return SHRINK_STOP;
+                       return 0;
 
                unlock = false;
        }
@@ -4904,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
-                       return 0;
+                       return SHRINK_STOP;
 
                if (dev_priv->mm.shrinker_no_lock_stealing)
-                       return 0;
+                       return SHRINK_STOP;
 
                unlock = false;
        }
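
The two i915 shrinker hunks swap the return values into the correct callbacks for the split count/scan API: when the lock cannot be taken, ->count_objects should report 0 (nothing countable right now), while ->scan_objects returns SHRINK_STOP to tell reclaim to stop calling this shrinker for the current pass. A hedged sketch of that convention only, not of the i915 code:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(demo_lock);
static unsigned long demo_cached;	/* objects we could free */

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long count;

	if (!mutex_trylock(&demo_lock))
		return 0;			/* nothing countable right now */
	count = demo_cached;
	mutex_unlock(&demo_lock);
	return count;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed;

	if (!mutex_trylock(&demo_lock))
		return SHRINK_STOP;		/* tell vmscan to back off */
	freed = min(demo_cached, sc->nr_to_scan);
	demo_cached -= freed;
	mutex_unlock(&demo_lock);
	return freed;
}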
index 212f6d8c35ec6593cc54957ecd24445196d26657..1f7b4caefb6e0776bf61d78af519812765b37741 100644 (file)
 #define HSW_WT_ELLC_LLC_AGE0           HSW_CACHEABILITY_CONTROL(0x6)
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES  (1 << 2)
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        /* Mark the page as writeable.  Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   unsigned first_entry,
-                                  unsigned num_entries)
+                                  unsigned num_entries,
+                                  bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                dma_addr_t page_addr;
 
                page_addr = sg_page_iter_dma_address(&sg_iter);
-               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        }
 
        ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
+                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
 
        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 {
        ppgtt->base.clear_range(&ppgtt->base,
                                i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                               obj->base.size >> PAGE_SHIFT);
+                               obj->base.size >> PAGE_SHIFT,
+                               true);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
                dev_priv->mm.interruptible = interruptible;
 }
 
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       for_each_ring(ring, dev_priv, i) {
+               u32 fault_reg;
+               fault_reg = I915_READ(RING_FAULT_REG(ring));
+               if (fault_reg & RING_FAULT_VALID) {
+                       DRM_DEBUG_DRIVER("Unexpected fault\n"
+                                        "\tAddr: 0x%08lx\n"
+                                        "\tAddress space: %s\n"
+                                        "\tSource ID: %d\n"
+                                        "\tType: %d\n",
+                                        fault_reg & PAGE_MASK,
+                                        fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+                                        RING_FAULT_SRCID(fault_reg),
+                                        RING_FAULT_FAULT_TYPE(fault_reg));
+                       I915_WRITE(RING_FAULT_REG(ring),
+                                  fault_reg & ~RING_FAULT_VALID);
+               }
+       }
+       POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Don't bother messing with faults pre GEN6 as we have little
+        * documentation supporting that it's a good idea.
+        */
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       i915_check_and_clear_faults(dev);
+
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      dev_priv->gtt.base.start / PAGE_SIZE,
+                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      false);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
+       i915_check_and_clear_faults(dev);
+
        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE);
+                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      true);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+               iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
                i++;
        }
 
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) !=
-                       vm->pte_encode(addr, level));
+                       vm->pte_encode(addr, level, true));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
-                                 unsigned int num_entries)
+                                 unsigned int num_entries,
+                                 bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
-                                 unsigned int num_entries)
+                                 unsigned int num_entries,
+                                 bool unused)
 {
        intel_gtt_clear_range(first_entry, num_entries);
 }
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       entry,
-                                      obj->base.size >> PAGE_SHIFT);
+                                      obj->base.size >> PAGE_SHIFT,
+                                      true);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
                const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 }
 
 static bool
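
The repeated pte_encode()/clear_range() signature changes above thread a valid/use_scratch flag through the GTT code so that suspend can scrub the GTT with PTEs whose valid bit is clear, while ordinary clears still point at the scratch page. A standalone sketch of the bit-level idea; the address-encoding macro here is simplified for illustration and is not the driver's:

#include <stdint.h>
#include <stdio.h>

#define GEN6_PTE_VALID		(1u << 0)
#define GEN6_PTE_ADDR_ENCODE(a)	((uint32_t)(a) & 0xfffff000u)	/* simplified */

/* The same encoder can now emit either a normal (valid) PTE or a
 * deliberately invalid one for use while suspended. */
static uint32_t pte_encode(uint64_t addr, int valid)
{
	uint32_t pte = valid ? GEN6_PTE_VALID : 0;

	pte |= GEN6_PTE_ADDR_ENCODE(addr);
	return pte;
}

int main(void)
{
	printf("suspend PTE: 0x%08x\n", pte_encode(0x12345000, 0));	/* faults if touched */
	printf("resume  PTE: 0x%08x\n", pte_encode(0x12345000, 1));
	return 0;
}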
index aba9d7498996c29845e6691ac4eff83c8fe85223..dae364f0028cc94d58f87613449b41d7620d7f29 100644 (file)
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
 
        /* Seek the first printf which hits the start position */
        if (e->pos < e->start) {
-               len = vsnprintf(NULL, 0, f, args);
-               if (!__i915_error_seek(e, len))
+               va_list tmp;
+
+               va_copy(tmp, args);
+               if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
                        return;
        }
 
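The error-state fix above matters because a va_list may only be traversed once: the old code measured the output with vsnprintf(NULL, 0, ...) on the same list that is formatted afterwards, which is undefined behaviour. va_copy() gives the measuring pass its own copy. A minimal standalone example of the pattern:

#include <stdarg.h>
#include <stdio.h>

/* Measure first on a copy, then format using the original list. */
static int measured_vprintf(const char *fmt, va_list args)
{
	va_list tmp;
	int len;

	va_copy(tmp, args);
	len = vsnprintf(NULL, 0, fmt, tmp);	/* consumes the copy, not 'args' */
	va_end(tmp);

	vprintf(fmt, args);			/* 'args' is still pristine here */
	return len;
}

static int measured_printf(const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = measured_vprintf(fmt, args);
	va_end(args);
	return len;
}

int main(void)
{
	int len = measured_printf("hello %s %d\n", "world", 42);

	printf("formatted %d characters\n", len);
	return 0;
}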
index 83cce0cdb7691a9aa6024defc01c869a57b4a90a..4b91228fd9bd8e50e1319816a9fe53b6f166ec68 100644 (file)
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        return ret;
 }
 
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+                              bool reset_completed)
+{
+       struct intel_ring_buffer *ring;
+       int i;
+
+       /*
+        * Notify all waiters for GPU completion events that reset state has
+        * been changed, and that they need to restart their wait after
+        * checking for potential errors (and bail out to drop locks if there is
+        * a gpu reset pending so that i915_error_work_func can acquire them).
+        */
+
+       /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+       for_each_ring(ring, dev_priv, i)
+               wake_up_all(&ring->irq_queue);
+
+       /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+       wake_up_all(&dev_priv->pending_flip_queue);
+
+       /*
+        * Signal tasks blocked in i915_gem_wait_for_error that the pending
+        * reset state is cleared.
+        */
+       if (reset_completed)
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
        drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
                                                    gpu_error);
        struct drm_device *dev = dev_priv->dev;
-       struct intel_ring_buffer *ring;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-       int i, ret;
+       int ret;
 
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
                                   reset_event);
 
+               /*
+                * All state reset _must_ be completed before we update the
+                * reset counter, for otherwise waiters might miss the reset
+                * pending state and not properly drop locks, resulting in
+                * deadlocks with the reset work.
+                */
                ret = i915_reset(dev);
 
+               intel_display_handle_reset(dev);
+
                if (ret == 0) {
                        /*
                         * After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
                        atomic_set(&error->reset_counter, I915_WEDGED);
                }
 
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
-
-               intel_display_handle_reset(dev);
-
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
+               /*
+                * Note: The wake_up also serves as a memory barrier so that
+                * waiters see the updated value of the reset counter atomic_t.
+                */
+               i915_error_wake_up(dev_priv, true);
        }
 }
 
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       int i;
 
        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
                                &dev_priv->gpu_error.reset_counter);
 
                /*
-                * Wakeup waiting processes so that the reset work item
-                * doesn't deadlock trying to grab various locks.
+                * Wakeup waiting processes so that the reset work function
+                * i915_error_work_func doesn't deadlock trying to grab various
+                * locks. By bumping the reset counter first, the woken
+                * processes will see a reset in progress and back off,
+                * releasing their locks and then wait for the reset completion.
+                * We must do this for _all_ gpu waiters that might hold locks
+                * that the reset work needs to acquire.
+                *
+                * Note: The wake_up serves as the required memory barrier to
+                * ensure that the waiters see the updated value of the reset
+                * counter atomic_t.
                 */
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
+               i915_error_wake_up(dev_priv, false);
        }
 
        /*
index c159e1a6810fbd8f04e60520dfc5fdb884d8dbff..ef9b35479f0136d0cb23ec7f6eb2bb59afa85e46 100644 (file)
 #define   ARB_MODE_SWIZZLE_IVB (1<<5)
 #define RENDER_HWS_PGA_GEN7    (0x04080)
 #define RING_FAULT_REG(ring)   (0x4094 + 0x100*(ring)->id)
+#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_SRCID(x)  ((x >> 3) & 0xff)
+#define   RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
+#define   RING_FAULT_VALID     (1<<0)
 #define DONE_REG               0x40b0
 #define BSD_HWS_PGA_GEN7       (0x04180)
 #define BLT_HWS_PGA_GEN7       (0x04280)
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
 
+#define HSW_SCRATCH1                           0xb038
+#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1<<27)
+
 #define HSW_FUSE_STRAP         0x42014
 #define  HSW_CDCLK_LIMIT       (1 << 24)
 
 #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D    0xc2020
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
 #define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
 
 /* CPU: FDI_TX */
 #define GEN7_ROW_CHICKEN2_GT2          0xf4f4
 #define   DOP_CLOCK_GATING_DISABLE     (1<<0)
 
+#define HSW_ROW_CHICKEN3               0xe49c
+#define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
+
 #define G4X_AUD_VID_DID                        (dev_priv->info->display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL              0x808629FB
 #define INTEL_AUDIO_DEVBLC             0x80862801
index 57fe1ae32a0d69d767eb7d698cfeab2ead4231e2..43959edd4291193a538449c1d4f75bba8d30ea8a 100644 (file)
@@ -193,16 +193,14 @@ out:
 
 static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 {
-       acpi_handle dhandle, intel_handle;
-       acpi_status status;
+       acpi_handle dhandle;
        int ret;
 
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
 
-       status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(dhandle, "_DSM")) {
                DRM_DEBUG_KMS("no _DSM method for intel device\n");
                return false;
        }
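
acpi_has_method() expresses the intent of the old acpi_get_handle() call, whose returned handle was only ever used as an existence test. A hedged sketch of the probe-side check; the function name is invented, the ACPI calls match the patch:

#include <linux/acpi.h>
#include <linux/pci.h>

static bool example_has_dsm(struct pci_dev *pdev)
{
	acpi_handle dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);

	if (!dhandle)
		return false;			/* no ACPI companion for this device */

	return acpi_has_method(dhandle, "_DSM");
}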
index 63aca49d11a843a6ad6ae1a62dc11d0b3bfd470c..beb7f65cd01f4e1564f12e570f190b3bcd90623f 100644 (file)
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                        /* Can only use the always-on power well for eDP when
                         * not using the panel fitter, and when not using motion
                          * blur mitigation (which we don't support). */
-                       if (intel_crtc->config.pch_pfit.size)
+                       if (intel_crtc->config.pch_pfit.enabled)
                                temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
                        else
                                temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
                flags |= DRM_MODE_FLAG_NVSYNC;
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       switch (temp & TRANS_DDI_BPC_MASK) {
+       case TRANS_DDI_BPC_6:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case TRANS_DDI_BPC_8:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case TRANS_DDI_BPC_10:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case TRANS_DDI_BPC_12:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
index 2489d0b4c7d2db8a8b5d74c04107f1b405c8ad35..725f0bea1e4c9647a43cbb04acb08ec750a4a75a 100644 (file)
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                I915_WRITE(PIPESRC(intel_crtc->pipe),
                           ((crtc->mode.hdisplay - 1) << 16) |
                           (crtc->mode.vdisplay - 1));
-               if (!intel_crtc->config.pch_pfit.size &&
+               if (!intel_crtc->config.pch_pfit.enabled &&
                    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
                     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
                        I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
 
-       if (crtc->config.pch_pfit.size) {
+       if (crtc->config.pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
-       if (crtc->config.pch_pfit.size) {
+       if (crtc->config.pch_pfit.enabled) {
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -3941,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
  * consider. */
 void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-       struct intel_encoder *encoder = intel_attached_encoder(connector);
-
        /* All the simple cases only support two dpms states. */
        if (mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
@@ -3953,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
        connector->dpms = mode;
 
        /* Only need to change hw state when actually enabled */
-       if (encoder->base.crtc)
-               intel_encoder_dpms(encoder, mode);
-       else
-               WARN_ON(encoder->connectors_active != false);
+       if (connector->encoder)
+               intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
 
        intel_modeset_check_state(connector->dev);
 }
@@ -4775,6 +4771,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
        pipeconf = 0;
 
+       if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+           I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+               pipeconf |= PIPECONF_ENABLE;
+
        if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
                /* Enable pixel doubling when the dot clock is > 90% of the (display)
                 * core speed.
@@ -4877,9 +4877,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        if (is_lvds && dev_priv->lvds_downclock_avail) {
                /*
                 * Ensure we match the reduced clock's P to the target clock.
@@ -4986,6 +4983,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+               switch (tmp & PIPECONF_BPC_MASK) {
+               case PIPECONF_6BPC:
+                       pipe_config->pipe_bpp = 18;
+                       break;
+               case PIPECONF_8BPC:
+                       pipe_config->pipe_bpp = 24;
+                       break;
+               case PIPECONF_10BPC:
+                       pipe_config->pipe_bpp = 30;
+                       break;
+               default:
+                       break;
+               }
+       }
+
        intel_get_pipe_timings(crtc, pipe_config);
 
        i9xx_get_pfit_config(crtc, pipe_config);
@@ -5768,9 +5781,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                intel_crtc->config.dpll.p2 = clock.p2;
        }
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
        if (intel_crtc->config.has_pch_encoder) {
                fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5869,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
        tmp = I915_READ(PF_CTL(crtc->pipe));
 
        if (tmp & PF_ENABLE) {
+               pipe_config->pch_pfit.enabled = true;
                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
 
@@ -5886,6 +5897,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       switch (tmp & PIPECONF_BPC_MASK) {
+       case PIPECONF_6BPC:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case PIPECONF_8BPC:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case PIPECONF_10BPC:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case PIPECONF_12BPC:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
+
        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
                struct intel_shared_dpll *pll;
 
@@ -6236,7 +6264,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
                if (!crtc->base.enabled)
                        continue;
 
-               if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+               if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
                    crtc->config.cpu_transcoder != TRANSCODER_EDP)
                        enable = true;
        }
@@ -6259,9 +6287,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
        if (!intel_ddi_pll_mode_set(crtc))
                return -EINVAL;
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
 
@@ -6494,15 +6519,15 @@ static void haswell_write_eld(struct drm_connector *connector,
 
        /* Set ELD valid state */
        tmp = I915_READ(aud_cntrl_st2);
-       DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+       DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
        tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
        I915_WRITE(aud_cntrl_st2, tmp);
        tmp = I915_READ(aud_cntrl_st2);
-       DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+       DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
        /* Enable HDMI mode */
        tmp = I915_READ(aud_config);
-       DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+       DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
        /* clear N_programing_enable and N_value_index */
        tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
        I915_WRITE(aud_config, tmp);
@@ -6937,7 +6962,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        intel_crtc->cursor_width = width;
        intel_crtc->cursor_height = height;
 
-       intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+       if (intel_crtc->active)
+               intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
        return 0;
 fail_unpin:
@@ -6956,7 +6982,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        intel_crtc->cursor_x = x;
        intel_crtc->cursor_y = y;
 
-       intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+       if (intel_crtc->active)
+               intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
        return 0;
 }
@@ -8205,9 +8232,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->gmch_pfit.control,
                      pipe_config->gmch_pfit.pgm_ratios,
                      pipe_config->gmch_pfit.lvds_border_bits);
-       DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+       DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
                      pipe_config->pch_pfit.pos,
-                     pipe_config->pch_pfit.size);
+                     pipe_config->pch_pfit.size,
+                     pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
        DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
 }
 
@@ -8603,8 +8631,11 @@ intel_pipe_config_compare(struct drm_device *dev,
        if (INTEL_INFO(dev)->gen < 4)
                PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-       PIPE_CONF_CHECK_I(pch_pfit.pos);
-       PIPE_CONF_CHECK_I(pch_pfit.size);
+       PIPE_CONF_CHECK_I(pch_pfit.enabled);
+       if (current_config->pch_pfit.enabled) {
+               PIPE_CONF_CHECK_I(pch_pfit.pos);
+               PIPE_CONF_CHECK_I(pch_pfit.size);
+       }
 
        PIPE_CONF_CHECK_I(ips_enabled);
 
@@ -8614,6 +8645,9 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
+       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+               PIPE_CONF_CHECK_I(pipe_bpp);
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_FLAGS
@@ -10047,33 +10081,6 @@ static void i915_disable_vga(struct drm_device *dev)
        POSTING_READ(vga_reg);
 }
 
-static void i915_enable_vga_mem(struct drm_device *dev)
-{
-       /* Enable VGA memory on Intel HD */
-       if (HAS_PCH_SPLIT(dev)) {
-               vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-               outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
-               vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
-                                                  VGA_RSRC_LEGACY_MEM |
-                                                  VGA_RSRC_NORMAL_IO |
-                                                  VGA_RSRC_NORMAL_MEM);
-               vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-       }
-}
-
-void i915_disable_vga_mem(struct drm_device *dev)
-{
-       /* Disable VGA memory on Intel HD */
-       if (HAS_PCH_SPLIT(dev)) {
-               vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-               outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
-               vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
-                                                  VGA_RSRC_NORMAL_IO |
-                                                  VGA_RSRC_NORMAL_MEM);
-               vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-       }
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
        intel_init_power_well(dev);
@@ -10352,7 +10359,6 @@ void i915_redisable_vga(struct drm_device *dev)
        if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
                i915_disable_vga(dev);
-               i915_disable_vga_mem(dev);
        }
 }
 
@@ -10566,8 +10572,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_disable_fbc(dev);
 
-       i915_enable_vga_mem(dev);
-
        intel_disable_gt_powersave(dev);
 
        ironlake_teardown_rc6(dev);
index 2151d13772b8248cf1f6d6d71c2720fa074fe435..1a431377d83b76ad22bccf795db770ab3b40bd3e 100644 (file)
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        DRM_DEBUG_KMS("aux_ch native nack\n");
                        return -EREMOTEIO;
                case AUX_NATIVE_REPLY_DEFER:
-                       udelay(100);
+                       /*
+                        * For now, just give more slack to branch devices. We
+                        * could check the DPCD for I2C bit rate capabilities,
+                        * and if available, adjust the interval. We could also
+                        * be more careful with DP-to-Legacy adapters where a
+                        * long legacy cable may force very low I2C bit rates.
+                        */
+                       if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                           DP_DWN_STRM_PORT_PRESENT)
+                               usleep_range(500, 600);
+                       else
+                               usleep_range(300, 400);
                        continue;
                default:
                        DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
@@ -1390,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
                else
                        pipe_config->port_clock = 270000;
        }
+
+       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+           pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+               /*
+                * This is a big fat ugly hack.
+                *
+                * Some machines in UEFI boot mode provide us a VBT that has 18
+                * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+                * unknown we fail to light up. Yet the same BIOS boots up with
+                * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+                * max, not what it tells us to use.
+                *
+                * Note: This will still be broken if the eDP panel is not lit
+                * up by the BIOS, and thus we can't get the mode at module
+                * load.
+                */
+               DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+                             pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+               dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+       }
 }
 
 static bool is_edp_psr(struct intel_dp *intel_dp)
@@ -1456,7 +1487,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
 
        /* Avoid continuous PSR exit by masking memup and hpd */
        I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
-                  EDP_PSR_DEBUG_MASK_HPD);
+                  EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
        intel_dp->psr_setup_done = true;
 }
index a47799e832c6e61f5ff02afbf35ebe2cde73a4ed..9b7b68fd5d47cf6c979c8fdffd412d8381aabc0e 100644 (file)
@@ -280,6 +280,7 @@ struct intel_crtc_config {
        struct {
                u32 pos;
                u32 size;
+               bool enabled;
        } pch_pfit;
 
        /* FDI configuration, only valid if has_pch_encoder is set. */
@@ -792,6 +793,5 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
 extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-extern void i915_disable_vga_mem(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
index 406303b509c1c0afeed4be9379060e5417e212e0..7fa7df546c1ee6e36e119af34d6e79a9d7015e82 100644 (file)
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
                C(vtotal);
                C(clock);
 #undef C
+
+               drm_mode_set_crtcinfo(adjusted_mode, 0);
        }
 
        if (intel_dvo->dev.dev_ops->mode_fixup)
index 42114ecbae0e3c4c8dc51b7b02a1f658de857873..293564a2896a19038cc87ecba7648bce511388d2 100644 (file)
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 done:
        pipe_config->pch_pfit.pos = (x << 16) | y;
        pipe_config->pch_pfit.size = (width << 16) | height;
+       pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
 }
 
 static void
index 0c115cc4899ffbe00de6ca305d5cd32bd2590405..26c2ea3e985c22d7011e68f3e73d14fa8e896131 100644 (file)
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
                                    struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       uint32_t pixel_rate, pfit_size;
+       uint32_t pixel_rate;
 
        pixel_rate = intel_crtc->config.adjusted_mode.clock;
 
        /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
         * adjust the pixel_rate here. */
 
-       pfit_size = intel_crtc->config.pch_pfit.size;
-       if (pfit_size) {
+       if (intel_crtc->config.pch_pfit.enabled) {
                uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+               uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
                pipe_w = intel_crtc->config.requested_mode.hdisplay;
                pipe_h = intel_crtc->config.requested_mode.vdisplay;
@@ -3864,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
                                      dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
 
-       INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
-
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
        gen6_enable_rps_interrupts(dev);
@@ -4761,7 +4759,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* The below fixes the weird display corruption, a few pixels shifted
@@ -4955,6 +4955,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
                        GEN7_WA_L3_CHICKEN_MODE);
 
+       /* L3 caching of data atomics doesn't work -- disable it. */
+       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+       I915_WRITE(HSW_ROW_CHICKEN3,
+                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                        I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5681,5 +5686,7 @@ void intel_pm_init(struct drm_device *dev)
 
        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
+
+       INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
 }
 
index 85037b9d4934d406306634c486ed93cc92b771f3..49482fd5b76c6cad80298d2d2f67c0050c73a93d 100644 (file)
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        uint16_t h_sync_offset, v_sync_offset;
        int mode_clock;
 
+       memset(dtd, 0, sizeof(*dtd));
+
        width = mode->hdisplay;
        height = mode->vdisplay;
 
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
-       dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
-       dtd->part2.reserved = 0;
 }
 
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
                                         const struct intel_sdvo_dtd *dtd)
 {
-       mode->hdisplay = dtd->part1.h_active;
-       mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
-       mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
-       mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
-       mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
-       mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
-       mode->htotal = mode->hdisplay + dtd->part1.h_blank;
-       mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
-       mode->vdisplay = dtd->part1.v_active;
-       mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
-       mode->vsync_start = mode->vdisplay;
-       mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
-       mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
-       mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
-       mode->vsync_end = mode->vsync_start +
+       struct drm_display_mode mode = {};
+
+       mode.hdisplay = dtd->part1.h_active;
+       mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+       mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+       mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+       mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+       mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+       mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+       mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+       mode.vdisplay = dtd->part1.v_active;
+       mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+       mode.vsync_start = mode.vdisplay;
+       mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+       mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+       mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+       mode.vsync_end = mode.vsync_start +
                (dtd->part2.v_sync_off_width & 0xf);
-       mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
-       mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
-       mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+       mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+       mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+       mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
 
-       mode->clock = dtd->part1.clock * 10;
+       mode.clock = dtd->part1.clock * 10;
 
-       mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
        if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
-               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+               mode.flags |= DRM_MODE_FLAG_INTERLACE;
        if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
-               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+               mode.flags |= DRM_MODE_FLAG_PHSYNC;
+       else
+               mode.flags |= DRM_MODE_FLAG_NHSYNC;
        if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
-               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+               mode.flags |= DRM_MODE_FLAG_PVSYNC;
+       else
+               mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+       drm_mode_set_crtcinfo(&mode, 0);
+
+       drm_mode_copy(pmode, &mode);
 }
 
 static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
index f2c6d7909ae2d66ffa73ebe77d7c3b06d46403f7..dd6f84bf6c220e94c7f50e1d38ca7fea9f11cecb 100644 (file)
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
+       /* TV has its own notion of sync and other mode flags, so clear them. */
+       pipe_config->adjusted_mode.flags = 0;
+
+       /*
+        * FIXME: We don't check whether the input mode is actually what we want
+        * or whether userspace is doing something stupid.
+        */
+
        return true;
 }
 
index a60584763b61dde085f39c8058e4cb6900e2bace..a0b9d8a95b16c17ad6b11ae1b6da662e80484b04 100644 (file)
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
 
        /* reset completed fence seqno, just discard anything pending: */
        adreno_gpu->memptrs->fence = gpu->submitted_fence;
+       adreno_gpu->memptrs->rptr  = 0;
+       adreno_gpu->memptrs->wptr  = 0;
 
        gpu->funcs->pm_resume(gpu);
        ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
                        return;
        } while(time_before(jiffies, t));
 
-       DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+       DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
        /* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t freedwords;
+       unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
        do {
                uint32_t size = gpu->rb->size / 4;
                uint32_t wptr = get_wptr(gpu->rb);
                uint32_t rptr = adreno_gpu->memptrs->rptr;
                freedwords = (rptr + (size - 1) - wptr) % size;
+
+               if (time_after(jiffies, t)) {
+                       DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+                       break;
+               }
        } while(freedwords < ndwords);
 }
 
index 5db5bbaedae21d64b9764fc8d009dfa86b862543..bc7fd11ad8be4e7d4785d0f696d128220f25b7ca 100644 (file)
@@ -19,8 +19,6 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-#include <mach/iommu.h>
-
 static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
 
 static int mdp4_hw_init(struct msm_kms *kms)
index 864c9773636baf6aa7173c6b16a5f81841a2b922..b3a2f16290417cb055a8d7d89dc8f3f876b3a090 100644 (file)
@@ -18,8 +18,6 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 
-#include <mach/iommu.h>
-
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
        int i, ret;
 
        for (i = 0; i < cnt; i++) {
+               /* TODO maybe some day msm iommu won't require this hack: */
+               struct device *msm_iommu_get_ctx(const char *ctx_name);
                struct device *ctx = msm_iommu_get_ctx(names[i]);
                if (!ctx)
                        continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
                 * imx drm driver on iMX5
                 */
                dev_err(dev->dev, "failed to load kms\n");
-               ret = PTR_ERR(priv->kms);
+               ret = PTR_ERR(kms);
                goto fail;
        }
 
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                struct timespec *timeout)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
-       unsigned long start_jiffies = jiffies;
-       unsigned long remaining_jiffies;
        int ret;
 
-       if (time_after(start_jiffies, timeout_jiffies))
-               remaining_jiffies = 0;
-       else
-               remaining_jiffies = timeout_jiffies - start_jiffies;
-
-       ret = wait_event_interruptible_timeout(priv->fence_event,
-                       priv->completed_fence >= fence,
-                       remaining_jiffies);
-       if (ret == 0) {
-               DBG("timeout waiting for fence: %u (completed: %u)",
-                               fence, priv->completed_fence);
-               ret = -ETIMEDOUT;
-       } else if (ret != -ERESTARTSYS) {
-               ret = 0;
+       if (!priv->gpu)
+               return 0;
+
+       if (fence > priv->gpu->submitted_fence) {
+               DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+                               fence, priv->gpu->submitted_fence);
+               return -EINVAL;
+       }
+
+       if (!timeout) {
+               /* no-wait: */
+               ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+       } else {
+               unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+               unsigned long start_jiffies = jiffies;
+               unsigned long remaining_jiffies;
+
+               if (time_after(start_jiffies, timeout_jiffies))
+                       remaining_jiffies = 0;
+               else
+                       remaining_jiffies = timeout_jiffies - start_jiffies;
+
+               ret = wait_event_interruptible_timeout(priv->fence_event,
+                               fence_completed(dev, fence),
+                               remaining_jiffies);
+
+               if (ret == 0) {
+                       DBG("timeout waiting for fence: %u (completed: %u)",
+                                       fence, priv->completed_fence);
+                       ret = -ETIMEDOUT;
+               } else if (ret != -ERESTARTSYS) {
+                       ret = 0;
+               }
        }
 
        return ret;
@@ -681,7 +697,7 @@ static struct drm_driver msm_driver = {
        .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
-       .dumb_destroy       = msm_gem_dumb_destroy,
+       .dumb_destroy       = drm_gem_dumb_destroy,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
        .debugfs_cleanup    = msm_debugfs_cleanup,
index 80d75094bf0afd9bb24cb34971cd5622259717ca..df8f1d084bc1d76d1ee8dc5db948e7ebf9f87771 100644 (file)
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
                struct work_struct *work);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, uint32_t fence);
+               struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
                struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       return priv->completed_fence >= fence;
+}
+
 static inline int align_pitch(int width, int bpp)
 {
        int bytespp = (bpp + 7) / 8;
index 6b5a6c8c7658c5b91e905ac013779d022d299cfe..2bae46c66a30dd4c3eac623c07dbf55c70d32cb8 100644 (file)
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
                }
 
                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
-               if (!msm_obj->sgt) {
+               if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
-                       return ERR_PTR(-ENOMEM);
+                       return ERR_CAST(msm_obj->sgt);
                }
 
                msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
 out:
        switch (ret) {
        case -EAGAIN:
-               set_need_resched();
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
@@ -320,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
 }
 
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-               uint32_t handle)
-{
-       /* No special work needed, drop the reference and see what falls out */
-       return drm_gem_handle_delete(file, handle);
-}
-
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
 {
@@ -393,11 +385,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, uint32_t fence)
+               struct msm_gpu *gpu, bool write, uint32_t fence)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        msm_obj->gpu = gpu;
-       msm_obj->fence = fence;
+       if (write)
+               msm_obj->write_fence = fence;
+       else
+               msm_obj->read_fence = fence;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -411,7 +406,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        msm_obj->gpu = NULL;
-       msm_obj->fence = 0;
+       msm_obj->read_fence = 0;
+       msm_obj->write_fence = 0;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
@@ -433,8 +429,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;
 
-       if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
-               ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+       if (is_active(msm_obj)) {
+               uint32_t fence = 0;
+
+               if (op & MSM_PREP_READ)
+                       fence = msm_obj->write_fence;
+               if (op & MSM_PREP_WRITE)
+                       fence = max(fence, msm_obj->read_fence);
+               if (op & MSM_PREP_NOSYNC)
+                       timeout = NULL;
+
+               ret = msm_wait_fence_interruptable(dev, fence, timeout);
+       }
 
        /* TODO cache maintenance */
 
@@ -455,9 +461,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        uint64_t off = drm_vma_node_start(&obj->vma_node);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-                       msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+                       msm_obj->read_fence, msm_obj->write_fence,
+                       obj->name, obj->refcount.refcount.counter,
                        off, msm_obj->vaddr, obj->size);
 }
 
index d746f13d283c8cdaf7d885b2cd52e254a98f6061..0676f32e2c6ab917fe9980380c82137a1ad8f7ce 100644 (file)
@@ -36,7 +36,7 @@ struct msm_gem_object {
         */
        struct list_head mm_list;
        struct msm_gpu *gpu;     /* non-null if active */
-       uint32_t fence;
+       uint32_t read_fence, write_fence;
 
        /* Transiently in the process of submit ioctl, objects associated
         * with the submit are on submit->bo_list.. this only lasts for
index 3e1ef3a00f60cc929555768c1cb963b0b265e251..5281d4bc37f750e2162e4bd1f17859e4921c45cc 100644 (file)
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                }
 
                if (submit_bo.flags & BO_INVALID_FLAGS) {
-                       DBG("invalid flags: %x", submit_bo.flags);
+                       DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
                }
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                 */
                obj = idr_find(&file->object_idr, submit_bo.handle);
                if (!obj) {
-                       DBG("invalid handle %u at index %u", submit_bo.handle, i);
+                       DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
                        ret = -EINVAL;
                        goto out_unlock;
                }
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                msm_obj = to_msm_bo(obj);
 
                if (!list_empty(&msm_obj->submit_entry)) {
-                       DBG("handle %u at index %u already on submit list",
+                       DRM_ERROR("handle %u at index %u already on submit list\n",
                                        submit_bo.handle, i);
                        ret = -EINVAL;
                        goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
                struct msm_gem_object **obj, uint32_t *iova, bool *valid)
 {
        if (idx >= submit->nr_bos) {
-               DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
-               return EINVAL;
+               DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+                               idx, submit->nr_bos);
+               return -EINVAL;
        }
 
        if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
        int ret;
 
        if (offset % 4) {
-               DBG("non-aligned cmdstream buffer: %u", offset);
+               DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
                return -EINVAL;
        }
 
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                        return -EFAULT;
 
                if (submit_reloc.submit_offset % 4) {
-                       DBG("non-aligned reloc offset: %u",
+                       DRM_ERROR("non-aligned reloc offset: %u\n",
                                        submit_reloc.submit_offset);
                        return -EINVAL;
                }
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 
                if ((off >= (obj->base.size / 4)) ||
                                (off < last_offset)) {
-                       DBG("invalid offset %u at reloc %u", off, i);
+                       DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
                        return -EINVAL;
                }
 
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
 
                if (submit_cmd.size % 4) {
-                       DBG("non-aligned cmdstream buffer size: %u",
+                       DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
                                        submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
                }
 
-               if (submit_cmd.size >= msm_obj->base.size) {
-                       DBG("invalid cmdstream size: %u", submit_cmd.size);
+               if ((submit_cmd.size + submit_cmd.submit_offset) >=
+                               msm_obj->base.size) {
+                       DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
                }
index e1e1ec9321ffe5d8db74e53a5d52b8a4122a51d9..3bab937965d1f596405864676464fe6370bf87bd 100644 (file)
 static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
 {
        struct drm_device *dev = gpu->dev;
-       struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+       struct kgsl_device_platform_data *pdata;
 
        if (!pdev) {
                dev_err(dev->dev, "could not find dtv pdata\n");
                return;
        }
 
+       pdata = pdev->dev.platform_data;
        if (pdata->bus_scale_table) {
                gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
                DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
 static void hangcheck_handler(unsigned long data)
 {
        struct msm_gpu *gpu = (struct msm_gpu *)data;
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
        uint32_t fence = gpu->funcs->last_fence(gpu);
 
        if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
                gpu->hangcheck_fence = fence;
        } else if (fence < gpu->submitted_fence) {
                /* no progress and not done.. hung! */
-               struct msm_drm_private *priv = gpu->dev->dev_private;
                gpu->hangcheck_fence = fence;
+               dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+                               gpu->name);
+               dev_err(dev->dev, "%s:     completed fence: %u\n",
+                               gpu->name, fence);
+               dev_err(dev->dev, "%s:     submitted fence: %u\n",
+                               gpu->name, gpu->submitted_fence);
                queue_work(priv->wq, &gpu->recover_work);
        }
 
        /* if still more pending work, reset the hangcheck timer: */
        if (gpu->submitted_fence > gpu->hangcheck_fence)
                hangcheck_timer_reset(gpu);
+
+       /* workaround for missing irq: */
+       queue_work(priv->wq, &gpu->retire_work);
 }
 
 /*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
                obj = list_first_entry(&gpu->active_list,
                                struct msm_gem_object, mm_list);
 
-               if (obj->fence <= fence) {
+               if ((obj->read_fence <= fence) &&
+                               (obj->write_fence <= fence)) {
                        /* move to inactive: */
                        msm_gem_move_to_inactive(&obj->base);
                        msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                                        submit->gpu->id, &iova);
                }
 
-               msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+               if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+                       msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+               if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+                       msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
        }
        hangcheck_timer_reset(gpu);
        mutex_unlock(&dev->struct_mutex);
index 37712a6df92358e2583b87a53df4a04cd25ba290..e290cfa4acee09bd8f0a1a2b54462e38abf0bcec 100644 (file)
@@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
                pmc->use_msi = false;
                break;
        default:
-               pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
+               pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
                if (pmc->use_msi) {
                        pmc->use_msi = pci_enable_msi(device->pdev) == 0;
                        if (pmc->use_msi) {
index 8b3adec5fbb19e8a34cdb53fa83420f7b57f21b2..eae939d3fc1a262b4d0a7277e6d2d242eed99a6b 100644 (file)
@@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
        if (!client)
                return false;
 
-       if (!client->driver || client->driver->detect(client, info)) {
+       if (!client->dev.driver ||
+           to_i2c_driver(client->dev.driver)->detect(client, info)) {
                i2c_unregister_device(client);
                return false;
        }
index dd7d2e18271940ea3e9f6d9a90b09140c767e73e..cfbeee607b3ace2eb0c602919033b570580f2625 100644 (file)
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
 
 static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
 {
-       acpi_handle dhandle, nvidia_handle;
-       acpi_status status;
+       acpi_handle dhandle;
        int retval = 0;
 
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
 
-       status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(dhandle, "_DSM"))
                return false;
-       }
 
        if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
                retval |= NOUVEAU_DSM_HAS_MUX;
index acf667859cb6231040914270b5bafbc923004ea8..701c4c10e08b5858a5e083d05329af23c076669e 100644 (file)
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
        }
 
        /* set dma mask for device */
-       /* NOTE: this is a workaround for the hwmod not initializing properly */
-       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto fail;
 
        omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
 
index 32923d2f60021105a5092826bcd7810ca054c960..5e891b226acf9cf60db309fc8b8c596660fe1d96 100644 (file)
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                   (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                    (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else if (radeon_connector->use_digital)
+               if (radeon_audio != 0) {
+                       if (radeon_connector->use_digital &&
+                           (radeon_connector->audio == RADEON_AUDIO_ENABLE))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (radeon_connector->use_digital)
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_CRT;
+               } else if (radeon_connector->use_digital) {
                        return ATOM_ENCODER_MODE_DVI;
-               else
+               } else {
                        return ATOM_ENCODER_MODE_CRT;
+               }
                break;
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                   (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                    (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               if (radeon_audio != 0) {
+                       if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else
+                               return ATOM_ENCODER_MODE_DVI;
+               } else {
                        return ATOM_ENCODER_MODE_DVI;
+               }
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DisplayPort:
                dig_connector = radeon_connector->con_priv;
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
                        return ATOM_ENCODER_MODE_DP;
-               else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                        (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                         (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               } else if (radeon_audio != 0) {
+                       if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else
+                               return ATOM_ENCODER_MODE_DVI;
+               } else {
                        return ATOM_ENCODER_MODE_DVI;
+               }
                break;
        case DRM_MODE_CONNECTOR_eDP:
                return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                         * does the same thing and more.
                         */
                        if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-                           (rdev->family != CHIP_RS880))
+                           (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
                                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
index 05ff315e8e9e03f4d2ba242b4680894cea8632cd..9b6950d9b3c09cc193010a50bdd521939464d539 100644 (file)
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
         { 25000, 30000, RADEON_SCLK_UP }
 };
 
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+                                                    u32 *max_clock)
+{
+       u32 i, clock = 0;
+
+       if ((table == NULL) || (table->count == 0)) {
+               *max_clock = clock;
+               return;
+       }
+
+       for (i = 0; i < table->count; i++) {
+               if (clock < table->entries[i].clk)
+                       clock = table->entries[i].clk;
+       }
+       *max_clock = clock;
+}
+
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
                                        u32 clock, u16 max_voltage, u16 *voltage)
 {
@@ -1913,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
                        }
                        j++;
 
-                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
 
                        tmp = RREG32(MC_PMG_CMD_MRS);
@@ -1928,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
                        }
                        j++;
 
-                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
                        break;
                case MC_SEQ_RESERVE_M >> 2:
@@ -1942,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
                        }
                        j++;
 
-                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
                        break;
                default:
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
        bool disable_mclk_switching;
        u32 mclk, sclk;
        u16 vddc, vddci;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
            btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
                        ps->low.vddci = max_limits->vddci;
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       if (max_sclk_vddc) {
+               if (ps->low.sclk > max_sclk_vddc)
+                       ps->low.sclk = max_sclk_vddc;
+               if (ps->medium.sclk > max_sclk_vddc)
+                       ps->medium.sclk = max_sclk_vddc;
+               if (ps->high.sclk > max_sclk_vddc)
+                       ps->high.sclk = max_sclk_vddc;
+       }
+       if (max_mclk_vddci) {
+               if (ps->low.mclk > max_mclk_vddci)
+                       ps->low.mclk = max_mclk_vddci;
+               if (ps->medium.mclk > max_mclk_vddci)
+                       ps->medium.mclk = max_mclk_vddci;
+               if (ps->high.mclk > max_mclk_vddci)
+                       ps->high.mclk = max_mclk_vddci;
+       }
+       if (max_mclk_vddc) {
+               if (ps->low.mclk > max_mclk_vddc)
+                       ps->low.mclk = max_mclk_vddc;
+               if (ps->medium.mclk > max_mclk_vddc)
+                       ps->medium.mclk = max_mclk_vddc;
+               if (ps->high.mclk > max_mclk_vddc)
+                       ps->high.mclk = max_mclk_vddc;
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
index 1a15e0e41950604ec8c27df9b11c6933c43d622d..3b6f12b7760ba48066f2144b73e0dc82c6521946 100644 (file)
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
                                   struct rv7xx_pl *pl);
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
                                        u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+                                                    u32 *max_clock);
 void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
                                   u16 max_vddc, u16 max_vddci,
                                   u16 *vddc, u16 *vddci);
index 8996274430303ab3c2137b080381df612f3f181b..51e947a97edf945d02594dc0731b098ac033e1e0 100644 (file)
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
 };
 
 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+                                                           u32 *max_clock);
 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
                                       u32 arb_freq_src, u32 arb_freq_dest);
 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
        struct radeon_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
                }
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
index adbdb6503b0564b98867d67ac513b0d02f602054..9cd2bc989ac713d1604cec5ab1311ac3680066c5 100644 (file)
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
 static void cik_init_pg(struct radeon_device *rdev);
 static void cik_init_cg(struct radeon_device *rdev);
+static void cik_fini_pg(struct radeon_device *rdev);
+static void cik_fini_cg(struct radeon_device *rdev);
 static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
                                          bool enable);
 
@@ -1692,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -2845,10 +2848,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
                rdev->config.cik.tile_config |= (3 << 0);
                break;
        }
-       if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-               rdev->config.cik.tile_config |= 1 << 4;
-       else
-               rdev->config.cik.tile_config |= 0 << 4;
+       rdev->config.cik.tile_config |=
+               ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
        rdev->config.cik.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cik.tile_config |=
@@ -3182,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
                return r;
        }
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3198,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
+               radeon_ib_free(rdev, &ib);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
@@ -4187,6 +4191,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
 
+       /* disable CG/PG */
+       cik_fini_pg(rdev);
+       cik_fini_cg(rdev);
+
        /* stop the rlc */
        cik_rlc_stop(rdev);
 
@@ -4456,8 +4464,8 @@ static int cik_mc_init(struct radeon_device *rdev)
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* size in MB on si */
-       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        si_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
@@ -4735,12 +4743,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
        u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
        u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
        u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
-       char *block = (char *)&mc_client;
+       char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+               (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-       printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+       printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
               protections, vmid, addr,
               (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
-              block, mc_id);
+              block, mc_client, mc_id);
 }
 
 /**
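The cik_vm_decode_fault() hunk above stops handing a raw (char *)&mc_client to printk -- that cast is byte-order dependent and the four characters are not NUL-terminated -- and instead unpacks the register value MSB-first into a terminated buffer. A minimal, self-contained sketch of the same decoding, using an assumed example value (0x43425550) rather than anything read from hardware:

/* illustrative user-space sketch, not part of the commit */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mc_client = 0x43425550;        /* assumed example value */

        /* unpack MSB-first and NUL-terminate, as the new kernel code does */
        char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
                          (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

        printf("client tag: '%s'\n", block);    /* prints 'CBUP' */
        return 0;
}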
index 85a69d2ea3d2c8d86c9c552902d260dd576ac927..9fcd338c0fcf6bb1e113c80c8b116bcf5c56abbc 100644 (file)
@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        if (!dig->afmt->pin)
                return;
 
index 555164e270a79347fb36b9cadab09d90754c4306..b5c67a99dda9b34707c3ba7eb20a682f9c7fe089 100644 (file)
@@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sx_max_export_size = 256;
                rdev->config.evergreen.sx_max_export_pos_size = 64;
                rdev->config.evergreen.sx_max_export_smx_size = 192;
-               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.max_hw_contexts = 4;
                rdev->config.evergreen.sq_num_cf_insts = 2;
 
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
index f71ce390aebe581dd08998ce29d98e4b1058155c..fe1de855775ecca1491ced31f797f5cac6d05abc 100644 (file)
@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder)
                        radeon_connector = to_radeon_connector(connector);
@@ -288,8 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
 
        WREG32(HDMI_ACR_PACKET_CONTROL + offset,
-              HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-              HDMI_ACR_SOURCE); /* select SW CTS value */
+              HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
 
        evergreen_hdmi_update_ACR(encoder, mode->clock);
 
index 8768fd6a1e2707acf1006f9afff44eb1a173fd4b..4f6d2962767dced17ae7f5f3e44e79bce4f51bbf 100644 (file)
  * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
  */
 #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
-                /* 0 - SRC_ADDR
+                /* 0 - DST_ADDR
                 * 1 - GDS
                 */
 #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
 #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
                 * 1 - 8 in 16
                 * 2 - 8 in 32
index 93c1f9ef5da9b5ee7c3474bfaaae5bbe7497cf91..cac2866d79da441dfbbbcef860f713c9c7ad5a58 100644 (file)
@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "ni_mc: Bogus length %zu in firmware \"%s\"\n",
index 6c398a456d78b53668a73dbc82f369157c918647..f26339028154274609f31247580d03cbbed0d312 100644 (file)
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
        bool disable_mclk_switching;
        u32 mclk, sclk;
        u16 vddc, vddci;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
                }
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
index 24175717307bc23ca336f3e998df192f3036fccd..d71333033b2ba0bc63f9e710a23f710d7b96b066 100644 (file)
@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
        seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
-       for (j = 0; j <= count; j++) {
-               i = (rdp + j) & ring->ptr_mask;
-               seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+       if (ring->ready) {
+               for (j = 0; j <= count; j++) {
+                       i = (rdp + j) & ring->ptr_mask;
+                       seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+               }
        }
        return 0;
 }
index 2a1b1876b4312eb332c017cb027aeb346bf2c850..f9be22062df1eb8048384cabec6219e8a5238fff 100644 (file)
@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "smc: Bogus length %zu in firmware \"%s\"\n",
index e65f211a7be016eb9e3fe09e550ff87e8a12ae9e..5513d8f06252e13e11e27e9d21d5aecae775b5a8 100644 (file)
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
-                                       le16_to_cpu(limits->entries[i].usVoltage);
+                                       le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
index f443010ce90b1ed7607565474b2ba0851e378789..06022e3b9c3bdc0a0660a60c448e25038659e41d 100644 (file)
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits {
 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*      32kHz        44.1kHz       48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
-    {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25175,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
     {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
     {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
     {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
     {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
     {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
-    {  74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
     {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
-    { 148351, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148352, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
     { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
     {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
 };
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
  */
 static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
 {
-       if (*CTS == 0)
-               *CTS = clock * N / (128 * freq) * 1000;
+       u64 n;
+       u32 d;
+
+       if (*CTS == 0) {
+               n = (u64)clock * (u64)N * 1000ULL;
+               d = 128 * freq;
+               do_div(n, d);
+               *CTS = n;
+       }
        DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
                  N, *CTS, freq);
 }
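
The previous expression divided before multiplying by 1000, truncating the result, and its 32-bit intermediate product can overflow for high pixel clocks; the replacement widens to 64 bits and uses the kernel's do_div() helper. A plain-C sketch of the same arithmetic, with native 64-bit division standing in for do_div():

        #include <stdint.h>
        #include <stdio.h>

        /* CTS = (pixel clock in kHz * N * 1000) / (128 * audio rate in Hz),
         * computed in 64 bits so the intermediate product cannot overflow. */
        static uint32_t calc_cts(uint32_t clock, uint32_t n, uint32_t freq)
        {
                uint64_t num = (uint64_t)clock * n * 1000ULL;

                return (uint32_t)(num / (128ULL * freq));
        }

        int main(void)
        {
                /* 148.5 MHz TMDS clock at 44.1 kHz audio: expect CTS = 165000,
                 * matching the predefined ACR table entry above. */
                printf("CTS = %u\n", calc_cts(148500, 6272, 44100));
                return 0;
        }
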
@@ -257,10 +264,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
         * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
-       if (ASIC_IS_DCE3(rdev)) {
-               /* according to the reg specs, this should DCE3.2 only, but in
-                * practice it seems to cover DCE3.0 as well.
-                */
+       if (ASIC_IS_DCE32(rdev)) {
                if (dig->dig_encoder == 0) {
                        dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
                        dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +280,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
                        WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
                        WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
                }
+       } else if (ASIC_IS_DCE3(rdev)) {
+               /* according to the reg specs, this should be DCE3.2 only, but in
+                * practice it seems to cover DCE3.0/3.1 as well.
+                */
+               if (dig->dig_encoder == 0) {
+                       WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+                       WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+                       WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+               } else {
+                       WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+                       WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+                       WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+               }
        } else {
-               /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+               /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
                WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
                       AUDIO_DTO_MODULE(clock / 10));
        }
@@ -292,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder)
                        radeon_connector = to_radeon_connector(connector);
@@ -434,8 +454,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        }
 
        WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
-              HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-              HDMI0_ACR_SOURCE); /* select SW CTS value */
+              HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+              HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
 
        WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
               HDMI0_NULL_SEND | /* send null packets when required */
index e673fe26ea84d00b292f77c456089c4312372ab7..7b3c7b5932c5a289be3fd58d05b7392791c6a4ff 100644 (file)
  */
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
                 * 1 - 8 in 16
                 * 2 - 8 in 32
index 5003385a75129098e7519702f49bf86d67fbc8b2..8f7e04538fd624a5ecb1c302e8b9d5f2455f808a 100644 (file)
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
index 404e25d285ba816b1f34308ce79537b8a0d44a90..f79ee184ffd5849f4d0e0ec1b87b0d94d0f1f131 100644 (file)
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
        int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
        uint16_t data_offset, size;
        struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+       struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
        uint8_t frev, crev;
        int i, num_indices;
 
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
 
                num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                        sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
-
+               ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+                       ((u8 *)&ss_info->asSS_Info[0]);
                for (i = 0; i < num_indices; i++) {
-                       if (ss_info->asSS_Info[i].ucSS_Id == id) {
+                       if (ss_assign->ucSS_Id == id) {
                                ss->percentage =
-                                       le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
-                               ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
-                               ss->step = ss_info->asSS_Info[i].ucSS_Step;
-                               ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
-                               ss->range = ss_info->asSS_Info[i].ucSS_Range;
-                               ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+                                       le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
+                               ss->type = ss_assign->ucSpreadSpectrumType;
+                               ss->step = ss_assign->ucSS_Step;
+                               ss->delay = ss_assign->ucSS_Delay;
+                               ss->range = ss_assign->ucSS_Range;
+                               ss->refdiv = ss_assign->ucRecommendedRef_Div;
                                return true;
                        }
+                       ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+                               ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
                }
        }
        return false;
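
Both spread-spectrum hunks in this file replace asSS_Info[i] / asSpreadSpectrum[i] array indexing with a byte pointer advanced by the record size: the entries are packed back to back in the BIOS image, and (for the ASIC SS table) the record size depends on the table revision, so indexing the declared C array with the wrong element size walks past the real data. A generic sketch of that access pattern, with a hypothetical packed record type:

        #include <stddef.h>
        #include <stdint.h>

        /* Hypothetical packed record, as it would sit inside a firmware blob. */
        struct ss_record {
                uint8_t  id;
                uint16_t percentage;    /* little-endian in the blob */
                uint8_t  step;
        } __attribute__((packed));

        /* Walk 'count' records starting at 'base', stepping by the record size
         * that matches the table revision instead of trusting array indexing
         * on a struct whose in-memory layout may differ from the blob layout. */
        static const struct ss_record *find_record(const uint8_t *base, size_t count,
                                                   size_t record_size, uint8_t wanted_id)
        {
                const uint8_t *p = base;

                for (size_t i = 0; i < count; i++) {
                        const struct ss_record *rec = (const struct ss_record *)p;

                        if (rec->id == wanted_id)
                                return rec;
                        p += record_size;
                }
                return NULL;
        }

In the driver the step is sizeof(ATOM_ASIC_SS_ASSIGNMENT), _V2 or _V3 depending on crev, which is exactly what the loops above now do.
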
@@ -1477,6 +1481,12 @@ union asic_ss_info {
        struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
 };
 
+union asic_ss_assignment {
+       struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+       struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+       struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
 bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                      struct radeon_atom_ss *ss,
                                      int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
        int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
        uint16_t data_offset, size;
        union asic_ss_info *ss_info;
+       union asic_ss_assignment *ss_assign;
        uint8_t frev, crev;
        int i, num_indices;
 
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                        num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT);
 
+                       ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
                        for (i = 0; i < num_indices; i++) {
-                               if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+                               if ((ss_assign->v1.ucClockIndication == id) &&
+                                   (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
                                        ss->percentage =
-                                               le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-                                       ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-                                       ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+                                               le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+                                       ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
                                        return true;
                                }
+                               ss_assign = (union asic_ss_assignment *)
+                                       ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
                        }
                        break;
                case 2:
                        num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+                       ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
                        for (i = 0; i < num_indices; i++) {
-                               if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+                               if ((ss_assign->v2.ucClockIndication == id) &&
+                                   (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
                                        ss->percentage =
-                                               le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-                                       ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-                                       ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+                                               le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+                                       ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
                                        if ((crev == 2) &&
                                            ((id == ASIC_INTERNAL_ENGINE_SS) ||
                                             (id == ASIC_INTERNAL_MEMORY_SS)))
                                                ss->rate /= 100;
                                        return true;
                                }
+                               ss_assign = (union asic_ss_assignment *)
+                                       ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
                        }
                        break;
                case 3:
                        num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+                       ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
                        for (i = 0; i < num_indices; i++) {
-                               if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+                               if ((ss_assign->v3.ucClockIndication == id) &&
+                                   (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
                                        ss->percentage =
-                                               le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-                                       ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-                                       ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+                                               le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+                                       ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
                                        if ((id == ASIC_INTERNAL_ENGINE_SS) ||
                                            (id == ASIC_INTERNAL_MEMORY_SS))
                                                ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                                radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
                                        return true;
                                }
+                               ss_assign = (union asic_ss_assignment *)
+                                       ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
                        }
                        break;
                default:
index 79159b5da05bc778dc71b819a32c4139d28f967b..64565732cb98cf25af4e4d1f564e307c85cf4a8f 100644 (file)
@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                        drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_vborder_property,
                                                      0);
-                       drm_object_attach_property(&radeon_connector->base.base,
-                                                  rdev->mode_info.audio_property,
-                                                  RADEON_AUDIO_DISABLE);
+                       if (radeon_audio != 0)
+                               drm_object_attach_property(&radeon_connector->base.base,
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        subpixel_order = SubPixelHorizontalRGB;
                        connector->interlace_allowed = true;
                        if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        subpixel_order = SubPixelHorizontalRGB;
                        connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        connector->interlace_allowed = true;
                        /* in theory with a DP to VGA converter... */
index ac6ece61a47627931e9a3bcb68c3a34c636efe0a..80285e35bc6513fda3d5f5c975399a60feaab1e3 100644 (file)
@@ -85,7 +85,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                   VRAM, also put everything into VRAM on AGP cards to avoid
                   image corruptions */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-                   (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
+                   (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].lobj.domain =
                                RADEON_GEM_DOMAIN_VRAM;
index e29faa73b574c6b989a9c501d15032a218ae849b..841d0e09be3e9fb109b5afd45d012590a0ee6433 100644 (file)
@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
                        return r;
        }
        if ((radeon_testing & 1)) {
-               radeon_test_moves(rdev);
+               if (rdev->accel_working)
+                       radeon_test_moves(rdev);
+               else
+                       DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
        }
        if ((radeon_testing & 2)) {
-               radeon_test_syncing(rdev);
+               if (rdev->accel_working)
+                       radeon_test_syncing(rdev);
+               else
+                       DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
        }
        if (radeon_benchmarking) {
-               radeon_benchmark(rdev, radeon_benchmarking);
+               if (rdev->accel_working)
+                       radeon_benchmark(rdev, radeon_benchmarking);
+               else
+                       DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
        }
        return 0;
 }
index cdd12dcd988b1ed3260076b6d984cbf388dc35a7..9c14a1ba1de43aa9086741fda75c9aba3cc96394 100644 (file)
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
@@ -196,7 +196,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
index 87e1d69e8fdb3b859af4ed3d809ea93db40f2c07..4f6b7fc7ad3cad3a90017c6d7f75192c4c55452a 100644 (file)
@@ -945,6 +945,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
                if (enable) {
                        mutex_lock(&rdev->pm.mutex);
                        rdev->pm.dpm.uvd_active = true;
+                       /* disable this for now */
+#if 0
                        if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
                        else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -954,6 +956,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
                        else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
                        else
+#endif
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
                        rdev->pm.dpm.state = dpm_state;
                        mutex_unlock(&rdev->pm.mutex);
@@ -1002,7 +1005,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
 {
        /* set up the default clocks if the MC ucode is loaded */
        if ((rdev->family >= CHIP_BARTS) &&
-           (rdev->family <= CHIP_HAINAN) &&
+           (rdev->family <= CHIP_CAYMAN) &&
            rdev->mc_fw) {
                if (rdev->pm.default_vddc)
                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1046,7 +1049,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
        if (ret) {
                DRM_ERROR("radeon: dpm resume failed\n");
                if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_HAINAN) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
                    rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1097,7 +1100,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
                radeon_pm_init_profile(rdev);
                /* set up the default clocks if the MC ucode is loaded */
                if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_HAINAN) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
                    rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1183,7 +1186,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
        if (ret) {
                rdev->pm.dpm_enabled = false;
                if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_HAINAN) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
                    rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
index 46a25f037b843b70ebfb04e77ad55901111efd02..18254e1c3e718ee1c7c4cdd4321bf95b15581970 100644 (file)
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
         * packet that is the root issue
         */
        i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-       for (j = 0; j <= (count + 32); j++) {
-               seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
-               i = (i + 1) & ring->ptr_mask;
+       if (ring->ready) {
+               for (j = 0; j <= (count + 32); j++) {
+                       seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+                       i = (i + 1) & ring->ptr_mask;
+               }
        }
        return 0;
 }
index f4d6bcee9006451ca377b94ece370806a8f89d2d..12e8099a0823e23a1218c1e5a3f07e78bbfbfcd0 100644 (file)
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        uint64_t gtt_addr, vram_addr;
-       unsigned i, n, size;
-       int r, ring;
+       unsigned n, size;
+       int i, r, ring;
 
        switch (flag) {
        case RADEON_TEST_COPY_DMA:
index 1a01bbff9bfa4f5c9ca1d354cd99abae981a36ce..308eff5be1b420b74b18ccb1b1e0c89f4bd67f32 100644 (file)
@@ -799,7 +799,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
                    (rdev->pm.dpm.hd != hd)) {
                        rdev->pm.dpm.sd = sd;
                        rdev->pm.dpm.hd = hd;
-                       streams_changed = true;
+                       /* disable this for now */
+                       /*streams_changed = true;*/
                }
        }
 
index c354c1094967990a46caea46612caed123bc9b51..d96f7cbca0a115f58eaeca9df3a6e585d6b3445d 100644 (file)
@@ -85,6 +85,9 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
                               uint32_t incr, uint32_t flags);
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
                                         bool enable);
+static void si_fini_pg(struct radeon_device *rdev);
+static void si_fini_cg(struct radeon_device *rdev);
+static void si_rlc_stop(struct radeon_device *rdev);
 
 static const u32 verde_rlc_save_restore_register_list[] =
 {
@@ -1678,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
                       fw_name);
                release_firmware(rdev->smc_fw);
                rdev->smc_fw = NULL;
+               err = 0;
        } else if (rdev->smc_fw->size != smc_req_size) {
                printk(KERN_ERR
                       "si_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3608,6 +3612,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
 
+       /* disable PG/CG */
+       si_fini_pg(rdev);
+       si_fini_cg(rdev);
+
+       /* stop the rlc */
+       si_rlc_stop(rdev);
+
        /* Disable CP parsing/prefetching */
        WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
index cfe5d4d289159c832d71375e6eca8090adab7c3f..2332aa1bf93c7c40936710c7e8595c6d49f6514b 100644 (file)
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        bool disable_sclk_switching = false;
        u32 mclk, sclk;
        u16 vddc, vddci;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                }
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
@@ -5184,7 +5208,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
                                        table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
                        }
                        j++;
-                       if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
 
                        if (!pi->mem_gddr5) {
@@ -5194,7 +5218,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
                                        table->mc_reg_table_entry[k].mc_data[j] =
                                                (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
                                j++;
-                               if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                        return -EINVAL;
                        }
                        break;
@@ -5207,7 +5231,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
                                        (temp_reg & 0xffff0000) |
                                        (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
                        j++;
-                       if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
                        break;
                default:
index 52d2ab6b67a0b8876bdfa1710756f582b3927382..7e2e0ea66a008f491f5396bada706ca469b23f0b 100644 (file)
  * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
  */
 #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
-                /* 0 - SRC_ADDR
+                /* 0 - DST_ADDR
                 * 1 - GDS
                 */
 #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
 #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
                 * 1 - 8 in 16
                 * 2 - 8 in 32
index 7f998bf1cc9dd796b412dae45184dc5f1c76f184..9364129ba292e1b5412a0d10818ed158c95bb0fa 100644 (file)
@@ -1868,7 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       pi->enable_bapm = true;
+       pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index 1a90f0a2f7e5aa7b994558b18a3cffd2d1254ee8..0508f93b9795fbc2b4ccf9c8f34ea62383a7e138 100644 (file)
@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
        struct vmw_fpriv *vmw_fp;
 
        vmw_fp = vmw_fpriv(file_priv);
-       ttm_object_file_release(&vmw_fp->tfile);
-       if (vmw_fp->locked_master)
+
+       if (vmw_fp->locked_master) {
+               struct vmw_master *vmaster =
+                       vmw_master(vmw_fp->locked_master);
+
+               ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+               ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
+       }
+
+       ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
 }
 
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }
 
-       ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+       ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
index 0e67cf41065d801e6526d23dc5cdf375bdabe1c7..37fb4befec82634ccf3a504428af8ed60f4a0e56 100644 (file)
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
        if (new_backup)
                res->backup_offset = new_backup_offset;
 
-       if (!res->func->may_evict)
+       if (!res->func->may_evict || res->id == -1)
                return;
 
        write_lock(&dev_priv->resource_lock);
index 71b70e3a7a7183068dbc18d3b224bf3894444497..92c6e273339b0cd44ff38c4ead4c32a37e20d6ab 100644 (file)
@@ -241,6 +241,7 @@ config HID_HOLTEK
          - Sharkoon Drakonia / Perixx MX-2000 gaming mice
          - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
            Zalman ZM-GM1
+         - SHARKOON DarkGlider Gaming mouse
 
 config HOLTEK_FF
        bool "Holtek On Line Grip force feedback support"
@@ -322,7 +323,7 @@ config HID_LCPOWER
 
 config HID_LENOVO_TPKBD
        tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
-       depends on USB_HID
+       depends on HID
        select NEW_LEDS
        select LEDS_CLASS
        ---help---
@@ -361,19 +362,20 @@ config LOGITECH_FF
          - Logitech WingMan Force 3D
          - Logitech Formula Force EX
          - Logitech WingMan Formula Force GP
-         - Logitech MOMO Force wheel
 
          and if you want to enable force feedback for them.
          Note: if you say N here, this device will still be supported, but without
          force feedback.
 
 config LOGIRUMBLEPAD2_FF
-       bool "Logitech RumblePad/Rumblepad 2 force feedback support"
+       bool "Logitech force feedback support (variant 2)"
        depends on HID_LOGITECH
        select INPUT_FF_MEMLESS
        help
-         Say Y here if you want to enable force feedback support for Logitech
-         RumblePad and Rumblepad 2 devices.
+         Say Y here if you want to enable force feedback support for:
+         - Logitech RumblePad
+         - Logitech Rumblepad 2
+         - Logitech Formula Vibration Feedback Wheel
 
 config LOGIG940_FF
        bool "Logitech Flight System G940 force feedback support"
index 881cf7b4f9a433f8ae3e0f6721b3789aa66731f7..497558127bb3e63609af2b5e1f75265f9e7b1423 100644 (file)
@@ -46,6 +46,12 @@ module_param(iso_layout, uint, 0644);
 MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
                "(0 = disabled, [1] = enabled)");
 
+static unsigned int swap_opt_cmd;
+module_param(swap_opt_cmd, uint, 0644);
+MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. "
+               "(For people who want to keep Windows PC keyboard muscle memory. "
+               "[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+
 struct apple_sc {
        unsigned long quirks;
        unsigned int fn_on;
@@ -150,6 +156,14 @@ static const struct apple_key_translation apple_iso_keyboard[] = {
        { }
 };
 
+static const struct apple_key_translation swapped_option_cmd_keys[] = {
+       { KEY_LEFTALT,  KEY_LEFTMETA },
+       { KEY_LEFTMETA, KEY_LEFTALT },
+       { KEY_RIGHTALT, KEY_RIGHTMETA },
+       { KEY_RIGHTMETA, KEY_RIGHTALT },
+       { }
+};
+
 static const struct apple_key_translation *apple_find_translation(
                const struct apple_key_translation *table, u16 from)
 {
@@ -242,6 +256,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
                }
        }
 
+       if (swap_opt_cmd) {
+               trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+               if (trans) {
+                       input_event(input, usage->type, trans->to, value);
+                       return 1;
+               }
+       }
+
        return 0;
 }
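
The new swap_opt_cmd parameter reuses the driver's translation-table lookup: a zero-terminated from/to table is scanned and, on a hit, the swapped key code is reported instead of the original. A small user-space style sketch of the same lookup (the numeric values are the standard Linux input key codes for the four modifiers):

        #include <stdint.h>
        #include <stdio.h>

        struct key_translation { uint16_t from, to; };

        /* Zero-terminated table mirroring the swapped_option_cmd_keys idea. */
        static const struct key_translation swap_table[] = {
                {  56, 125 },   /* KEY_LEFTALT   -> KEY_LEFTMETA  */
                { 125,  56 },   /* KEY_LEFTMETA  -> KEY_LEFTALT   */
                { 100, 126 },   /* KEY_RIGHTALT  -> KEY_RIGHTMETA */
                { 126, 100 },   /* KEY_RIGHTMETA -> KEY_RIGHTALT  */
                {   0,   0 }
        };

        static uint16_t translate(uint16_t code)
        {
                for (const struct key_translation *t = swap_table; t->from; t++)
                        if (t->from == code)
                                return t->to;
                return code;    /* not in the table: pass it through unchanged */
        }

        int main(void)
        {
                printf("56 (LEFTALT) -> %u\n", translate(56));  /* prints 125 */
                return 0;
        }
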
 
index b8470b1a10fe8b40bef83386a0476591e5a067d7..351b8f33a4ac01472b6b90b12c0dde9e5956b4ad 100644 (file)
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
 
 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 {
-       __u32 raw_value;
+       __s32 raw_value;
        switch (item->tag) {
        case HID_GLOBAL_ITEM_TAG_PUSH:
 
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
                return 0;
 
        case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
-               /* Units exponent negative numbers are given through a
-                * two's complement.
-                * See "6.2.2.7 Global Items" for more information. */
-               raw_value = item_udata(item);
+               /* Many devices provide unit exponent as a two's complement
+                * nibble due to the common misunderstanding of HID
+                * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+                * both this and the standard encoding. */
+               raw_value = item_sdata(item);
                if (!(raw_value & 0xfffffff0))
                        parser->global.unit_exponent = hid_snto32(raw_value, 4);
                else
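
When the item fits in the low nibble, hid_snto32(raw_value, 4) sign-extends it so 0x8..0xF decode to -8..-1; otherwise the already sign-extended item_sdata() value is used as-is, which is how both the standard encoding and the common "two's complement nibble" misencoding end up in the same range. A small sketch of that kind of n-bit sign extension:

        #include <stdint.h>
        #include <stdio.h>

        /* Sign-extend the low 'bits' bits of value, the way hid_snto32() treats
         * a unit-exponent nibble: 0x0..0x7 stay positive, 0x8..0xF go negative. */
        static int32_t snto32(uint32_t value, unsigned int bits)
        {
                uint32_t sign_bit = 1u << (bits - 1);
                uint32_t mask = (1u << bits) - 1;

                value &= mask;
                return (value & sign_bit) ? (int32_t)(value | ~mask) : (int32_t)value;
        }

        int main(void)
        {
                /* A device encoding exponent -2 as the nibble 0xE decodes to -2. */
                printf("%d %d\n", snto32(0xE, 4), snto32(0x3, 4));
                return 0;
        }
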
@@ -1715,6 +1716,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
@@ -1752,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
@@ -1869,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
 };
index f042a6cf8b18c897fce2586f108bd91daaf9d33e..4e49462870abdf2f265c1528ae6c07fa4f783ec8 100644 (file)
@@ -181,7 +181,40 @@ fail:
  */
 static bool elo_broken_firmware(struct usb_device *dev)
 {
-       return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d;
+       struct usb_device *hub = dev->parent;
+       struct usb_device *child = NULL;
+       u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice);
+       u16 child_vid, child_pid;
+       int i;
+
+       if (!use_fw_quirk)
+               return false;
+       if (fw_lvl != 0x10d)
+               return false;
+
+       /* iterate sibling devices of the touch controller */
+       usb_hub_for_each_child(hub, i, child) {
+               child_vid = le16_to_cpu(child->descriptor.idVendor);
+               child_pid = le16_to_cpu(child->descriptor.idProduct);
+
+               /*
+                * If one of the devices below is attached as a sibling of
+                * the touch controller, then this is a newer IBM 4820
+                * monitor that does not need the IBM-requested workaround
+                * if the fw level is 0x010d - aka 'M'.
+                * No other HW can have this combination.
+                */
+               if (child_vid == 0x04b3) {
+                       switch (child_pid) {
+                       case 0x4676: /* 4820 21x Video */
+                       case 0x4677: /* 4820 51x Video */
+                       case 0x4678: /* 4820 2Lx Video */
+                       case 0x4679: /* 4820 5Lx Video */
+                               return false;
+                       }
+               }
+       }
+       return true;
 }
 
 static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
index 7e6db3cf46f9eb39746fcc4ac4b4e29296caa569..e696566cde46420334d1f416b53c2675ec581a7b 100644 (file)
@@ -27,6 +27,7 @@
  * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000
  * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
  *   and Zalman ZM-GM1
+ * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
  */
 
 static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                        }
                        break;
                case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
+               case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
                        if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
                                        && rdesc[111] == 0xff && rdesc[112] == 0x7f) {
                                hid_info(hdev, "Fixing up report descriptor\n");
@@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+                       USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
index e60e8d530697fcaf0a7c4a43f42e53b455de6f4d..c99facee7feb0b0e5fd6b7aaea120fb0c0d29364 100644 (file)
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD      0xa055
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067    0xa067
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A    0xa04a
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081    0xa081
 
 #define USB_VENDOR_ID_IMATION          0x0718
 #define USB_DEVICE_ID_DISC_STAKKA      0xd000
 #define USB_DEVICE_ID_DINOVO_EDGE      0xc714
 #define USB_DEVICE_ID_DINOVO_MINI      0xc71f
 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2     0xca03
+#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
 
 #define USB_VENDOR_ID_LUMIO            0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN   0x0003
 
 #define USB_VENDOR_ID_NINTENDO         0x057e
+#define USB_VENDOR_ID_NINTENDO2                0x054c
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2        0x0330
 
 #define USB_DEVICE_ID_SYNAPTICS_COMP_TP        0x0009
 #define USB_DEVICE_ID_SYNAPTICS_WTP    0x0010
 #define USB_DEVICE_ID_SYNAPTICS_DPAD   0x0013
+#define USB_DEVICE_ID_SYNAPTICS_LTS1   0x0af8
+#define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 
 #define USB_VENDOR_ID_THINGM           0x27b8
 #define USB_DEVICE_ID_BLINK1           0x01ed
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 
+#define USB_VENDOR_ID_SIS      0x0457
+#define USB_DEVICE_ID_SIS_TS   0x1013
+
 #endif
index 8741d953dcc80acb552bac187ffe6997ad95ca4e..d97f2323af573ecf229f9d76b536634c57c26e17 100644 (file)
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
        return -EINVAL;
 }
 
+
 /**
  * hidinput_calc_abs_res - calculate an absolute axis resolution
  * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
        case ABS_MT_TOOL_Y:
        case ABS_MT_TOUCH_MAJOR:
        case ABS_MT_TOUCH_MINOR:
-               if (field->unit & 0xffffff00)           /* Not a length */
-                       return 0;
-               unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
-               switch (field->unit & 0xf) {
-               case 0x1:                               /* If centimeters */
+               if (field->unit == 0x11) {              /* If centimeters */
                        /* Convert to millimeters */
                        unit_exponent += 1;
-                       break;
-               case 0x3:                               /* If inches */
+               } else if (field->unit == 0x13) {       /* If inches */
                        /* Convert to millimeters */
                        prev = physical_extents;
                        physical_extents *= 254;
                        if (physical_extents < prev)
                                return 0;
                        unit_exponent -= 1;
-                       break;
-               default:
+               } else {
                        return 0;
                }
                break;
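
The simplified branch above only accepts the plain SI-linear length units: 0x11 (centimetre) bumps the decimal exponent by one to reach millimetres, while 0x13 (inch) multiplies the physical extent by 254 and lowers the exponent by one (25.4 mm per inch). A rough standalone sketch of the resulting counts-per-millimetre computation, assuming the logical and physical extents are already known:

        #include <stdint.h>
        #include <stdio.h>

        /* Resolution in counts per millimetre, loosely following the idea of
         * hidinput_calc_abs_res(): normalise the physical extent to mm first. */
        static int32_t res_per_mm(int64_t logical_extent, int64_t physical_extent,
                                  int unit, int unit_exponent)
        {
                if (unit == 0x11) {             /* centimetres -> millimetres */
                        unit_exponent += 1;
                } else if (unit == 0x13) {      /* inches -> millimetres */
                        physical_extent *= 254;
                        unit_exponent -= 1;
                } else {
                        return 0;               /* not a supported length unit */
                }

                /* Fold the decimal exponent into the extents. */
                for (; unit_exponent > 0; unit_exponent--)
                        physical_extent *= 10;
                for (; unit_exponent < 0; unit_exponent++)
                        logical_extent *= 10;

                return physical_extent ? (int32_t)(logical_extent / physical_extent) : 0;
        }

        int main(void)
        {
                /* 4096 counts over a 4 inch axis: 4 in = 101.6 mm, ~40 counts/mm. */
                printf("%d\n", res_per_mm(4096, 4, 0x13, 0));
                return 0;
        }
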
index 31cf29a6ba17551ff5244a0f243d48a4719e662c..2d25b6cbbc051910a8eeaaab0525605c77a88da1 100644 (file)
 #include <linux/module.h>
 #include <linux/sysfs.h>
 #include <linux/device.h>
-#include <linux/usb.h>
 #include <linux/hid.h>
 #include <linux/input.h>
 #include <linux/leds.h>
-#include "usbhid/usbhid.h"
 
 #include "hid-ids.h"
 
@@ -41,10 +39,9 @@ static int tpkbd_input_mapping(struct hid_device *hdev,
                struct hid_input *hi, struct hid_field *field,
                struct hid_usage *usage, unsigned long **bit, int *max)
 {
-       struct usbhid_device *uhdev;
-
-       uhdev = (struct usbhid_device *) hdev->driver_data;
-       if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+       if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+               /* mark the device as pointer */
+               hid_set_drvdata(hdev, (void *)1);
                map_key_clear(KEY_MICMUTE);
                return 1;
        }
@@ -339,7 +336,7 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
        struct tpkbd_data_pointer *data_pointer;
        size_t name_sz = strlen(dev_name(dev)) + 16;
        char *name_mute, *name_micmute;
-       int i, ret;
+       int i;
 
        /* Validate required reports. */
        for (i = 0; i < 4; i++) {
@@ -354,7 +351,9 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
                hid_warn(hdev, "Could not create sysfs group\n");
        }
 
-       data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+       data_pointer = devm_kzalloc(&hdev->dev,
+                                   sizeof(struct tpkbd_data_pointer),
+                                   GFP_KERNEL);
        if (data_pointer == NULL) {
                hid_err(hdev, "Could not allocate memory for driver data\n");
                return -ENOMEM;
@@ -364,20 +363,13 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
        data_pointer->sensitivity = 0xa0;
        data_pointer->press_speed = 0x38;
 
-       name_mute = kzalloc(name_sz, GFP_KERNEL);
-       if (name_mute == NULL) {
+       name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+       name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+       if (name_mute == NULL || name_micmute == NULL) {
                hid_err(hdev, "Could not allocate memory for led data\n");
-               ret = -ENOMEM;
-               goto err;
+               return -ENOMEM;
        }
        snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
-
-       name_micmute = kzalloc(name_sz, GFP_KERNEL);
-       if (name_micmute == NULL) {
-               hid_err(hdev, "Could not allocate memory for led data\n");
-               ret = -ENOMEM;
-               goto err2;
-       }
        snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
 
        hid_set_drvdata(hdev, data_pointer);
@@ -397,19 +389,12 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
        tpkbd_features_set(hdev);
 
        return 0;
-
-err2:
-       kfree(name_mute);
-err:
-       kfree(data_pointer);
-       return ret;
 }
 
 static int tpkbd_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
        int ret;
-       struct usbhid_device *uhdev;
 
        ret = hid_parse(hdev);
        if (ret) {
@@ -423,9 +408,8 @@ static int tpkbd_probe(struct hid_device *hdev,
                goto err;
        }
 
-       uhdev = (struct usbhid_device *) hdev->driver_data;
-
-       if (uhdev->ifnum == 1) {
+       if (hid_get_drvdata(hdev)) {
+               hid_set_drvdata(hdev, NULL);
                ret = tpkbd_probe_tp(hdev);
                if (ret)
                        goto err_hid;
@@ -449,17 +433,11 @@ static void tpkbd_remove_tp(struct hid_device *hdev)
        led_classdev_unregister(&data_pointer->led_mute);
 
        hid_set_drvdata(hdev, NULL);
-       kfree(data_pointer->led_micmute.name);
-       kfree(data_pointer->led_mute.name);
-       kfree(data_pointer);
 }
 
 static void tpkbd_remove(struct hid_device *hdev)
 {
-       struct usbhid_device *uhdev;
-
-       uhdev = (struct usbhid_device *) hdev->driver_data;
-       if (uhdev->ifnum == 1)
+       if (hid_get_drvdata(hdev))
                tpkbd_remove_tp(hdev);
 
        hid_hw_stop(hdev);
index 6f12ecd36c8834fa9c9cb77b79d883dd4c246dc8..06eb45fa6331fee64152754a5ab9bb2275e3a61c 100644 (file)
@@ -45,7 +45,9 @@
 /* Size of the original descriptors of the Driving Force (and Pro) wheels */
 #define DF_RDESC_ORIG_SIZE     130
 #define DFP_RDESC_ORIG_SIZE    97
+#define FV_RDESC_ORIG_SIZE     130
 #define MOMO_RDESC_ORIG_SIZE   87
+#define MOMO2_RDESC_ORIG_SIZE  87
 
 /* Fixed report descriptors for Logitech Driving Force (and Pro)
  * wheel controllers
@@ -170,6 +172,73 @@ static __u8 dfp_rdesc_fixed[] = {
 0xC0                /*  End Collection                          */
 };
 
+static __u8 fv_rdesc_fixed[] = {
+0x05, 0x01,         /*  Usage Page (Desktop),                   */
+0x09, 0x04,         /*  Usage (Joystik),                        */
+0xA1, 0x01,         /*  Collection (Application),               */
+0xA1, 0x02,         /*      Collection (Logical),               */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x0A,         /*          Report Size (10),               */
+0x15, 0x00,         /*          Logical Minimum (0),            */
+0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),         */
+0x35, 0x00,         /*          Physical Minimum (0),           */
+0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),        */
+0x09, 0x30,         /*          Usage (X),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0x95, 0x0C,         /*          Report Count (12),              */
+0x75, 0x01,         /*          Report Size (1),                */
+0x25, 0x01,         /*          Logical Maximum (1),            */
+0x45, 0x01,         /*          Physical Maximum (1),           */
+0x05, 0x09,         /*          Usage Page (Button),            */
+0x19, 0x01,         /*          Usage Minimum (01h),            */
+0x29, 0x0C,         /*          Usage Maximum (0Ch),            */
+0x81, 0x02,         /*          Input (Variable),               */
+0x95, 0x02,         /*          Report Count (2),               */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),             */
+0x09, 0x01,         /*          Usage (01h),                    */
+0x81, 0x02,         /*          Input (Variable),               */
+0x09, 0x02,         /*          Usage (02h),                    */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x81, 0x02,         /*          Input (Variable),               */
+0x05, 0x01,         /*          Usage Page (Desktop),           */
+0x25, 0x07,         /*          Logical Maximum (7),            */
+0x46, 0x3B, 0x01,   /*          Physical Maximum (315),         */
+0x75, 0x04,         /*          Report Size (4),                */
+0x65, 0x14,         /*          Unit (Degrees),                 */
+0x09, 0x39,         /*          Usage (Hat Switch),             */
+0x81, 0x42,         /*          Input (Variable, Null State),   */
+0x75, 0x01,         /*          Report Size (1),                */
+0x95, 0x04,         /*          Report Count (4),               */
+0x65, 0x00,         /*          Unit,                           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),             */
+0x09, 0x01,         /*          Usage (01h),                    */
+0x25, 0x01,         /*          Logical Maximum (1),            */
+0x45, 0x01,         /*          Physical Maximum (1),           */
+0x81, 0x02,         /*          Input (Variable),               */
+0x05, 0x01,         /*          Usage Page (Desktop),           */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x09, 0x31,         /*          Usage (Y),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0x09, 0x32,         /*          Usage (Z),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0xC0,               /*      End Collection,                     */
+0xA1, 0x02,         /*      Collection (Logical),               */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x95, 0x07,         /*          Report Count (7),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x09, 0x03,         /*          Usage (03h),                    */
+0x91, 0x02,         /*          Output (Variable),              */
+0xC0,               /*      End Collection,                     */
+0xC0                /*  End Collection                          */
+};
+
 static __u8 momo_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
 0x09, 0x04,         /*  Usage (Joystik),                    */
@@ -216,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
 0xC0                /*  End Collection                      */
 };
 
+static __u8 momo2_rdesc_fixed[] = {
+0x05, 0x01,         /*  Usage Page (Desktop),               */
+0x09, 0x04,         /*  Usage (Joystik),                    */
+0xA1, 0x01,         /*  Collection (Application),           */
+0xA1, 0x02,         /*      Collection (Logical),           */
+0x95, 0x01,         /*          Report Count (1),           */
+0x75, 0x0A,         /*          Report Size (10),           */
+0x15, 0x00,         /*          Logical Minimum (0),        */
+0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),     */
+0x35, 0x00,         /*          Physical Minimum (0),       */
+0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),    */
+0x09, 0x30,         /*          Usage (X),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x95, 0x0A,         /*          Report Count (10),          */
+0x75, 0x01,         /*          Report Size (1),            */
+0x25, 0x01,         /*          Logical Maximum (1),        */
+0x45, 0x01,         /*          Physical Maximum (1),       */
+0x05, 0x09,         /*          Usage Page (Button),        */
+0x19, 0x01,         /*          Usage Minimum (01h),        */
+0x29, 0x0A,         /*          Usage Maximum (0Ah),        */
+0x81, 0x02,         /*          Input (Variable),           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+0x09, 0x00,         /*          Usage (00h),                */
+0x95, 0x04,         /*          Report Count (4),           */
+0x81, 0x02,         /*          Input (Variable),           */
+0x95, 0x01,         /*          Report Count (1),           */
+0x75, 0x08,         /*          Report Size (8),            */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),      */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),     */
+0x09, 0x01,         /*          Usage (01h),                */
+0x81, 0x02,         /*          Input (Variable),           */
+0x05, 0x01,         /*          Usage Page (Desktop),       */
+0x09, 0x31,         /*          Usage (Y),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x09, 0x32,         /*          Usage (Z),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+0x09, 0x00,         /*          Usage (00h),                */
+0x81, 0x02,         /*          Input (Variable),           */
+0xC0,               /*      End Collection,                 */
+0xA1, 0x02,         /*      Collection (Logical),           */
+0x09, 0x02,         /*          Usage (02h),                */
+0x95, 0x07,         /*          Report Count (7),           */
+0x91, 0x02,         /*          Output (Variable),          */
+0xC0,               /*      End Collection,                 */
+0xC0                /*  End Collection                      */
+};
+
 /*
  * Certain Logitech keyboards send in report #3 keys which are far
  * above the logical maximum described in descriptor. This extends
@@ -275,6 +392,24 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
 
+       case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+               if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
+                       hid_info(hdev,
+                               "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
+                       rdesc = momo2_rdesc_fixed;
+                       *rsize = sizeof(momo2_rdesc_fixed);
+               }
+               break;
+
+       case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+               if (*rsize == FV_RDESC_ORIG_SIZE) {
+                       hid_info(hdev,
+                               "fixing up Logitech Formula Vibration report descriptor\n");
+                       rdesc = fv_rdesc_fixed;
+                       *rsize = sizeof(fv_rdesc_fixed);
+               }
+               break;
+
        case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
                if (*rsize == DFP_RDESC_ORIG_SIZE) {
                        hid_info(hdev,
@@ -492,6 +627,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
                case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
                case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
                case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+               case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
                        field->application = HID_GD_MULTIAXIS;
                        break;
                default:
@@ -639,6 +775,8 @@ static const struct hid_device_id lg_devices[] = {
                .driver_data = LG_NOGET | LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
                .driver_data = LG_FF4 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
+               .driver_data = LG_FF2 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
                .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
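
A quick way to sanity-check the fixed descriptor above: summing Report Size × Report Count over the Input items of fv_rdesc_fixed gives 10 + 12 + 2 + 8 + 4 + 4 + 8 + 8 = 56 bits, i.e. a 7-byte input report, and the output collection is likewise 7 bytes (Report Count 7 × Size 8). A minimal standalone C sketch of that arithmetic, with the size/count pairs transcribed from the items above:

#include <stdio.h>

/* Report Size (bits) x Report Count pairs, transcribed from the Input items
 * of fv_rdesc_fixed above: X axis, 12 buttons, 2 vendor bits, one vendor
 * byte, the hat switch, 4 more vendor bits, then the Y and Z axes. */
static const struct { int size, count; } fv_inputs[] = {
	{ 10, 1 }, { 1, 12 }, { 1, 2 }, { 8, 1 },
	{ 4, 1 }, { 1, 4 }, { 8, 1 }, { 8, 1 },
};

int main(void)
{
	unsigned int i;
	int bits = 0;

	for (i = 0; i < sizeof(fv_inputs) / sizeof(fv_inputs[0]); i++)
		bits += fv_inputs[i].size * fv_inputs[i].count;

	printf("%d bits = %d bytes per input report\n", bits, bits / 8);
	return 0;
}
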
index 1a42eaa6ca0234a054631544732db69484c5eb82..0e3fb1a7e42174dd1dfb953e7209fc14ab50ed7a 100644 (file)
@@ -95,7 +95,7 @@ int lg2ff_init(struct hid_device *hid)
 
        hid_hw_request(hid, report, HID_REQ_SET_REPORT);
 
-       hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
+       hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
        return 0;
 }
index 5e5fe1b8eebb73e5d4533997a889eee87d637150..cb3250c5a397f9ba19b5974792e1cee374e9f9f0 100644 (file)
@@ -250,12 +250,12 @@ static struct mt_class mt_classes[] = {
        { .name = MT_CLS_GENERALTOUCH_TWOFINGERS,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
                        MT_QUIRK_VALID_IS_INRANGE |
-                       MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+                       MT_QUIRK_SLOT_IS_CONTACTID,
                .maxcontacts = 2
        },
        { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
-                       MT_QUIRK_SLOT_IS_CONTACTNUMBER
+                       MT_QUIRK_SLOT_IS_CONTACTID
        },
 
        { .name = MT_CLS_FLATFROG,
@@ -1173,6 +1173,21 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
                MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
                        USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
+       { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
 
        /* Gametel game controller */
        { .driver_data = MT_CLS_NSMU,
index 602c188e9d86cafedde93366bf3d114e69c3e552..6101816a7ddd8a96ae8ad0e9ecfa297c29fa637f 100644 (file)
@@ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
 }
 #define PROFILE_ATTR(number)                                   \
 static struct bin_attribute bin_attr_profile##number = {       \
-       .attr = { .name = "profile##number", .mode = 0660 },    \
+       .attr = { .name = "profile" #number, .mode = 0660 },    \
        .size = sizeof(struct kone_profile),                    \
        .read = kone_sysfs_read_profilex,                       \
        .write = kone_sysfs_write_profilex,                     \
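
The .name fix above is a C preprocessor detail worth spelling out: inside a string literal, ## is just two ordinary characters, so the sysfs attribute was literally named "profile##number"; the # operator stringifies the macro argument, and adjacent string literals are concatenated, giving "profile1", "profile2", and so on. The same change recurs in the hid-roccat-koneplus, -kovaplus and -pyra hunks below. A tiny userspace illustration of the idiom (the macro names here are made up, not the driver's):

#include <stdio.h>

/* Broken variant: ## inside a string literal is not token pasting. */
#define NAME_BROKEN(number)	"profile##number"
/* Fixed variant: # stringifies the argument, adjacent literals concatenate. */
#define NAME_FIXED(number)	"profile" #number

int main(void)
{
	printf("broken: %s\n", NAME_BROKEN(2));	/* prints profile##number */
	printf("fixed:  %s\n", NAME_FIXED(2));	/* prints profile2 */
	return 0;
}
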
index 5ddf605b6b890b15c2b9f94783072d951908e5db..5e99fcdc71b9cf0631cb7adcc8b61a22c36ae728 100644 (file)
@@ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
 
 #define PROFILE_ATTR(number)                                           \
 static struct bin_attribute bin_attr_profile##number##_settings = {    \
-       .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+       .attr = { .name = "profile" #number "_settings", .mode = 0440 },        \
        .size = KONEPLUS_SIZE_PROFILE_SETTINGS,                         \
        .read = koneplus_sysfs_read_profilex_settings,                  \
        .private = &profile_numbers[number-1],                          \
 };                                                                     \
 static struct bin_attribute bin_attr_profile##number##_buttons = {     \
-       .attr = { .name = "profile##number##_buttons", .mode = 0440 },  \
+       .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
        .size = KONEPLUS_SIZE_PROFILE_BUTTONS,                          \
        .read = koneplus_sysfs_read_profilex_buttons,                   \
        .private = &profile_numbers[number-1],                          \
index 515bc03136c0c6497b2ada738f6f8a10f7368311..0c8e1ef0b67d14fdb1c3bf2a6b575fdea94faf8e 100644 (file)
@@ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
 
 #define PROFILE_ATTR(number)                                           \
 static struct bin_attribute bin_attr_profile##number##_settings = {    \
-       .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+       .attr = { .name = "profile" #number "_settings", .mode = 0440 },        \
        .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,                         \
        .read = kovaplus_sysfs_read_profilex_settings,                  \
        .private = &profile_numbers[number-1],                          \
 };                                                                     \
 static struct bin_attribute bin_attr_profile##number##_buttons = {     \
-       .attr = { .name = "profile##number##_buttons", .mode = 0440 },  \
+       .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
        .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,                          \
        .read = kovaplus_sysfs_read_profilex_buttons,                   \
        .private = &profile_numbers[number-1],                          \
index 5a6dbbeee790d05ae7abbfb8e045b758b6a6f5ba..1a07e07d99a06c8972a2d80b8fefa8aa4f4b3848 100644 (file)
@@ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
 
 #define PROFILE_ATTR(number)                                           \
 static struct bin_attribute bin_attr_profile##number##_settings = {    \
-       .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+       .attr = { .name = "profile" #number "_settings", .mode = 0440 },        \
        .size = PYRA_SIZE_PROFILE_SETTINGS,                             \
        .read = pyra_sysfs_read_profilex_settings,                      \
        .private = &profile_numbers[number-1],                          \
 };                                                                     \
 static struct bin_attribute bin_attr_profile##number##_buttons = {     \
-       .attr = { .name = "profile##number##_buttons", .mode = 0440 },  \
+       .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
        .size = PYRA_SIZE_PROFILE_BUTTONS,                              \
        .read = pyra_sysfs_read_profilex_buttons,                       \
        .private = &profile_numbers[number-1],                          \
index b18320db5f7d18cba708ecd306ee3f394abebddb..bc37a1800166067af660aa108d2f528c8628a260 100644 (file)
@@ -419,21 +419,14 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
  */
 static int sixaxis_set_operational_usb(struct hid_device *hdev)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       struct usb_device *dev = interface_to_usbdev(intf);
-       __u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
        int ret;
        char *buf = kmalloc(18, GFP_KERNEL);
 
        if (!buf)
                return -ENOMEM;
 
-       ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-                                HID_REQ_GET_REPORT,
-                                USB_DIR_IN | USB_TYPE_CLASS |
-                                USB_RECIP_INTERFACE,
-                                (3 << 8) | 0xf2, ifnum, buf, 17,
-                                USB_CTRL_GET_TIMEOUT);
+       ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
+
        if (ret < 0)
                hid_err(hdev, "can't set operational mode\n");
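
The hunk above fetches the controller's magic 0xf2 feature report through the HID core's raw-report hook instead of a hand-rolled USB control transfer, so the same code works regardless of which transport backs the device. For reference, a minimal userspace way to pull the same feature report through hidraw; the /dev/hidraw0 path is an assumption and error handling is trimmed:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[18] = { 0xf2 };	/* buf[0] selects the report number */
	int fd = open("/dev/hidraw0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* GET_REPORT(Feature) over whatever transport backs the node */
	if (ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf) < 0)
		perror("HIDIOCGFEATURE");
	else
		printf("feature report 0x%02x fetched\n", buf[0]);
	close(fd);
	return 0;
}
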
 
index abb20db2b443ccdcc34159a97fcc83307db65c40..1446f526ee8bbade2290b615fc14b6aae35dc09f 100644 (file)
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
                goto done;
        }
 
-       if (vendor == USB_VENDOR_ID_NINTENDO) {
+       if (vendor == USB_VENDOR_ID_NINTENDO ||
+           vendor == USB_VENDOR_ID_NINTENDO2) {
                if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
                        devtype = WIIMOTE_DEV_GEN10;
                        goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
 static const struct hid_device_id wiimote_hid_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
                                USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
+                               USB_DEVICE_ID_NINTENDO_WIIMOTE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
                                USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
index 2e7d644dba18a6fc1169c7b83ee7eded30f6e09f..71adf9e60b13f4aafa22ba94183bb4dcb8a3a21d 100644 (file)
@@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = {
  * the rumble motor, this flag shouldn't be set.
  */
 
+/* used by wiimod_rumble and wiipro_rumble */
+static void wiimod_rumble_worker(struct work_struct *work)
+{
+       struct wiimote_data *wdata = container_of(work, struct wiimote_data,
+                                                 rumble_worker);
+
+       spin_lock_irq(&wdata->state.lock);
+       wiiproto_req_rumble(wdata, wdata->state.cache_rumble);
+       spin_unlock_irq(&wdata->state.lock);
+}
+
 static int wiimod_rumble_play(struct input_dev *dev, void *data,
                              struct ff_effect *eff)
 {
        struct wiimote_data *wdata = input_get_drvdata(dev);
        __u8 value;
-       unsigned long flags;
 
        /*
         * The wiimote supports only a single rumble motor so if any magnitude
@@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
        else
                value = 0;
 
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_rumble(wdata, value);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
+       /* Locking state.lock here might deadlock with input_event() calls.
+        * schedule_work acts as barrier. Merging multiple changes is fine. */
+       wdata->state.cache_rumble = value;
+       schedule_work(&wdata->rumble_worker);
 
        return 0;
 }
@@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
 static int wiimod_rumble_probe(const struct wiimod_ops *ops,
                               struct wiimote_data *wdata)
 {
+       INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+
        set_bit(FF_RUMBLE, wdata->input->ffbit);
        if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play))
                return -ENOMEM;
@@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops,
 {
        unsigned long flags;
 
+       cancel_work_sync(&wdata->rumble_worker);
+
        spin_lock_irqsave(&wdata->state.lock, flags);
        wiiproto_req_rumble(wdata, 0);
        spin_unlock_irqrestore(&wdata->state.lock, flags);
@@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
 {
        struct wiimote_data *wdata = input_get_drvdata(dev);
        __u8 value;
-       unsigned long flags;
 
        /*
         * The wiimote supports only a single rumble motor so if any magnitude
@@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
        else
                value = 0;
 
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_rumble(wdata, value);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
+       /* Locking state.lock here might deadlock with input_event() calls.
+        * schedule_work acts as barrier. Merging multiple changes is fine. */
+       wdata->state.cache_rumble = value;
+       schedule_work(&wdata->rumble_worker);
 
        return 0;
 }
@@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
 {
        int ret, i;
 
+       INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+
        wdata->extension.input = input_allocate_device();
        if (!wdata->extension.input)
                return -ENOMEM;
@@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
        if (!wdata->extension.input)
                return;
 
+       input_unregister_device(wdata->extension.input);
+       wdata->extension.input = NULL;
+       cancel_work_sync(&wdata->rumble_worker);
+
        spin_lock_irqsave(&wdata->state.lock, flags);
        wiiproto_req_rumble(wdata, 0);
        spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       input_unregister_device(wdata->extension.input);
-       wdata->extension.input = NULL;
 }
 
 static const struct wiimod_ops wiimod_pro = {
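
The rumble changes above follow one pattern: the force-feedback play callback may be reached from input_event() with locks already held, so instead of taking state.lock there it only caches the newest value and schedules rumble_worker, which is the single place that issues the protocol request; coalescing several queued changes is harmless for a one-motor device. A rough userspace analogue of that shape, with a pthread standing in for the workqueue (purely illustrative; none of these names are kernel APIs):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int cached_rumble;	/* newest requested value; later requests overwrite it */
static int pending;

/* "play" callback: runs in a context where talking to the device directly
 * would be unsafe, so it only records the request and wakes the worker. */
static void play(int value)
{
	pthread_mutex_lock(&lock);
	cached_rumble = value;
	pending = 1;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

/* worker thread: the only place that actually "talks to the device" */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		int value;

		pthread_mutex_lock(&lock);
		while (!pending)
			pthread_cond_wait(&kick, &lock);
		value = cached_rumble;	/* merged view of all requests so far */
		pending = 0;
		pthread_mutex_unlock(&lock);

		printf("apply rumble %d\n", value);	/* stand-in for wiiproto_req_rumble() */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	play(1);
	play(0);	/* may be coalesced with the previous request */
	sleep(1);	/* give the worker a chance to run before exiting */
	return 0;
}
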
index f1474f372c0bba1c56b3f7bd0c5eda82be29ef85..75db0c4000377f03bf262eb66a5492046aa012c8 100644 (file)
@@ -133,13 +133,15 @@ struct wiimote_state {
        __u8 *cmd_read_buf;
        __u8 cmd_read_size;
 
-       /* calibration data */
+       /* calibration/cache data */
        __u16 calib_bboard[4][3];
+       __u8 cache_rumble;
 };
 
 struct wiimote_data {
        struct hid_device *hdev;
        struct input_dev *input;
+       struct work_struct rumble_worker;
        struct led_classdev *leds[4];
        struct input_dev *accel;
        struct input_dev *ir;
index 8918dd12bb6915d08ab588e04b22b64a24d6e827..6a6dd5cd783343c0804f26311674458a19d6519b 100644 (file)
@@ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on)
 static void drop_ref(struct hidraw *hidraw, int exists_bit)
 {
        if (exists_bit) {
-               hid_hw_close(hidraw->hid);
                hidraw->exist = 0;
-               if (hidraw->open)
+               if (hidraw->open) {
+                       hid_hw_close(hidraw->hid);
                        wake_up_interruptible(&hidraw->wait);
+               }
        } else {
                --hidraw->open;
        }
-
-       if (!hidraw->open && !hidraw->exist) {
-               device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
-               hidraw_table[hidraw->minor] = NULL;
-               kfree(hidraw);
+       if (!hidraw->open) {
+               if (!hidraw->exist) {
+                       device_destroy(hidraw_class,
+                                       MKDEV(hidraw_major, hidraw->minor));
+                       hidraw_table[hidraw->minor] = NULL;
+                       kfree(hidraw);
+               } else {
+                       /* close device for last reader */
+                       hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
+                       hid_hw_close(hidraw->hid);
+               }
        }
 }
 
index c1336193b04ba3deb92c3edf2468551f3f6933a9..fd7ce374f812ead7960b72a5d00ac3b5908ab221 100644 (file)
@@ -854,10 +854,10 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
                0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
                0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
        };
-       struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object params[4], *obj;
+       union acpi_object params[4];
        struct acpi_object_list input;
        struct acpi_device *adev;
+       unsigned long long value;
        acpi_handle handle;
 
        handle = ACPI_HANDLE(&client->dev);
@@ -878,22 +878,14 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        params[3].package.count = 0;
        params[3].package.elements = NULL;
 
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+       if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
+                                                               &value))) {
                dev_err(&client->dev, "device _DSM execution failed\n");
                return -ENODEV;
        }
 
-       obj = (union acpi_object *)buf.pointer;
-       if (obj->type != ACPI_TYPE_INTEGER) {
-               dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
-                       obj->type);
-               kfree(buf.pointer);
-               return -EINVAL;
-       }
-
-       pdata->hid_descriptor_address = obj->integer.value;
+       pdata->hid_descriptor_address = value;
 
-       kfree(buf.pointer);
        return 0;
 }
 
index 5bf2fb785844919bbc27ad20160b98a998102aa5..93b00d76374cee2b82be0a9deab2a0f8d7c6755b 100644 (file)
@@ -615,7 +615,7 @@ static const struct file_operations uhid_fops = {
 
 static struct miscdevice uhid_misc = {
        .fops           = &uhid_fops,
-       .minor          = MISC_DYNAMIC_MINOR,
+       .minor          = UHID_MINOR,
        .name           = UHID_NAME,
 };
 
@@ -634,4 +634,5 @@ module_exit(uhid_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
 MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
+MODULE_ALIAS_MISCDEV(UHID_MINOR);
 MODULE_ALIAS("devname:" UHID_NAME);
index 07345521f4210f4988627cff4421beedc96d1c06..3fca3be08337d76fdd8ecaec09c3d1693b0ac4ef 100644 (file)
@@ -110,6 +110,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
 
        { 0, 0 }
 };
index 8f4743ab5fb279ae416fbce5ff4e16e58db45eaf..936093e0271e3c7fffccce7fe0e688c4d6f3ad01 100644 (file)
@@ -195,7 +195,7 @@ int vmbus_connect(void)
 
        do {
                ret = vmbus_negotiate_version(msginfo, version);
-               if (ret)
+               if (ret == -ETIMEDOUT)
                        goto cleanup;
 
                if (vmbus_connection.conn_state == CONNECTED)
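
The one-line change above makes the negotiation loop treat only a timeout as fatal: if the host merely rejects the offered protocol version, the loop falls through and retries with the next (older) entry. A rough standalone sketch of that retry shape; the version numbers and the rejecting "host" below are made up for illustration:

#include <stdio.h>
#include <errno.h>

/* newest-first list of protocol versions to offer (illustrative values) */
static const unsigned int versions[] = { 0x00030000, 0x00020004, 0x00000001 };

/* stand-in for the host: pretend it only understands the oldest version */
static int negotiate(unsigned int version)
{
	return version == 0x00000001 ? 0 : -EINVAL;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		int ret = negotiate(versions[i]);

		if (ret == -ETIMEDOUT) {	/* host never answered: give up */
			fprintf(stderr, "negotiation timed out\n");
			return 1;
		}
		if (ret == 0) {			/* host accepted this version */
			printf("connected with version 0x%08x\n", versions[i]);
			return 0;
		}
		/* any other error: version rejected, try the next one */
	}
	fprintf(stderr, "no common version\n");
	return 1;
}
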
index 28b03325b8729ffdf2d3356538dc2515b5af5de2..09988b2896226552e4be5cd69d5f09ba3f842d11 100644 (file)
 /*
  * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
  */
+#define WS2008_SRV_MAJOR       1
+#define WS2008_SRV_MINOR       0
+#define WS2008_SRV_VERSION     (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
+
 #define WIN7_SRV_MAJOR   3
 #define WIN7_SRV_MINOR   0
-#define WIN7_SRV_MAJOR_MINOR     (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
+#define WIN7_SRV_VERSION     (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
 
 #define WIN8_SRV_MAJOR   4
 #define WIN8_SRV_MINOR   0
-#define WIN8_SRV_MAJOR_MINOR     (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+#define WIN8_SRV_VERSION     (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
 
 /*
  * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
 
        struct icmsg_hdr *icmsghdrp;
        struct icmsg_negotiate *negop = NULL;
+       int util_fw_version;
+       int kvp_srv_version;
 
        if (kvp_transaction.active) {
                /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        /*
-                        * We start with win8 version and if the host cannot
-                        * support that we use the previous version.
+                        * Based on the host, select appropriate
+                        * framework and service versions we will
+                        * negotiate.
                         */
-                       if (vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_MAJOR_MINOR,
-                                WIN8_SRV_MAJOR_MINOR))
-                               goto done;
-
+                       switch (vmbus_proto_version) {
+                       case (VERSION_WS2008):
+                               util_fw_version = UTIL_WS2K8_FW_VERSION;
+                               kvp_srv_version = WS2008_SRV_VERSION;
+                               break;
+                       case (VERSION_WIN7):
+                               util_fw_version = UTIL_FW_VERSION;
+                               kvp_srv_version = WIN7_SRV_VERSION;
+                               break;
+                       default:
+                               util_fw_version = UTIL_FW_VERSION;
+                               kvp_srv_version = WIN8_SRV_VERSION;
+                       }
                        vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_MAJOR_MINOR,
-                                WIN7_SRV_MAJOR_MINOR);
+                                recv_buffer, util_fw_version,
+                                kvp_srv_version);
 
                } else {
                        kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
                        return;
 
                }
-done:
 
                icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
                        | ICMSGHDRFLAG_RESPONSE;
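
The *_VERSION macros introduced above pack a major/minor pair into one 32-bit value (major in the high 16 bits, minor in the low 16), which is the form vmbus_prep_negotiate_resp() is handed. A tiny standalone check of the encoding, with values taken from the macros above:

#include <stdio.h>
#include <stdint.h>

#define SRV_VERSION(major, minor)	((uint32_t)(major) << 16 | (minor))

int main(void)
{
	uint32_t ws2008 = SRV_VERSION(1, 0);	/* WS2008_SRV_VERSION */
	uint32_t win8   = SRV_VERSION(4, 0);	/* WIN8_SRV_VERSION   */

	printf("ws2008: 0x%08x (major %u, minor %u)\n",
	       ws2008, ws2008 >> 16, ws2008 & 0xffff);
	printf("win8:   0x%08x (major %u, minor %u)\n",
	       win8, win8 >> 16, win8 & 0xffff);
	return 0;
}
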
index e4572f3f2834fe917c5f4dbf097a30cbc8f3b383..0c354622437681d1b6dd157f718bb27cf8dbdbed 100644 (file)
@@ -26,7 +26,7 @@
 
 #define VSS_MAJOR  5
 #define VSS_MINOR  0
-#define VSS_MAJOR_MINOR    (VSS_MAJOR << 16 | VSS_MINOR)
+#define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
 
 
 
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_MAJOR_MINOR,
-                                VSS_MAJOR_MINOR);
+                                recv_buffer, UTIL_FW_VERSION,
+                                VSS_VERSION);
                } else {
                        vss_msg = (struct hv_vss_msg *)&recv_buffer[
                                sizeof(struct vmbuspipe_hdr) +
index cb82233541b196a53177007ebb4520a9e4112442..273e3ddb3a20b00c5d1ae03bab6368c5fcfbc26f 100644 (file)
 #include <linux/reboot.h>
 #include <linux/hyperv.h>
 
-#define SHUTDOWN_MAJOR 3
-#define SHUTDOWN_MINOR  0
-#define SHUTDOWN_MAJOR_MINOR   (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
 
-#define TIMESYNCH_MAJOR        3
-#define TIMESYNCH_MINOR 0
-#define TIMESYNCH_MAJOR_MINOR  (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR)
+#define SD_MAJOR       3
+#define SD_MINOR       0
+#define SD_VERSION     (SD_MAJOR << 16 | SD_MINOR)
 
-#define HEARTBEAT_MAJOR        3
-#define HEARTBEAT_MINOR 0
-#define HEARTBEAT_MAJOR_MINOR  (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR)
+#define SD_WS2008_MAJOR                1
+#define SD_WS2008_VERSION      (SD_WS2008_MAJOR << 16 | SD_MINOR)
+
+#define TS_MAJOR       3
+#define TS_MINOR       0
+#define TS_VERSION     (TS_MAJOR << 16 | TS_MINOR)
+
+#define TS_WS2008_MAJOR                1
+#define TS_WS2008_VERSION      (TS_WS2008_MAJOR << 16 | TS_MINOR)
+
+#define HB_MAJOR       3
+#define HB_MINOR 0
+#define HB_VERSION     (HB_MAJOR << 16 | HB_MINOR)
+
+#define HB_WS2008_MAJOR        1
+#define HB_WS2008_VERSION      (HB_WS2008_MAJOR << 16 | HB_MINOR)
+
+static int sd_srv_version;
+static int ts_srv_version;
+static int hb_srv_version;
+static int util_fw_version;
 
 static void shutdown_onchannelcallback(void *context);
 static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                       shut_txf_buf, UTIL_FW_MAJOR_MINOR,
-                                       SHUTDOWN_MAJOR_MINOR);
+                                       shut_txf_buf, util_fw_version,
+                                       sd_srv_version);
                } else {
                        shutdown_msg =
                                (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
        struct icmsg_hdr *icmsghdrp;
        struct ictimesync_data *timedatap;
        u8 *time_txf_buf = util_timesynch.recv_buffer;
+       struct icmsg_negotiate *negop = NULL;
 
        vmbus_recvpacket(channel, time_txf_buf,
                         PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
                                sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
-                                               UTIL_FW_MAJOR_MINOR,
-                                               TIMESYNCH_MAJOR_MINOR);
+                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
+                                               time_txf_buf,
+                                               util_fw_version,
+                                               ts_srv_version);
                } else {
                        timedatap = (struct ictimesync_data *)&time_txf_buf[
                                sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
        struct icmsg_hdr *icmsghdrp;
        struct heartbeat_msg_data *heartbeat_msg;
        u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+       struct icmsg_negotiate *negop = NULL;
 
        vmbus_recvpacket(channel, hbeat_txf_buf,
                         PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
                                sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, NULL,
-                               hbeat_txf_buf, UTIL_FW_MAJOR_MINOR,
-                               HEARTBEAT_MAJOR_MINOR);
+                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
+                               hbeat_txf_buf, util_fw_version,
+                               hb_srv_version);
                } else {
                        heartbeat_msg =
                                (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
                goto error;
 
        hv_set_drvdata(dev, srv);
+       /*
+        * Based on the host; initialize the framework and
+        * service version numbers we will negotiate.
+        */
+       switch (vmbus_proto_version) {
+       case (VERSION_WS2008):
+               util_fw_version = UTIL_WS2K8_FW_VERSION;
+               sd_srv_version = SD_WS2008_VERSION;
+               ts_srv_version = TS_WS2008_VERSION;
+               hb_srv_version = HB_WS2008_VERSION;
+               break;
+
+       default:
+               util_fw_version = UTIL_FW_VERSION;
+               sd_srv_version = SD_VERSION;
+               ts_srv_version = TS_VERSION;
+               hb_srv_version = HB_VERSION;
+       }
+
        return 0;
 
 error:
index 2ebd6ce46108ec4e44797cb695194672ad81bdac..9c8a6bab822866ccab860a04fd276979bd69a115 100644 (file)
@@ -164,7 +164,7 @@ static const u8 abituguru_bank2_max_threshold = 50;
 static const int abituguru_pwm_settings_multiplier[5] = { 0, 1, 1, 1000, 1000 };
 /*
  * Min / Max allowed values for pwm_settings. Note: pwm1 (CPU fan) is a
- * special case the minium allowed pwm% setting for this is 30% (77) on
+ * special case the minimum allowed pwm% setting for this is 30% (77) on
  * some MB's this special case is handled in the code!
  */
 static const u8 abituguru_pwm_min[5] = { 0, 170, 170, 25, 25 };
@@ -517,7 +517,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
 
        ABIT_UGURU_DEBUG(2, "testing bank1 sensor %d\n", (int)sensor_addr);
        /*
-        * Volt sensor test, enable volt low alarm, set min value ridicously
+        * Volt sensor test, enable volt low alarm, set min value ridiculously
         * high, or vica versa if the reading is very high. If its a volt
         * sensor this should always give us an alarm.
         */
@@ -564,7 +564,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
 
        /*
         * Temp sensor test, enable sensor as a temp sensor, set beep value
-        * ridicously low (but not too low, otherwise uguru ignores it).
+        * ridiculously low (but not too low, otherwise uguru ignores it).
         * If its a temp sensor this should always give us an alarm.
         */
        buf[0] = ABIT_UGURU_TEMP_HIGH_ALARM_ENABLE;
index 0cac8c0b001af817b1df36d4d6a780cb673dbc19..4ae74aa8cdc1cf519ad83396cadcfc1a6fe4ca7b 100644 (file)
@@ -176,7 +176,7 @@ struct abituguru3_data {
 
        /*
         * The abituguru3 supports up to 48 sensors, and thus has registers
-        * sets for 48 sensors, for convienence reasons / simplicity of the
+        * sets for 48 sensors, for convenience reasons / simplicity of the
         * code we always read and store all registers for all 48 sensors
         */
 
index a9e3d0152c0b5d23ae6d651c72c8782d60519987..8d40da314a8e5bf5b424a5039e4279ba819433d1 100644 (file)
@@ -381,8 +381,10 @@ static ssize_t show_str(struct device *dev,
                val = resource->oem_info;
                break;
        default:
-               BUG();
+               WARN(1, "Implementation error: unexpected attribute index %d\n",
+                    attr->index);
                val = "";
+               break;
        }
 
        return sprintf(buf, "%s\n", val);
@@ -436,7 +438,9 @@ static ssize_t show_val(struct device *dev,
                val = resource->trip[attr->index - 7] * 1000;
                break;
        default:
-               BUG();
+               WARN(1, "Implementation error: unexpected attribute index %d\n",
+                    attr->index);
+               break;
        }
 
        return sprintf(buf, "%llu\n", val);
@@ -855,7 +859,8 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
                dev_info(&device->dev, "Capping in progress.\n");
                break;
        default:
-               BUG();
+               WARN(1, "Unexpected event %d\n", event);
+               break;
        }
        mutex_unlock(&resource->lock);
 
@@ -991,7 +996,7 @@ static int __init acpi_power_meter_init(void)
 
        result = acpi_bus_register_driver(&acpi_power_meter_driver);
        if (result < 0)
-               return -ENODEV;
+               return result;
 
        return 0;
 }
index 751b1f0264a4dae9a76f49de823c807bcac022fe..04c08c2f79b8a3e16e2b537cd7a6427cb745e36d 100644 (file)
@@ -203,7 +203,6 @@ out_err:
        for (i--; i >= 0; i--)
                device_remove_file(&spi->dev, &ad_input[i].dev_attr);
 
-       spi_set_drvdata(spi, NULL);
        mutex_unlock(&adc->lock);
        return status;
 }
@@ -218,7 +217,6 @@ static int adcxx_remove(struct spi_device *spi)
        for (i = 0; i < 3 + adc->channels; i++)
                device_remove_file(&spi->dev, &ad_input[i].dev_attr);
 
-       spi_set_drvdata(spi, NULL);
        mutex_unlock(&adc->lock);
 
        return 0;
index 3a6d9ef1c16caa30612b15ca4fd698c10ba0a983..b3498acb9ab4823556edea591b1d8d3489667e1e 100644 (file)
@@ -616,7 +616,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
                data->gpio = gpio;
 
                data->last_reading = jiffies;
-       }; /* last_reading */
+       }       /* last_reading */
 
        if (!data->valid ||
            time_after(jiffies, data->last_config + ADM1026_CONFIG_INTERVAL)) {
@@ -700,7 +700,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
                }
 
                data->last_config = jiffies;
-       }; /* last_config */
+       }       /* last_config */
 
        data->valid = 1;
        mutex_unlock(&data->update_lock);
@@ -1791,7 +1791,7 @@ static int adm1026_detect(struct i2c_client *client,
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
                /* We need to be able to do byte I/O */
                return -ENODEV;
-       };
+       }
 
        /* Now, we do the remaining detection. */
 
index addb5a4d5064572212ef44df3534f30c90a9a499..562cc3881d3345465f0605c5f19c36d8b0b1f127 100644 (file)
@@ -700,7 +700,7 @@ static int find_trange_value(int trange)
                if (trange_values[i] == trange)
                        return i;
 
-       return -ENODEV;
+       return -EINVAL;
 }
 
 static struct adt7462_data *adt7462_update_device(struct device *dev)
@@ -1294,9 +1294,8 @@ static ssize_t set_pwm_tmax(struct device *dev,
        /* trange = tmax - tmin */
        tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
        trange_value = find_trange_value(trange - tmin);
-
        if (trange_value < 0)
-               return -EINVAL;
+               return trange_value;
 
        temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
        temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;
index 62c2e32e25ef6823290380074d50829af30e5751..3288f13d2d871679b5eac9d6dd019675ede3c0d5 100644 (file)
@@ -230,6 +230,7 @@ static int send_argument(const char *key)
 
 static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 {
+       u8 status, data = 0;
        int i;
 
        if (send_command(cmd) || send_argument(key)) {
@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
                return -EIO;
        }
 
+       /* This has no effect on newer (2012) SMCs */
        if (send_byte(len, APPLESMC_DATA_PORT)) {
                pr_warn("%.4s: read len fail\n", key);
                return -EIO;
@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
                buffer[i] = inb(APPLESMC_DATA_PORT);
        }
 
+       /* Read the data port until bit0 is cleared */
+       for (i = 0; i < 16; i++) {
+               udelay(APPLESMC_MIN_WAIT);
+               status = inb(APPLESMC_CMD_PORT);
+               if (!(status & 0x01))
+                       break;
+               data = inb(APPLESMC_DATA_PORT);
+       }
+       if (i)
+               pr_warn("flushed %d bytes, last value is: %d\n", i, data);
+
        return 0;
 }
 
@@ -525,16 +538,25 @@ static int applesmc_init_smcreg_try(void)
 {
        struct applesmc_registers *s = &smcreg;
        bool left_light_sensor, right_light_sensor;
+       unsigned int count;
        u8 tmp[1];
        int ret;
 
        if (s->init_complete)
                return 0;
 
-       ret = read_register_count(&s->key_count);
+       ret = read_register_count(&count);
        if (ret)
                return ret;
 
+       if (s->cache && s->key_count != count) {
+               pr_warn("key count changed from %d to %d\n",
+                       s->key_count, count);
+               kfree(s->cache);
+               s->cache = NULL;
+       }
+       s->key_count = count;
+
        if (!s->cache)
                s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
        if (!s->cache)
index 3ad9d849add2d95f96b263b590f41d70f6f78b84..8d9f2a0e8efea262aaef5311133af527f66a7315 100644 (file)
@@ -138,7 +138,7 @@ static inline u8 read_byte(struct i2c_client *client, u8 reg)
                dev_err(&client->dev,
                        "Unable to read from register 0x%02x.\n", reg);
                return 0;
-       };
+       }
        return res & 0xff;
 }
 
@@ -149,7 +149,7 @@ static inline int write_byte(struct i2c_client *client, u8 reg, u8 data)
                dev_err(&client->dev,
                        "Unable to write value 0x%02x to register 0x%02x.\n",
                        data, reg);
-       };
+       }
        return res;
 }
 
@@ -1030,7 +1030,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
                        }
                }
                data->last_high_reading = jiffies;
-       };                      /* last_reading */
+       }                       /* last_reading */
 
        /* Read all the low priority registers. */
 
@@ -1044,7 +1044,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
                        }
                }
                data->last_low_reading = jiffies;
-       };                      /* last_reading */
+       }                       /* last_reading */
 
        data->valid = 1;
 
@@ -1084,11 +1084,11 @@ static void asc7621_init_client(struct i2c_client *client)
                dev_err(&client->dev,
                        "Client (%d,0x%02x) config is locked.\n",
                        i2c_adapter_id(client->adapter), client->addr);
-       };
+       }
        if (!(value & 0x04)) {
                dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
                        i2c_adapter_id(client->adapter), client->addr);
-       };
+       }
 
 /*
  * Start monitoring
index b25c64302cbc1911f5d272e50dc05f8477b326c9..1d7ff46812c3dc9d72abb977b02cbf4db04fbd2c 100644 (file)
@@ -119,7 +119,7 @@ struct atk_data {
        acpi_handle rtmp_handle;
        acpi_handle rvlt_handle;
        acpi_handle rfan_handle;
-       /* new inteface */
+       /* new interface */
        acpi_handle enumerate_handle;
        acpi_handle read_handle;
        acpi_handle write_handle;
index aecb9ea7beb5eee65b24013de3ad0deece59c167..ddff02e3e66f9ad464b13261eaf0a7d0b76b9218 100644 (file)
@@ -147,10 +147,9 @@ static ssize_t atxp1_storevcore(struct device *dev,
 
        /* Calculate VID */
        vid = vid_to_reg(vcore, data->vrm);
-
        if (vid < 0) {
                dev_err(dev, "VID calculation failed.\n");
-               return -1;
+               return vid;
        }
 
        /*
index a26ba7a17c2be824797c728bc415cc2e3fd49bdb..872d76744e30d1a7d603cc6ca332ab2aa76a965f 100644 (file)
@@ -120,7 +120,7 @@ static const u8 DS1621_REG_TEMP[3] = {
 
 /* Each client has this additional data */
 struct ds1621_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex update_lock;
        char valid;                     /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
@@ -151,10 +151,10 @@ static inline u16 DS1621_TEMP_TO_REG(long temp, u8 zbits)
        return temp;
 }
 
-static void ds1621_init_client(struct i2c_client *client)
+static void ds1621_init_client(struct ds1621_data *data,
+                              struct i2c_client *client)
 {
        u8 conf, new_conf, sreg, resol;
-       struct ds1621_data *data = i2c_get_clientdata(client);
 
        new_conf = conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF);
        /* switch to continuous conversion mode */
@@ -197,8 +197,8 @@ static void ds1621_init_client(struct i2c_client *client)
 
 static struct ds1621_data *ds1621_update_client(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u8 new_conf;
 
        mutex_lock(&data->update_lock);
@@ -247,8 +247,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
                        const char *buf, size_t count)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
        long val;
        int err;
 
@@ -258,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 
        mutex_lock(&data->update_lock);
        data->temp[attr->index] = DS1621_TEMP_TO_REG(val, data->zbits);
-       i2c_smbus_write_word_swapped(client, DS1621_REG_TEMP[attr->index],
+       i2c_smbus_write_word_swapped(data->client, DS1621_REG_TEMP[attr->index],
                                     data->temp[attr->index]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -282,16 +281,15 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
 static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
                          char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%hu\n", data->update_interval);
 }
 
 static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long convrate;
        s32 err;
        int resol = 0;
@@ -343,8 +341,7 @@ static umode_t ds1621_attribute_visible(struct kobject *kobj,
                                        struct attribute *attr, int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
 
        if (attr == &dev_attr_update_interval.attr)
                if (data->kind == ds1621 || data->kind == ds1625)
@@ -357,52 +354,31 @@ static const struct attribute_group ds1621_group = {
        .attrs = ds1621_attributes,
        .is_visible = ds1621_attribute_visible
 };
+__ATTRIBUTE_GROUPS(ds1621);
 
 static int ds1621_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct ds1621_data *data;
-       int err;
+       struct device *hwmon_dev;
 
        data = devm_kzalloc(&client->dev, sizeof(struct ds1621_data),
                            GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
        data->kind = id->driver_data;
+       data->client = client;
 
        /* Initialize the DS1621 chip */
-       ds1621_init_client(client);
-
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&client->dev.kobj, &ds1621_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_files;
-       }
-
-       return 0;
-
- exit_remove_files:
-       sysfs_remove_group(&client->dev.kobj, &ds1621_group);
-       return err;
-}
-
-static int ds1621_remove(struct i2c_client *client)
-{
-       struct ds1621_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ds1621_group);
+       ds1621_init_client(data, client);
 
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          ds1621_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct i2c_device_id ds1621_id[] = {
@@ -422,7 +398,6 @@ static struct i2c_driver ds1621_driver = {
                .name   = "ds1621",
        },
        .probe          = ds1621_probe,
-       .remove         = ds1621_remove,
        .id_table       = ds1621_id,
 };
 
index 142e1cb8dea7e6ed641243588d525d6405d2124d..90ec1173b8a125c629542e079cac66872ea60ec4 100644 (file)
@@ -21,7 +21,6 @@
  *
  * TODO
  *     -       cache alarm and critical limit registers
- *     -       add emc1404 support
  */
 
 #include <linux/module.h>
@@ -40,7 +39,8 @@
 #define THERMAL_REVISION_REG   0xff
 
 struct thermal_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+       const struct attribute_group *groups[3];
        struct mutex mutex;
        /*
         * Cache the hyst value so we don't keep re-reading it. In theory
@@ -53,10 +53,11 @@ struct thermal_data {
 static ssize_t show_temp(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
-       int retval = i2c_smbus_read_byte_data(client, sda->index);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       int retval;
 
+       retval = i2c_smbus_read_byte_data(data->client, sda->index);
        if (retval < 0)
                return retval;
        return sprintf(buf, "%d000\n", retval);
@@ -65,27 +66,27 @@ static ssize_t show_temp(struct device *dev,
 static ssize_t show_bit(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
-       int retval = i2c_smbus_read_byte_data(client, sda->nr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       int retval;
 
+       retval = i2c_smbus_read_byte_data(data->client, sda->nr);
        if (retval < 0)
                return retval;
-       retval &= sda->index;
-       return sprintf(buf, "%d\n", retval ? 1 : 0);
+       return sprintf(buf, "%d\n", !!(retval & sda->index));
 }
 
 static ssize_t store_temp(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
-       struct i2c_client *client = to_i2c_client(dev);
+       struct thermal_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int retval;
 
        if (kstrtoul(buf, 10, &val))
                return -EINVAL;
-       retval = i2c_smbus_write_byte_data(client, sda->index,
+       retval = i2c_smbus_write_byte_data(data->client, sda->index,
                                        DIV_ROUND_CLOSEST(val, 1000));
        if (retval < 0)
                return retval;
@@ -95,9 +96,9 @@ static ssize_t store_temp(struct device *dev,
 static ssize_t store_bit(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct thermal_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long val;
        int retval;
 
@@ -124,9 +125,9 @@ fail:
 static ssize_t show_hyst(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct thermal_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int retval;
        int hyst;
 
@@ -147,9 +148,9 @@ static ssize_t show_hyst(struct device *dev,
 static ssize_t store_hyst(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct thermal_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int retval;
        int hyst;
        unsigned long val;
@@ -232,10 +233,26 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
        show_hyst, store_hyst, 0x1A);
 
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR,
+       show_temp, store_temp, 0x2D);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR,
+       show_temp, store_temp, 0x2C);
+static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO | S_IWUSR,
+       show_temp, store_temp, 0x30);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 0x2A);
+static SENSOR_DEVICE_ATTR_2(temp4_min_alarm, S_IRUGO,
+       show_bit, NULL, 0x36, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_max_alarm, S_IRUGO,
+       show_bit, NULL, 0x35, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_crit_alarm, S_IRUGO,
+       show_bit, NULL, 0x37, 0x08);
+static SENSOR_DEVICE_ATTR(temp4_crit_hyst, S_IRUGO | S_IWUSR,
+       show_hyst, store_hyst, 0x30);
+
 static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR,
        show_bit, store_bit, 0x03, 0x40);
 
-static struct attribute *mid_att_thermal[] = {
+static struct attribute *emc1403_attrs[] = {
        &sensor_dev_attr_temp1_min.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
@@ -264,8 +281,24 @@ static struct attribute *mid_att_thermal[] = {
        NULL
 };
 
-static const struct attribute_group m_thermal_gr = {
-       .attrs = mid_att_thermal
+static const struct attribute_group emc1403_group = {
+       .attrs = emc1403_attrs,
+};
+
+static struct attribute *emc1404_attrs[] = {
+       &sensor_dev_attr_temp4_min.dev_attr.attr,
+       &sensor_dev_attr_temp4_max.dev_attr.attr,
+       &sensor_dev_attr_temp4_crit.dev_attr.attr,
+       &sensor_dev_attr_temp4_input.dev_attr.attr,
+       &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_crit_hyst.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group emc1404_group = {
+       .attrs = emc1404_attrs,
 };
 
 static int emc1403_detect(struct i2c_client *client,
@@ -286,10 +319,12 @@ static int emc1403_detect(struct i2c_client *client,
        case 0x23:
                strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
                break;
-       /*
-        * Note: 0x25 is the 1404 which is very similar and this
-        * driver could be extended
-        */
+       case 0x25:
+               strlcpy(info->type, "emc1404", I2C_NAME_SIZE);
+               break;
+       case 0x27:
+               strlcpy(info->type, "emc1424", I2C_NAME_SIZE);
+               break;
        default:
                return -ENODEV;
        }
@@ -304,43 +339,29 @@ static int emc1403_detect(struct i2c_client *client,
 static int emc1403_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
-       int res;
        struct thermal_data *data;
+       struct device *hwmon_dev;
 
        data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
                            GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->mutex);
        data->hyst_valid = jiffies - 1;         /* Expired */
 
-       res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
-       if (res) {
-               dev_warn(&client->dev, "create group failed\n");
-               return res;
-       }
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               res = PTR_ERR(data->hwmon_dev);
-               dev_warn(&client->dev, "register hwmon dev failed\n");
-               goto thermal_error;
-       }
-       dev_info(&client->dev, "EMC1403 Thermal chip found\n");
-       return 0;
-
-thermal_error:
-       sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
-       return res;
-}
+       data->groups[0] = &emc1403_group;
+       if (id->driver_data)
+               data->groups[1] = &emc1404_group;
 
-static int emc1403_remove(struct i2c_client *client)
-{
-       struct thermal_data *data = i2c_get_clientdata(client);
+       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+                                                     client->name, data,
+                                                     data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+       dev_info(&client->dev, "%s Thermal chip found\n", id->name);
        return 0;
 }
 
@@ -350,7 +371,9 @@ static const unsigned short emc1403_address_list[] = {
 
 static const struct i2c_device_id emc1403_idtable[] = {
        { "emc1403", 0 },
+       { "emc1404", 1 },
        { "emc1423", 0 },
+       { "emc1424", 1 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
@@ -362,7 +385,6 @@ static struct i2c_driver sensor_emc1403 = {
        },
        .detect = emc1403_detect,
        .probe = emc1403_probe,
-       .remove = emc1403_remove,
        .id_table = emc1403_idtable,
        .address_list = emc1403_address_list,
 };
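
The converted probe above stores the client pointer and a NULL-terminated attribute-group list in struct thermal_data; the structure update itself is outside this excerpt, so the sketch below shows only the fields these hunks reference (array size and field order are assumptions, not part of the commit):

/*
 * Sketch only: the struct thermal_data fields referenced by the hunks
 * above. The real structure change is elsewhere in this commit and may
 * contain additional members.
 */
struct thermal_data {
        struct i2c_client *client;                /* replaces i2c_get_clientdata() lookups */
        const struct attribute_group *groups[3];  /* emc1403 group, optional emc1404 group, NULL */
        struct mutex mutex;
        unsigned long hyst_valid;                 /* jiffies stamp, set to "expired" in probe */
};
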
index 31b221eeee6ca7c4c789cc4aeaf88164136664fd..03d8592810bf04d4ebfe66cd86ea7e1b025185ac 100644 (file)
@@ -2420,7 +2420,6 @@ static int f71882fg_probe(struct platform_device *pdev)
 exit_unregister_sysfs:
        f71882fg_remove(pdev); /* Will unregister the sysfs files for us */
        return err; /* f71882fg_remove() also frees our data */
-       return err;
 }
 
 static int f71882fg_remove(struct platform_device *pdev)
index a837b94977f4e8e59d9f3478a96747fe56de803e..80c42bea90ed59a0a8c89d996e78613df33f8745 100644 (file)
@@ -275,7 +275,7 @@ static bool duty_mode_enabled(u8 pwm_enable)
        case 3: /* Manual, speed mode */
                return false;
        default:
-               BUG();
+               WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
                return true;
        }
 }
@@ -291,7 +291,7 @@ static bool auto_mode_enabled(u8 pwm_enable)
        case 4: /* Auto, duty mode */
                return true;
        default:
-               BUG();
+               WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
                return false;
        }
 }
index b7d6a5704eb2ff15887f77341e8d8b2e57cdb291..73181be5b30b806a875f497097f120795db5309c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/hwmon.h>
 #include <linux/gpio.h>
 #include <linux/gpio-fan.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 
@@ -169,7 +170,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
        dev_warn(&fan_data->pdev->dev,
                 "missing speed array entry for GPIO value 0x%x\n", ctrl_val);
 
-       return -EINVAL;
+       return -ENODEV;
 }
 
 static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
@@ -309,12 +310,6 @@ exit_unlock:
        return ret;
 }
 
-static ssize_t show_name(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "gpio-fan\n");
-}
-
 static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
                   show_pwm_enable, set_pwm_enable);
@@ -324,26 +319,23 @@ static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
 static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
 static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
 static umode_t gpio_fan_is_visible(struct kobject *kobj,
                                   struct attribute *attr, int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gpio_fan_data *data = dev_get_drvdata(dev);
 
-       if (index == 1 && !data->alarm)
+       if (index == 0 && !data->alarm)
                return 0;
-       if (index > 1 && !data->ctrl)
+       if (index > 0 && !data->ctrl)
                return 0;
 
        return attr->mode;
 }
 
 static struct attribute *gpio_fan_attributes[] = {
-       &dev_attr_name.attr,
-       &dev_attr_fan1_alarm.attr,              /* 1 */
-       &dev_attr_pwm1.attr,                    /* 2 */
+       &dev_attr_fan1_alarm.attr,              /* 0 */
+       &dev_attr_pwm1.attr,                    /* 1 */
        &dev_attr_pwm1_enable.attr,
        &dev_attr_pwm1_mode.attr,
        &dev_attr_fan1_input.attr,
@@ -358,6 +350,11 @@ static const struct attribute_group gpio_fan_group = {
        .is_visible = gpio_fan_is_visible,
 };
 
+static const struct attribute_group *gpio_fan_groups[] = {
+       &gpio_fan_group,
+       NULL
+};
+
 static int fan_ctrl_init(struct gpio_fan_data *fan_data,
                         struct gpio_fan_platform_data *pdata)
 {
@@ -384,7 +381,7 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
        fan_data->pwm_enable = true; /* Enable manual fan speed control. */
        fan_data->speed_index = get_fan_speed_index(fan_data);
        if (fan_data->speed_index < 0)
-               return -ENODEV;
+               return fan_data->speed_index;
 
        return 0;
 }
@@ -539,24 +536,16 @@ static int gpio_fan_probe(struct platform_device *pdev)
                        return err;
        }
 
-       err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_group);
-       if (err)
-               return err;
-
        /* Make this driver part of hwmon class. */
-       fan_data->hwmon_dev = hwmon_device_register(&pdev->dev);
-       if (IS_ERR(fan_data->hwmon_dev)) {
-               err = PTR_ERR(fan_data->hwmon_dev);
-               goto err_remove;
-       }
+       fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+                                               "gpio-fan", fan_data,
+                                               gpio_fan_groups);
+       if (IS_ERR(fan_data->hwmon_dev))
+               return PTR_ERR(fan_data->hwmon_dev);
 
        dev_info(&pdev->dev, "GPIO fan initialized\n");
 
        return 0;
-
-err_remove:
-       sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
-       return err;
 }
 
 static int gpio_fan_remove(struct platform_device *pdev)
@@ -564,7 +553,6 @@ static int gpio_fan_remove(struct platform_device *pdev)
        struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
 
        hwmon_device_unregister(fan_data->hwmon_dev);
-       sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
 
        return 0;
 }
index 646314f7c8397e05cfb1c89da7786005e6e41b7a..e176a43af63d7700a3f92753732cc16aabd08aeb 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <linux/kdev_t.h>
 #include <linux/idr.h>
 #include <linux/hwmon.h>
 #define HWMON_ID_PREFIX "hwmon"
 #define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
 
-static struct class *hwmon_class;
+struct hwmon_device {
+       const char *name;
+       struct device dev;
+};
+#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+
+static ssize_t
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n", to_hwmon_device(dev)->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static struct attribute *hwmon_dev_attrs[] = {
+       &dev_attr_name.attr,
+       NULL
+};
+
+static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
+                                        struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+
+       if (to_hwmon_device(dev)->name == NULL)
+               return 0;
+
+       return attr->mode;
+}
+
+static struct attribute_group hwmon_dev_attr_group = {
+       .attrs          = hwmon_dev_attrs,
+       .is_visible     = hwmon_dev_name_is_visible,
+};
+
+static const struct attribute_group *hwmon_dev_attr_groups[] = {
+       &hwmon_dev_attr_group,
+       NULL
+};
+
+static void hwmon_dev_release(struct device *dev)
+{
+       kfree(to_hwmon_device(dev));
+}
+
+static struct class hwmon_class = {
+       .name = "hwmon",
+       .owner = THIS_MODULE,
+       .dev_groups = hwmon_dev_attr_groups,
+       .dev_release = hwmon_dev_release,
+};
 
 static DEFINE_IDA(hwmon_ida);
 
 /**
- * hwmon_device_register - register w/ hwmon
- * @dev: the device to register
+ * hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
  *
  * hwmon_device_unregister() must be called when the device is no
  * longer needed.
  *
  * Returns the pointer to the new device.
  */
-struct device *hwmon_device_register(struct device *dev)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                 void *drvdata,
+                                 const struct attribute_group **groups)
 {
-       struct device *hwdev;
-       int id;
+       struct hwmon_device *hwdev;
+       int err, id;
 
        id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
        if (id < 0)
                return ERR_PTR(id);
 
-       hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
-                             HWMON_ID_FORMAT, id);
+       hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+       if (hwdev == NULL) {
+               err = -ENOMEM;
+               goto ida_remove;
+       }
 
-       if (IS_ERR(hwdev))
-               ida_simple_remove(&hwmon_ida, id);
+       hwdev->name = name;
+       hwdev->dev.class = &hwmon_class;
+       hwdev->dev.parent = dev;
+       hwdev->dev.groups = groups;
+       hwdev->dev.of_node = dev ? dev->of_node : NULL;
+       dev_set_drvdata(&hwdev->dev, drvdata);
+       dev_set_name(&hwdev->dev, HWMON_ID_FORMAT, id);
+       err = device_register(&hwdev->dev);
+       if (err)
+               goto free;
 
-       return hwdev;
+       return &hwdev->dev;
+
+free:
+       kfree(hwdev);
+ida_remove:
+       ida_simple_remove(&hwmon_ida, id);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
+
+/**
+ * hwmon_device_register - register w/ hwmon
+ * @dev: the device to register
+ *
+ * hwmon_device_unregister() must be called when the device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new device.
+ */
+struct device *hwmon_device_register(struct device *dev)
+{
+       return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(hwmon_device_register);
 
@@ -75,6 +163,69 @@ void hwmon_device_unregister(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(hwmon_device_unregister);
 
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+       struct device *hwdev = *(struct device **)res;
+
+       hwmon_device_unregister(hwdev);
+}
+
+/**
+ * devm_hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
+ *
+ * Returns the pointer to the new device. The new device is automatically
+ * unregistered with the parent device.
+ */
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                      void *drvdata,
+                                      const struct attribute_group **groups)
+{
+       struct device **ptr, *hwdev;
+
+       if (!dev)
+               return ERR_PTR(-EINVAL);
+
+       ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+       if (IS_ERR(hwdev))
+               goto error;
+
+       *ptr = hwdev;
+       devres_add(dev, ptr);
+       return hwdev;
+
+error:
+       devres_free(ptr);
+       return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+
+static int devm_hwmon_match(struct device *dev, void *res, void *data)
+{
+       struct device **hwdev = res;
+
+       return *hwdev == data;
+}
+
+/**
+ * devm_hwmon_device_unregister - removes a previously registered hwmon device
+ *
+ * @dev: the parent device of the device to unregister
+ */
+void devm_hwmon_device_unregister(struct device *dev)
+{
+       WARN_ON(devres_release(dev, devm_hwmon_release, devm_hwmon_match, dev));
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_unregister);
+
 static void __init hwmon_pci_quirks(void)
 {
 #if defined CONFIG_X86 && defined CONFIG_PCI
@@ -105,19 +256,21 @@ static void __init hwmon_pci_quirks(void)
 
 static int __init hwmon_init(void)
 {
+       int err;
+
        hwmon_pci_quirks();
 
-       hwmon_class = class_create(THIS_MODULE, "hwmon");
-       if (IS_ERR(hwmon_class)) {
-               pr_err("couldn't create sysfs class\n");
-               return PTR_ERR(hwmon_class);
+       err = class_register(&hwmon_class);
+       if (err) {
+               pr_err("couldn't register hwmon sysfs class\n");
+               return err;
        }
        return 0;
 }
 
 static void __exit hwmon_exit(void)
 {
-       class_destroy(hwmon_class);
+       class_unregister(&hwmon_class);
 }
 
 subsys_initcall(hwmon_init);
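
For reference, a minimal sketch of how an I2C hwmon driver might use the devm_hwmon_device_register_with_groups() interface added above. The foo names and the register offset are hypothetical; only the registration and sysfs calls mirror the API introduced in this hunk:

/* Hypothetical driver sketch; only the registration API reflects this commit. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

struct foo_data {
        struct i2c_client *client;      /* needed by the sysfs callbacks */
};

static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct foo_data *data = dev_get_drvdata(dev);   /* drvdata passed at registration */
        int val = i2c_smbus_read_byte_data(data->client, 0x00); /* offset is illustrative */

        if (val < 0)
                return val;
        return sprintf(buf, "%d\n", val * 1000);        /* report millidegrees C */
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);

static struct attribute *foo_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        NULL
};
ATTRIBUTE_GROUPS(foo);  /* provides foo_groups[] */

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
        struct foo_data *data;
        struct device *hwmon_dev;

        data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->client = client;

        /*
         * The hwmon core now creates the "name" attribute and the sysfs
         * groups, and the devm variant unregisters the device automatically,
         * so no remove() callback is needed for the hwmon part.
         */
        hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
                                                            client->name, data,
                                                            foo_groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
}

The non-devm hwmon_device_register_with_groups() variant is used the same way but requires hwmon_device_unregister() in the driver's remove path, as the ltc4245 conversion below shows.
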
index c6fdd5bd395ecb0db5f72e36be4f9d9e33f3aeaf..5378fdefc1f76d7782d9073e5a98206d953b9e1d 100644 (file)
@@ -63,7 +63,7 @@
 #define INA209_SHUNT_DEFAULT           10000   /* uOhm */
 
 struct ina209_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
 
        struct mutex update_lock;
        bool valid;
@@ -78,8 +78,8 @@ struct ina209_data {
 
 static struct ina209_data *ina209_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina209_data *data = i2c_get_clientdata(client);
+       struct ina209_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct ina209_data *ret = data;
        s32 val;
        int i;
@@ -234,7 +234,6 @@ static ssize_t ina209_set_interval(struct device *dev,
                                   struct device_attribute *da,
                                   const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct ina209_data *data = ina209_update_device(dev);
        long val;
        u16 regval;
@@ -250,7 +249,8 @@ static ssize_t ina209_set_interval(struct device *dev,
        mutex_lock(&data->update_lock);
        regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
                                          val);
-       i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+       i2c_smbus_write_word_swapped(data->client, INA209_CONFIGURATION,
+                                    regval);
        data->regs[INA209_CONFIGURATION] = regval;
        data->update_interval = ina209_interval_from_reg(regval);
        mutex_unlock(&data->update_lock);
@@ -260,8 +260,7 @@ static ssize_t ina209_set_interval(struct device *dev,
 static ssize_t ina209_show_interval(struct device *dev,
                                    struct device_attribute *da, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina209_data *data = i2c_get_clientdata(client);
+       struct ina209_data *data = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
 }
@@ -285,9 +284,9 @@ static ssize_t ina209_reset_history(struct device *dev,
                                    const char *buf,
                                    size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina209_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct ina209_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u32 mask = attr->index;
        long val;
        int i, ret;
@@ -312,7 +311,6 @@ static ssize_t ina209_set_value(struct device *dev,
                                const char *buf,
                                size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct ina209_data *data = ina209_update_device(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
        int reg = attr->index;
@@ -332,7 +330,7 @@ static ssize_t ina209_set_value(struct device *dev,
                count = ret;
                goto abort;
        }
-       i2c_smbus_write_word_swapped(client, reg, ret);
+       i2c_smbus_write_word_swapped(data->client, reg, ret);
        data->regs[reg] = ret;
 abort:
        mutex_unlock(&data->update_lock);
@@ -457,7 +455,7 @@ static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
  * Finally, construct an array of pointers to members of the above objects,
  * as required for sysfs_create_group()
  */
-static struct attribute *ina209_attributes[] = {
+static struct attribute *ina209_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in0_input_highest.dev_attr.attr,
        &sensor_dev_attr_in0_input_lowest.dev_attr.attr,
@@ -498,10 +496,7 @@ static struct attribute *ina209_attributes[] = {
 
        NULL,
 };
-
-static const struct attribute_group ina209_group = {
-       .attrs = ina209_attributes,
-};
+ATTRIBUTE_GROUPS(ina209);
 
 static void ina209_restore_conf(struct i2c_client *client,
                                struct ina209_data *data)
@@ -565,6 +560,7 @@ static int ina209_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adapter = client->adapter;
        struct ina209_data *data;
+       struct device *hwmon_dev;
        int ret;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -575,27 +571,23 @@ static int ina209_probe(struct i2c_client *client,
                return -ENOMEM;
 
        i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        ret = ina209_init_client(client, data);
        if (ret)
                return ret;
 
-       /* Register sysfs hooks */
-       ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
-       if (ret)
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name,
+                                                          data, ina209_groups);
+       if (IS_ERR(hwmon_dev)) {
+               ret = PTR_ERR(hwmon_dev);
                goto out_restore_conf;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_hwmon_device_register;
        }
 
        return 0;
 
-out_hwmon_device_register:
-       sysfs_remove_group(&client->dev.kobj, &ina209_group);
 out_restore_conf:
        ina209_restore_conf(client, data);
        return ret;
@@ -605,8 +597,6 @@ static int ina209_remove(struct i2c_client *client)
 {
        struct ina209_data *data = i2c_get_clientdata(client);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ina209_group);
        ina209_restore_conf(client, data);
 
        return 0;
index 70a39a8ac0160e520f6017225c71d1efedebca2c..93d26e8af3e2002b8838c08b13f06787e6021dde 100644 (file)
@@ -78,7 +78,7 @@ struct ina2xx_config {
 };
 
 struct ina2xx_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        const struct ina2xx_config *config;
 
        struct mutex update_lock;
@@ -112,8 +112,8 @@ static const struct ina2xx_config ina2xx_config[] = {
 
 static struct ina2xx_data *ina2xx_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina2xx_data *data = i2c_get_clientdata(client);
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct ina2xx_data *ret = data;
 
        mutex_lock(&data->update_lock);
@@ -203,41 +203,39 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
                          INA2XX_POWER);
 
 /* pointers to created device attributes */
-static struct attribute *ina2xx_attributes[] = {
+static struct attribute *ina2xx_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_curr1_input.dev_attr.attr,
        &sensor_dev_attr_power1_input.dev_attr.attr,
        NULL,
 };
-
-static const struct attribute_group ina2xx_group = {
-       .attrs = ina2xx_attributes,
-};
+ATTRIBUTE_GROUPS(ina2xx);
 
 static int ina2xx_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
-       struct ina2xx_data *data;
        struct ina2xx_platform_data *pdata;
-       int ret;
-       u32 val;
+       struct device *dev = &client->dev;
+       struct ina2xx_data *data;
+       struct device *hwmon_dev;
        long shunt = 10000; /* default shunt value 10mOhms */
+       u32 val;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
                return -ENODEV;
 
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       if (dev_get_platdata(&client->dev)) {
-               pdata = dev_get_platdata(&client->dev);
+       if (dev_get_platdata(dev)) {
+               pdata = dev_get_platdata(dev);
                shunt = pdata->shunt_uohms;
-       } else if (!of_property_read_u32(client->dev.of_node,
-                               "shunt-resistor", &val)) {
-                       shunt = val;
+       } else if (!of_property_read_u32(dev->of_node,
+                                        "shunt-resistor", &val)) {
+               shunt = val;
        }
 
        if (shunt <= 0)
@@ -255,37 +253,18 @@ static int ina2xx_probe(struct i2c_client *client,
        i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
                                     data->config->calibration_factor / shunt);
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
-       ret = sysfs_create_group(&client->dev.kobj, &ina2xx_group);
-       if (ret)
-               return ret;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_err_hwmon;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, ina2xx_groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       dev_info(&client->dev, "power monitor %s (Rshunt = %li uOhm)\n",
+       dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
                 id->name, shunt);
 
        return 0;
-
-out_err_hwmon:
-       sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
-       return ret;
-}
-
-static int ina2xx_remove(struct i2c_client *client)
-{
-       struct ina2xx_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
-
-       return 0;
 }
 
 static const struct i2c_device_id ina2xx_id[] = {
@@ -302,7 +281,6 @@ static struct i2c_driver ina2xx_driver = {
                .name   = "ina2xx",
        },
        .probe          = ina2xx_probe,
-       .remove         = ina2xx_remove,
        .id_table       = ina2xx_id,
 };
 
index 4a58f130fd4e622cf840969920f5dfd62a393a2b..6013611e4f219145ae9bb8da6a3a88b686801076 100644 (file)
@@ -163,7 +163,7 @@ static struct jc42_chips jc42_chips[] = {
 
 /* Each client has this additional data */
 struct jc42_data {
-       struct device   *hwmon_dev;
+       struct i2c_client *client;
        struct mutex    update_lock;    /* protect register access */
        bool            extended;       /* true if extended range supported */
        bool            valid;
@@ -193,21 +193,21 @@ MODULE_DEVICE_TABLE(i2c, jc42_id);
 
 static int jc42_suspend(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
 
        data->config |= JC42_CFG_SHUTDOWN;
-       i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+       i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+                                    data->config);
        return 0;
 }
 
 static int jc42_resume(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
 
        data->config &= ~JC42_CFG_SHUTDOWN;
-       i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+       i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+                                    data->config);
        return 0;
 }
 
@@ -317,15 +317,14 @@ static ssize_t set_##value(struct device *dev,                            \
                           struct device_attribute *attr,               \
                           const char *buf, size_t count)               \
 {                                                                      \
-       struct i2c_client *client = to_i2c_client(dev);                 \
-       struct jc42_data *data = i2c_get_clientdata(client);            \
+       struct jc42_data *data = dev_get_drvdata(dev);                  \
        int err, ret = count;                                           \
        long val;                                                       \
-       if (kstrtol(buf, 10, &val) < 0)                         \
+       if (kstrtol(buf, 10, &val) < 0)                                 \
                return -EINVAL;                                         \
        mutex_lock(&data->update_lock);                                 \
        data->value = jc42_temp_to_reg(val, data->extended);            \
-       err = i2c_smbus_write_word_swapped(client, reg, data->value);   \
+       err = i2c_smbus_write_word_swapped(data->client, reg, data->value); \
        if (err < 0)                                                    \
                ret = err;                                              \
        mutex_unlock(&data->update_lock);                               \
@@ -344,8 +343,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int diff, hyst;
        int err;
@@ -368,7 +366,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
        mutex_lock(&data->update_lock);
        data->config = (data->config & ~JC42_CFG_HYST_MASK)
          | (hyst << JC42_CFG_HYST_SHIFT);
-       err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
+       err = i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
                                           data->config);
        if (err < 0)
                ret = err;
@@ -430,8 +428,7 @@ static umode_t jc42_attribute_mode(struct kobject *kobj,
                                  struct attribute *attr, int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
        unsigned int config = data->config;
        bool readonly;
 
@@ -452,6 +449,7 @@ static const struct attribute_group jc42_group = {
        .attrs = jc42_attributes,
        .is_visible = jc42_attribute_mode,
 };
+__ATTRIBUTE_GROUPS(jc42);
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
 static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
@@ -487,14 +485,16 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
 
 static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-       struct jc42_data *data;
-       int config, cap, err;
        struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       struct jc42_data *data;
+       int config, cap;
 
        data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
+       data->client = client;
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
@@ -515,29 +515,15 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
        }
        data->config = config;
 
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&dev->kobj, &jc42_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
-
-       return 0;
-
-exit_remove:
-       sysfs_remove_group(&dev->kobj, &jc42_group);
-       return err;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          jc42_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static int jc42_remove(struct i2c_client *client)
 {
        struct jc42_data *data = i2c_get_clientdata(client);
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &jc42_group);
 
        /* Restore original configuration except hysteresis */
        if ((data->config & ~JC42_CFG_HYST_MASK) !=
@@ -553,8 +539,8 @@ static int jc42_remove(struct i2c_client *client)
 
 static struct jc42_data *jc42_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct jc42_data *ret = data;
        int val;
 
index 016efa26ba7ca48e80733d949745010190416951..505a59e100b0b9a80b099aa8a9abd43fa5ec2220 100644 (file)
@@ -174,7 +174,6 @@ out_dev_reg_failed:
 out_dev_create_file_failed:
        device_remove_file(&spi->dev, &dev_attr_temp1_input);
 out_dev_create_temp_file_failed:
-       spi_set_drvdata(spi, NULL);
        return status;
 }
 
@@ -185,7 +184,6 @@ static int lm70_remove(struct spi_device *spi)
        hwmon_device_unregister(p_lm70->hwmon_dev);
        device_remove_file(&spi->dev, &dev_attr_temp1_input);
        device_remove_file(&spi->dev, &dev_attr_name);
-       spi_set_drvdata(spi, NULL);
 
        return 0;
 }
index 9bde9644b102d9bcdade74ce20db444b832f9270..9653bb870a478f58e85443fb0bec69cd35f4a559 100644 (file)
@@ -55,7 +55,7 @@ static const unsigned short lm73_convrates[] = {
 };
 
 struct lm73_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex lock;
        u8 ctrl;                        /* control register value */
 };
@@ -66,7 +66,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
                        const char *buf, size_t count)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
+       struct lm73_data *data = dev_get_drvdata(dev);
        long temp;
        short value;
        s32 err;
@@ -77,7 +77,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 
        /* Write value */
        value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
-       err = i2c_smbus_write_word_swapped(client, attr->index, value);
+       err = i2c_smbus_write_word_swapped(data->client, attr->index, value);
        return (err < 0) ? err : count;
 }
 
@@ -85,10 +85,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
                         char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
+       struct lm73_data *data = dev_get_drvdata(dev);
        int temp;
 
-       s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+       s32 err = i2c_smbus_read_word_swapped(data->client, attr->index);
        if (err < 0)
                return err;
 
@@ -101,8 +101,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
 static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm73_data *data = i2c_get_clientdata(client);
+       struct lm73_data *data = dev_get_drvdata(dev);
        unsigned long convrate;
        s32 err;
        int res = 0;
@@ -124,7 +123,8 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
        mutex_lock(&data->lock);
        data->ctrl &= LM73_CTRL_TO_MASK;
        data->ctrl |= res << LM73_CTRL_RES_SHIFT;
-       err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+       err = i2c_smbus_write_byte_data(data->client, LM73_REG_CTRL,
+                                       data->ctrl);
        mutex_unlock(&data->lock);
 
        if (err < 0)
@@ -136,8 +136,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
 static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
                             char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm73_data *data = i2c_get_clientdata(client);
+       struct lm73_data *data = dev_get_drvdata(dev);
        int res;
 
        res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
@@ -147,13 +146,12 @@ static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
 static ssize_t show_maxmin_alarm(struct device *dev,
                                 struct device_attribute *da, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct lm73_data *data = i2c_get_clientdata(client);
+       struct lm73_data *data = dev_get_drvdata(dev);
        s32 ctrl;
 
        mutex_lock(&data->lock);
-       ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+       ctrl = i2c_smbus_read_byte_data(data->client, LM73_REG_CTRL);
        if (ctrl < 0)
                goto abort;
        data->ctrl = ctrl;
@@ -183,7 +181,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
 static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
                        show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
 
-static struct attribute *lm73_attributes[] = {
+static struct attribute *lm73_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp1_min.dev_attr.attr,
@@ -192,10 +190,7 @@ static struct attribute *lm73_attributes[] = {
        &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
        NULL
 };
-
-static const struct attribute_group lm73_group = {
-       .attrs = lm73_attributes,
-};
+ATTRIBUTE_GROUPS(lm73);
 
 /*-----------------------------------------------------------------------*/
 
@@ -204,16 +199,16 @@ static const struct attribute_group lm73_group = {
 static int
 lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-       int status;
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
        struct lm73_data *data;
        int ctrl;
 
-       data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
-                           GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(struct lm73_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->lock);
 
        ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
@@ -221,33 +216,13 @@ lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
                return ctrl;
        data->ctrl = ctrl;
 
-       /* Register sysfs hooks */
-       status = sysfs_create_group(&client->dev.kobj, &lm73_group);
-       if (status)
-               return status;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               status = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, lm73_groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       dev_info(&client->dev, "%s: sensor '%s'\n",
-                dev_name(data->hwmon_dev), client->name);
-
-       return 0;
-
-exit_remove:
-       sysfs_remove_group(&client->dev.kobj, &lm73_group);
-       return status;
-}
-
-static int lm73_remove(struct i2c_client *client)
-{
-       struct lm73_data *data = i2c_get_clientdata(client);
+       dev_info(dev, "sensor '%s'\n", client->name);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &lm73_group);
        return 0;
 }
 
@@ -300,7 +275,6 @@ static struct i2c_driver lm73_driver = {
                .name   = "lm73",
        },
        .probe          = lm73_probe,
-       .remove         = lm73_remove,
        .id_table       = lm73_ids,
        .detect         = lm73_detect,
        .address_list   = normal_i2c,
index 307c9eaeeb9f26eaa3e0e76e049b746afca832ad..411202bdaf6b6bf8c3eb0141ad7a62ecd7d4cfbe 100644 (file)
@@ -57,7 +57,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4d, 0x4e, I2C_CLIENT_END };
 
 /* Client data (each client gets its own) */
 struct lm95234_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex update_lock;
        unsigned long last_updated, interval;   /* in jiffies */
        bool valid;             /* false until following fields are valid */
@@ -114,9 +114,9 @@ static u16 update_intervals[] = { 143, 364, 1000, 2500 };
 
 /* Fill value cache. Must be called with update lock held. */
 
-static int lm95234_fill_cache(struct i2c_client *client)
+static int lm95234_fill_cache(struct lm95234_data *data,
+                             struct i2c_client *client)
 {
-       struct lm95234_data *data = i2c_get_clientdata(client);
        int i, ret;
 
        ret = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE);
@@ -157,9 +157,9 @@ static int lm95234_fill_cache(struct i2c_client *client)
        return 0;
 }
 
-static int lm95234_update_device(struct i2c_client *client,
-                                struct lm95234_data *data)
+static int lm95234_update_device(struct lm95234_data *data)
 {
+       struct i2c_client *client = data->client;
        int ret;
 
        mutex_lock(&data->update_lock);
@@ -169,7 +169,7 @@ static int lm95234_update_device(struct i2c_client *client,
                int i;
 
                if (!data->valid) {
-                       ret = lm95234_fill_cache(client);
+                       ret = lm95234_fill_cache(data, client);
                        if (ret < 0)
                                goto abort;
                }
@@ -209,10 +209,9 @@ abort:
 static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -224,10 +223,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
 static ssize_t show_alarm(struct device *dev,
                          struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        u32 mask = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -238,10 +236,9 @@ static ssize_t show_alarm(struct device *dev,
 static ssize_t show_type(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        u8 mask = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -252,11 +249,10 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
 static ssize_t set_type(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        unsigned long val;
        u8 mask = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -274,7 +270,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
        else
                data->sensor_type &= ~mask;
        data->valid = false;
-       i2c_smbus_write_byte_data(client, LM95234_REG_REM_MODEL,
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_REM_MODEL,
                                  data->sensor_type);
        mutex_unlock(&data->update_lock);
 
@@ -284,10 +280,9 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
 static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -298,11 +293,10 @@ static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
 static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
        long val;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -315,7 +309,7 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->tcrit2[index] = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT2(index), val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT2(index), val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -324,10 +318,9 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
 static ssize_t show_tcrit2_hyst(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -340,8 +333,7 @@ static ssize_t show_tcrit2_hyst(struct device *dev,
 static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
 
        return sprintf(buf, "%u", data->tcrit1[index] * 1000);
@@ -350,11 +342,10 @@ static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
 static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
+       int ret = lm95234_update_device(data);
        long val;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -367,7 +358,7 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->tcrit1[index] = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT1(index), val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT1(index), val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -376,10 +367,9 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
 static ssize_t show_tcrit1_hyst(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -393,11 +383,10 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
+       int ret = lm95234_update_device(data);
        long val;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -411,7 +400,7 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
 
        mutex_lock(&data->update_lock);
        data->thyst = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT_HYST, val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT_HYST, val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -420,10 +409,9 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
 static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -434,11 +422,10 @@ static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
 static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
+       int ret = lm95234_update_device(data);
        long val;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -452,7 +439,7 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->toffset[index] = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_OFFSET(index), val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_OFFSET(index), val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -461,9 +448,8 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
 static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
-       int ret = lm95234_update_device(client, data);
+       struct lm95234_data *data = dev_get_drvdata(dev);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -475,11 +461,10 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
 static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
+       int ret = lm95234_update_device(data);
        unsigned long val;
        u8 regval;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -495,7 +480,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->interval = msecs_to_jiffies(update_intervals[regval]);
-       i2c_smbus_write_byte_data(client, LM95234_REG_CONVRATE, regval);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_CONVRATE, regval);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -579,7 +564,7 @@ static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
 static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
                   set_interval);
 
-static struct attribute *lm95234_attributes[] = {
+static struct attribute *lm95234_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -621,10 +606,7 @@ static struct attribute *lm95234_attributes[] = {
        &dev_attr_update_interval.attr,
        NULL
 };
-
-static const struct attribute_group lm95234_group = {
-       .attrs = lm95234_attributes,
-};
+ATTRIBUTE_GROUPS(lm95234);
 
 static int lm95234_detect(struct i2c_client *client,
                          struct i2c_board_info *info)
@@ -701,13 +683,14 @@ static int lm95234_probe(struct i2c_client *client,
 {
        struct device *dev = &client->dev;
        struct lm95234_data *data;
+       struct device *hwmon_dev;
        int err;
 
        data = devm_kzalloc(dev, sizeof(struct lm95234_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        /* Initialize the LM95234 chip */
@@ -715,32 +698,10 @@ static int lm95234_probe(struct i2c_client *client,
        if (err < 0)
                return err;
 
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&dev->kobj, &lm95234_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_files;
-       }
-
-       return 0;
-
-exit_remove_files:
-       sysfs_remove_group(&dev->kobj, &lm95234_group);
-       return err;
-}
-
-static int lm95234_remove(struct i2c_client *client)
-{
-       struct lm95234_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &lm95234_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          lm95234_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 /* Driver data (common to all clients) */
@@ -756,7 +717,6 @@ static struct i2c_driver lm95234_driver = {
                .name   = DRVNAME,
        },
        .probe          = lm95234_probe,
-       .remove         = lm95234_remove,
        .id_table       = lm95234_id,
        .detect         = lm95234_detect,
        .address_list   = normal_i2c,
index cdc1ecc6734d4122f9aab8736f5b4f37217aaf6b..d4172933ce4f2b67d8696b97527d25e7fd5fab48 100644 (file)
@@ -51,7 +51,9 @@ enum ltc4245_cmd {
 };
 
 struct ltc4245_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+
+       const struct attribute_group *groups[3];
 
        struct mutex update_lock;
        bool valid;
@@ -77,8 +79,8 @@ struct ltc4245_data {
  */
 static void ltc4245_update_gpios(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ltc4245_data *data = i2c_get_clientdata(client);
+       struct ltc4245_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u8 gpio_curr, gpio_next, gpio_reg;
        int i;
 
@@ -130,8 +132,8 @@ static void ltc4245_update_gpios(struct device *dev)
 
 static struct ltc4245_data *ltc4245_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ltc4245_data *data = i2c_get_clientdata(client);
+       struct ltc4245_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        s32 val;
        int i;
 
@@ -455,41 +457,14 @@ static const struct attribute_group ltc4245_gpio_group = {
        .attrs = ltc4245_gpio_attributes,
 };
 
-static int ltc4245_sysfs_create_groups(struct i2c_client *client)
+static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
 {
-       struct ltc4245_data *data = i2c_get_clientdata(client);
-       struct device *dev = &client->dev;
-       int ret;
-
-       /* register the standard sysfs attributes */
-       ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
-       if (ret) {
-               dev_err(dev, "unable to register standard attributes\n");
-               return ret;
-       }
+       /* standard sysfs attributes */
+       data->groups[0] = &ltc4245_std_group;
 
        /* if we're using the extra gpio support, register its attributes */
-       if (data->use_extra_gpios) {
-               ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
-               if (ret) {
-                       dev_err(dev, "unable to register gpio attributes\n");
-                       sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
-{
-       struct ltc4245_data *data = i2c_get_clientdata(client);
-       struct device *dev = &client->dev;
-
        if (data->use_extra_gpios)
-               sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);
-
-       sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
+               data->groups[1] = &ltc4245_gpio_group;
 }
 
 static bool ltc4245_use_extra_gpios(struct i2c_client *client)
@@ -517,7 +492,7 @@ static int ltc4245_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adapter = client->adapter;
        struct ltc4245_data *data;
-       int ret;
+       struct device *hwmon_dev;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -ENODEV;
@@ -526,7 +501,7 @@ static int ltc4245_probe(struct i2c_client *client,
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
        data->use_extra_gpios = ltc4245_use_extra_gpios(client);
 
@@ -534,30 +509,25 @@ static int ltc4245_probe(struct i2c_client *client,
        i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
        i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
 
-       /* Register sysfs hooks */
-       ret = ltc4245_sysfs_create_groups(client);
-       if (ret)
-               return ret;
+       /* Add sysfs hooks */
+       ltc4245_sysfs_add_groups(data);
 
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_hwmon_device_register;
-       }
+       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+                                                     client->name, data,
+                                                     data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       return 0;
+       i2c_set_clientdata(client, hwmon_dev);
 
-out_hwmon_device_register:
-       ltc4245_sysfs_remove_groups(client);
-       return ret;
+       return 0;
 }
 
 static int ltc4245_remove(struct i2c_client *client)
 {
-       struct ltc4245_data *data = i2c_get_clientdata(client);
+       struct device *hwmon_dev = i2c_get_clientdata(client);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       ltc4245_sysfs_remove_groups(client);
+       hwmon_device_unregister(hwmon_dev);
 
        return 0;
 }
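
Unlike most drivers in this series, ltc4245 keeps its remove() callback: it uses the unmanaged hwmon_device_register_with_groups() and stores the returned hwmon device as client data so remove() can unregister it. The interesting part of the change is how the optional GPIO attributes are handled: rather than a second sysfs_create_group() call with its own error unwinding, the group is slotted into a NULL-terminated array before registration. A sketch of that idea with invented "bar" names (the groups themselves would be built as in the earlier sketch):

	struct bar_data {
		struct i2c_client *client;
		/* room for the standard group, an optional group, and the NULL end */
		const struct attribute_group *groups[3];
		bool use_extra_gpios;
	};

	static void bar_fill_groups(struct bar_data *data)
	{
		/* data was devm_kzalloc()ed, so unused slots are already NULL */
		data->groups[0] = &bar_std_group;
		if (data->use_extra_gpios)
			data->groups[1] = &bar_gpio_group;
	}

	/* in probe(): */
	bar_fill_groups(data);
	hwmon_dev = hwmon_device_register_with_groups(&client->dev,
						      client->name, data,
						      data->groups);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);
	i2c_set_clientdata(client, hwmon_dev);	/* remove() unregisters it */
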
index 487da58ec86c2fbab1cceacbc9b8b0ffa799ea20..0becd69842bb7cb0586f169e1dcbf75a10620278 100644 (file)
@@ -55,7 +55,7 @@
 #define FAULT_OC       (1<<2)
 
 struct ltc4261_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
 
        struct mutex update_lock;
        bool valid;
@@ -67,8 +67,8 @@ struct ltc4261_data {
 
 static struct ltc4261_data *ltc4261_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ltc4261_data *data = i2c_get_clientdata(client);
+       struct ltc4261_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct ltc4261_data *ret = data;
 
        mutex_lock(&data->update_lock);
@@ -150,7 +150,6 @@ static ssize_t ltc4261_show_bool(struct device *dev,
                                 struct device_attribute *da, char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
        struct ltc4261_data *data = ltc4261_update_device(dev);
        u8 fault;
 
@@ -159,7 +158,7 @@ static ssize_t ltc4261_show_bool(struct device *dev,
 
        fault = data->regs[LTC4261_FAULT] & attr->index;
        if (fault)              /* Clear reported faults in chip register */
-               i2c_smbus_write_byte_data(client, LTC4261_FAULT, ~fault);
+               i2c_smbus_write_byte_data(data->client, LTC4261_FAULT, ~fault);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", fault ? 1 : 0);
 }
@@ -197,7 +196,7 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4261_show_value, NULL,
 static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
                          FAULT_OC);
 
-static struct attribute *ltc4261_attributes[] = {
+static struct attribute *ltc4261_attrs[] = {
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
        &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
@@ -210,62 +209,38 @@ static struct attribute *ltc4261_attributes[] = {
 
        NULL,
 };
-
-static const struct attribute_group ltc4261_group = {
-       .attrs = ltc4261_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4261);
 
 static int ltc4261_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
+       struct device *dev = &client->dev;
        struct ltc4261_data *data;
-       int ret;
+       struct device *hwmon_dev;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -ENODEV;
 
        if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
-               dev_err(&client->dev, "Failed to read status register\n");
+               dev_err(dev, "Failed to read status register\n");
                return -ENODEV;
        }
 
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        /* Clear faults */
        i2c_smbus_write_byte_data(client, LTC4261_FAULT, 0x00);
 
-       /* Register sysfs hooks */
-       ret = sysfs_create_group(&client->dev.kobj, &ltc4261_group);
-       if (ret)
-               return ret;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_hwmon_device_register;
-       }
-
-       return 0;
-
-out_hwmon_device_register:
-       sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
-       return ret;
-}
-
-static int ltc4261_remove(struct i2c_client *client)
-{
-       struct ltc4261_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          ltc4261_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct i2c_device_id ltc4261_id[] = {
@@ -281,7 +256,6 @@ static struct i2c_driver ltc4261_driver = {
                   .name = "ltc4261",
                   },
        .probe = ltc4261_probe,
-       .remove = ltc4261_remove,
        .id_table = ltc4261_id,
 };
 
index 2fa2c02f5569c5af563ac5a90e865917e648fdf2..d4efc79d7b9371e69dc6e5c5972348020f19a1aa 100644 (file)
@@ -83,7 +83,8 @@ static const bool max16065_have_current[] = {
 
 struct max16065_data {
        enum chips type;
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+       const struct attribute_group *groups[4];
        struct mutex update_lock;
        bool valid;
        unsigned long last_updated; /* in jiffies */
@@ -144,8 +145,8 @@ static int max16065_read_adc(struct i2c_client *client, int reg)
 
 static struct max16065_data *max16065_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max16065_data *data = i2c_get_clientdata(client);
+       struct max16065_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
 
        mutex_lock(&data->update_lock);
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
@@ -186,7 +187,7 @@ static ssize_t max16065_show_alarm(struct device *dev,
 
        val &= (1 << attr2->index);
        if (val)
-               i2c_smbus_write_byte_data(to_i2c_client(dev),
+               i2c_smbus_write_byte_data(data->client,
                                          MAX16065_FAULT(attr2->nr), val);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", !!val);
@@ -223,8 +224,7 @@ static ssize_t max16065_set_limit(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max16065_data *data = i2c_get_clientdata(client);
+       struct max16065_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int err;
        int limit;
@@ -238,7 +238,7 @@ static ssize_t max16065_set_limit(struct device *dev,
        mutex_lock(&data->update_lock);
        data->limit[attr2->nr][attr2->index]
          = LIMIT_TO_MV(limit, data->range[attr2->index]);
-       i2c_smbus_write_byte_data(client,
+       i2c_smbus_write_byte_data(data->client,
                                  MAX16065_LIMIT(attr2->nr, attr2->index),
                                  limit);
        mutex_unlock(&data->update_lock);
@@ -250,8 +250,7 @@ static ssize_t max16065_show_limit(struct device *dev,
                                   struct device_attribute *da, char *buf)
 {
        struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max16065_data *data = i2c_get_clientdata(client);
+       struct max16065_data *data = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        data->limit[attr2->nr][attr2->index]);
@@ -516,8 +515,32 @@ static struct attribute *max16065_max_attributes[] = {
        NULL
 };
 
+static umode_t max16065_basic_is_visible(struct kobject *kobj,
+                                        struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct max16065_data *data = dev_get_drvdata(dev);
+       int index = n / 4;
+
+       if (index >= data->num_adc || !data->range[index])
+               return 0;
+       return a->mode;
+}
+
+static umode_t max16065_secondary_is_visible(struct kobject *kobj,
+                                            struct attribute *a, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct max16065_data *data = dev_get_drvdata(dev);
+
+       if (index >= data->num_adc)
+               return 0;
+       return a->mode;
+}
+
 static const struct attribute_group max16065_basic_group = {
        .attrs = max16065_basic_attributes,
+       .is_visible = max16065_basic_is_visible,
 };
 
 static const struct attribute_group max16065_current_group = {
@@ -526,38 +549,35 @@ static const struct attribute_group max16065_current_group = {
 
 static const struct attribute_group max16065_min_group = {
        .attrs = max16065_min_attributes,
+       .is_visible = max16065_secondary_is_visible,
 };
 
 static const struct attribute_group max16065_max_group = {
        .attrs = max16065_max_attributes,
+       .is_visible = max16065_secondary_is_visible,
 };
 
-static void max16065_cleanup(struct i2c_client *client)
-{
-       sysfs_remove_group(&client->dev.kobj, &max16065_max_group);
-       sysfs_remove_group(&client->dev.kobj, &max16065_min_group);
-       sysfs_remove_group(&client->dev.kobj, &max16065_current_group);
-       sysfs_remove_group(&client->dev.kobj, &max16065_basic_group);
-}
-
 static int max16065_probe(struct i2c_client *client,
                          const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
        struct max16065_data *data;
-       int i, j, val, ret;
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       int i, j, val;
        bool have_secondary;            /* true if chip has secondary limits */
        bool secondary_is_max = false;  /* secondary limits reflect max */
+       int groups = 0;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
                                     | I2C_FUNC_SMBUS_READ_WORD_DATA))
                return -ENODEV;
 
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (unlikely(!data))
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        data->num_adc = max16065_num_adc[id->driver_data];
@@ -596,38 +616,16 @@ static int max16065_probe(struct i2c_client *client,
                }
        }
 
-       /* Register sysfs hooks */
-       for (i = 0; i < data->num_adc * 4; i++) {
-               /* Do not create sysfs entry if channel is disabled */
-               if (!data->range[i / 4])
-                       continue;
-
-               ret = sysfs_create_file(&client->dev.kobj,
-                                       max16065_basic_attributes[i]);
-               if (unlikely(ret))
-                       goto out;
-       }
-
-       if (have_secondary) {
-               struct attribute **attr = secondary_is_max ?
-                 max16065_max_attributes : max16065_min_attributes;
-
-               for (i = 0; i < data->num_adc; i++) {
-                       if (!data->range[i])
-                               continue;
-
-                       ret = sysfs_create_file(&client->dev.kobj, attr[i]);
-                       if (unlikely(ret))
-                               goto out;
-               }
-       }
+       /* sysfs hooks */
+       data->groups[groups++] = &max16065_basic_group;
+       if (have_secondary)
+               data->groups[groups++] = secondary_is_max ?
+                       &max16065_max_group : &max16065_min_group;
 
        if (data->have_current) {
                val = i2c_smbus_read_byte_data(client, MAX16065_CURR_CONTROL);
-               if (unlikely(val < 0)) {
-                       ret = val;
-                       goto out;
-               }
+               if (unlikely(val < 0))
+                       return val;
                if (val & MAX16065_CURR_ENABLE) {
                        /*
                         * Current gain is 6, 12, 24, 48 based on values in
@@ -636,33 +634,16 @@ static int max16065_probe(struct i2c_client *client,
                        data->curr_gain = 6 << ((val >> 2) & 0x03);
                        data->range[MAX16065_NUM_ADC]
                          = max16065_csp_adc_range[(val >> 1) & 0x01];
-                       ret = sysfs_create_group(&client->dev.kobj,
-                                                &max16065_current_group);
-                       if (unlikely(ret))
-                               goto out;
+                       data->groups[groups++] = &max16065_current_group;
                } else {
                        data->have_current = false;
                }
        }
 
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (unlikely(IS_ERR(data->hwmon_dev))) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out;
-       }
-       return 0;
-
-out:
-       max16065_cleanup(client);
-       return ret;
-}
-
-static int max16065_remove(struct i2c_client *client)
-{
-       struct max16065_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       max16065_cleanup(client);
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, data->groups);
+       if (unlikely(IS_ERR(hwmon_dev)))
+               return PTR_ERR(hwmon_dev);
 
        return 0;
 }
@@ -685,7 +666,6 @@ static struct i2c_driver max16065_driver = {
                .name = "max16065",
        },
        .probe = max16065_probe,
-       .remove = max16065_remove,
        .id_table = max16065_id,
 };
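
The max16065 change drops the per-attribute sysfs_create_file() loops in favour of .is_visible callbacks on the groups. The callback runs once per attribute when the group is created; the last argument is the attribute's index within the group, and returning 0 suppresses that file while returning a->mode keeps it with its declared permissions. A reduced sketch of the mechanism (the "baz" names and the four-attributes-per-channel layout are illustrative, not from the patch):

	static umode_t baz_is_visible(struct kobject *kobj, struct attribute *a,
				      int n)
	{
		struct device *dev = container_of(kobj, struct device, kobj);
		struct baz_data *data = dev_get_drvdata(dev);
		int channel = n / 4;		/* four attributes per input channel */

		if (channel >= data->num_adc || !data->range[channel])
			return 0;		/* channel disabled: file is not created */
		return a->mode;			/* keep the permissions declared in attrs */
	}

	static const struct attribute_group baz_group = {
		.attrs = baz_attrs,
		.is_visible = baz_is_visible,
	};
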
 
index 57d58cd3220682030fcb3f0d32b62e4e77648c04..8326fbd601508d354f5a037aabdc5420b1018670 100644 (file)
@@ -87,7 +87,7 @@ static int temp_to_reg(int val)
  */
 
 struct max6642_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex update_lock;
        bool valid; /* zero until following fields are valid */
        unsigned long last_updated; /* in jiffies */
@@ -102,10 +102,10 @@ struct max6642_data {
  * Real code
  */
 
-static void max6642_init_client(struct i2c_client *client)
+static void max6642_init_client(struct max6642_data *data,
+                               struct i2c_client *client)
 {
        u8 config;
-       struct max6642_data *data = i2c_get_clientdata(client);
 
        /*
         * Start the conversions.
@@ -168,14 +168,14 @@ static int max6642_detect(struct i2c_client *client,
 
 static struct max6642_data *max6642_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6642_data *data = i2c_get_clientdata(client);
+       struct max6642_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u16 val, tmp;
 
        mutex_lock(&data->update_lock);
 
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
-               dev_dbg(&client->dev, "Updating max6642 data.\n");
+               dev_dbg(dev, "Updating max6642 data.\n");
                val = i2c_smbus_read_byte_data(client,
                                        MAX6642_REG_R_LOCAL_TEMPL);
                tmp = (val >> 6) & 3;
@@ -209,8 +209,8 @@ static struct max6642_data *max6642_update_device(struct device *dev)
 static ssize_t show_temp_max10(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
 {
-       struct max6642_data *data = max6642_update_device(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+       struct max6642_data *data = max6642_update_device(dev);
 
        return sprintf(buf, "%d\n",
                       temp_from_reg10(data->temp_input[attr->index]));
@@ -219,8 +219,8 @@ static ssize_t show_temp_max10(struct device *dev,
 static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
-       struct max6642_data *data = max6642_update_device(dev);
        struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+       struct max6642_data *data = max6642_update_device(dev);
 
        return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr]));
 }
@@ -228,11 +228,10 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
 static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+       struct max6642_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int err;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6642_data *data = i2c_get_clientdata(client);
-       struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
 
        err = kstrtoul(buf, 10, &val);
        if (err < 0)
@@ -240,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
-       i2c_smbus_write_byte_data(client, attr2->index,
+       i2c_smbus_write_byte_data(data->client, attr2->index,
                                  data->temp_high[attr2->nr]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -264,7 +263,7 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 
-static struct attribute *max6642_attributes[] = {
+static struct attribute *max6642_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -275,54 +274,29 @@ static struct attribute *max6642_attributes[] = {
        &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
        NULL
 };
+ATTRIBUTE_GROUPS(max6642);
 
-static const struct attribute_group max6642_group = {
-       .attrs = max6642_attributes,
-};
-
-static int max6642_probe(struct i2c_client *new_client,
+static int max6642_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
+       struct device *dev = &client->dev;
        struct max6642_data *data;
-       int err;
+       struct device *hwmon_dev;
 
-       data = devm_kzalloc(&new_client->dev, sizeof(struct max6642_data),
-                           GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(struct max6642_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(new_client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        /* Initialize the MAX6642 chip */
-       max6642_init_client(new_client);
+       max6642_init_client(data, client);
 
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&new_client->dev.kobj, &max6642_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(&new_client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_files;
-       }
-
-       return 0;
-
-exit_remove_files:
-       sysfs_remove_group(&new_client->dev.kobj, &max6642_group);
-       return err;
-}
-
-static int max6642_remove(struct i2c_client *client)
-{
-       struct max6642_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &max6642_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          max6642_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 /*
@@ -341,7 +315,6 @@ static struct i2c_driver max6642_driver = {
                .name   = "max6642",
        },
        .probe          = max6642_probe,
-       .remove         = max6642_remove,
        .id_table       = max6642_id,
        .detect         = max6642_detect,
        .address_list   = normal_i2c,
index 3c16cbd4c00286d465bc2aed825330c0c89e9661..0cafc390db4dd5fc1413ffe0814d6a15567dd352 100644 (file)
@@ -660,7 +660,7 @@ static int max6650_init_client(struct i2c_client *client)
        /*
         * If mode is set to "full off", we change it to "open loop" and
         * set DAC to 255, which has the same effect. We do this because
-        * there's no "full off" mode defined in hwmon specifcations.
+        * there's no "full off" mode defined in hwmon specifications.
         */
 
        if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
index a41b5f3fc5069596d7c9a0c0c164dcefefc7e621..7fd3eaf817f4bffe48267887032b1fa15c81847c 100644 (file)
@@ -77,7 +77,7 @@ struct max6697_chip_data {
 };
 
 struct max6697_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
 
        enum chips type;
        const struct max6697_chip_data *chip;
@@ -181,8 +181,8 @@ static const struct max6697_chip_data max6697_chip_data[] = {
 
 static struct max6697_data *max6697_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct max6697_data *ret = data;
        int val;
        int i;
@@ -303,8 +303,7 @@ static ssize_t set_temp(struct device *dev,
 {
        int nr = to_sensor_dev_attr_2(devattr)->nr;
        int index = to_sensor_dev_attr_2(devattr)->index;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *data = dev_get_drvdata(dev);
        long temp;
        int ret;
 
@@ -316,7 +315,7 @@ static ssize_t set_temp(struct device *dev,
        temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
        temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
        data->temp[nr][index] = temp;
-       ret = i2c_smbus_write_byte_data(client,
+       ret = i2c_smbus_write_byte_data(data->client,
                                        index == 2 ? MAX6697_REG_MAX[nr]
                                                   : MAX6697_REG_CRIT[nr],
                                        temp);
@@ -405,8 +404,7 @@ static umode_t max6697_is_visible(struct kobject *kobj, struct attribute *attr,
                                  int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *data = dev_get_drvdata(dev);
        const struct max6697_chip_data *chip = data->chip;
        int channel = index / 6;        /* channel number */
        int nr = index % 6;             /* attribute index within channel */
@@ -489,6 +487,7 @@ static struct attribute *max6697_attributes[] = {
 static const struct attribute_group max6697_group = {
        .attrs = max6697_attributes, .is_visible = max6697_is_visible,
 };
+__ATTRIBUTE_GROUPS(max6697);
 
 static void max6697_get_config_of(struct device_node *node,
                                  struct max6697_platform_data *pdata)
@@ -525,9 +524,9 @@ static void max6697_get_config_of(struct device_node *node,
        }
 }
 
-static int max6697_init_chip(struct i2c_client *client)
+static int max6697_init_chip(struct max6697_data *data,
+                            struct i2c_client *client)
 {
-       struct max6697_data *data = i2c_get_clientdata(client);
        struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
        struct max6697_platform_data p;
        const struct max6697_chip_data *chip = data->chip;
@@ -625,6 +624,7 @@ static int max6697_probe(struct i2c_client *client,
        struct i2c_adapter *adapter = client->adapter;
        struct device *dev = &client->dev;
        struct max6697_data *data;
+       struct device *hwmon_dev;
        int err;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -636,39 +636,17 @@ static int max6697_probe(struct i2c_client *client,
 
        data->type = id->driver_data;
        data->chip = &max6697_chip_data[data->type];
-
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
-       err = max6697_init_chip(client);
-       if (err)
-               return err;
-
-       err = sysfs_create_group(&client->dev.kobj, &max6697_group);
+       err = max6697_init_chip(data, client);
        if (err)
                return err;
 
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto error;
-       }
-
-       return 0;
-
-error:
-       sysfs_remove_group(&client->dev.kobj, &max6697_group);
-       return err;
-}
-
-static int max6697_remove(struct i2c_client *client)
-{
-       struct max6697_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &max6697_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          max6697_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct i2c_device_id max6697_id[] = {
@@ -692,7 +670,6 @@ static struct i2c_driver max6697_driver = {
                .name   = "max6697",
        },
        .probe = max6697_probe,
-       .remove = max6697_remove,
        .id_table = max6697_id,
 };
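
max6697 defines its attribute_group by hand because the group needs a custom .is_visible, so the patch adds __ATTRIBUTE_GROUPS() after the existing definition rather than using ATTRIBUTE_GROUPS(). As far as the <linux/sysfs.h> helpers go, that macro only emits the NULL-terminated pointer array expected by the registration call, roughly:

	static const struct attribute_group *max6697_groups[] = {
		&max6697_group,
		NULL,
	};
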
 
index 982d8622c09b375f080bc2c81bfbacb71cf0896c..ae00e60d856c63c4a2a82a4cd6f5a4e994c31e64 100644 (file)
@@ -37,7 +37,7 @@
 struct mc13783_adc_priv {
        struct mc13xxx *mc13xxx;
        struct device *hwmon_dev;
-       char name[10];
+       char name[PLATFORM_NAME_SIZE];
 };
 
 static ssize_t mc13783_adc_show_name(struct device *dev, struct device_attribute
index 6eb03ce2cff4b46a910787f8a7954bf5d00c6f99..d17325db0ea3d9f2478fc2fd7c42641603797e92 100644 (file)
@@ -724,11 +724,8 @@ struct nct6775_data {
        enum kinds kind;
        const char *name;
 
-       struct device *hwmon_dev;
-       struct attribute_group *group_in;
-       struct attribute_group *group_fan;
-       struct attribute_group *group_temp;
-       struct attribute_group *group_pwm;
+       int num_attr_groups;
+       const struct attribute_group *groups[6];
 
        u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
                                    * 3=temp_crit, 4=temp_lcrit
@@ -942,7 +939,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
        struct sensor_device_attribute_2 *a2;
        struct attribute **attrs;
        struct sensor_device_template **t;
-       int err, i, j, count;
+       int i, count;
 
        if (repeat <= 0)
                return ERR_PTR(-EINVAL);
@@ -973,7 +970,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
 
        for (i = 0; i < repeat; i++) {
                t = tg->templates;
-               for (j = 0; *t != NULL; j++) {
+               while (*t != NULL) {
                        snprintf(su->name, sizeof(su->name),
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
@@ -1002,10 +999,6 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                }
        }
 
-       err = sysfs_create_group(&dev->kobj, group);
-       if (err)
-               return ERR_PTR(-ENOMEM);
-
        return group;
 }
 
@@ -1457,7 +1450,8 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
                                          = nct6775_read_temp(data,
                                                data->reg_temp[j][i]);
                        }
-                       if (!(data->have_temp_fixed & (1 << i)))
+                       if (i >= NUM_TEMP_FIXED ||
+                           !(data->have_temp_fixed & (1 << i)))
                                continue;
                        data->temp_offset[i]
                          = nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
@@ -1545,7 +1539,7 @@ static int find_temp_source(struct nct6775_data *data, int index, int count)
                if (src == source)
                        return nr;
        }
-       return -1;
+       return -ENODEV;
 }
 
 static ssize_t
@@ -1644,7 +1638,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
 
        nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
        if (nr < 0)
-               return -ENODEV;
+               return nr;
 
        bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
        regindex = bit >> 3;
@@ -2725,16 +2719,6 @@ store_fan_time(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct nct6775_data *data = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%s\n", data->name);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
 static ssize_t
 show_auto_pwm(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -3061,16 +3045,16 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nct6775_data *data = dev_get_drvdata(dev);
 
-       if (index == 1 && !data->have_vid)
+       if (index == 0 && !data->have_vid)
                return 0;
 
-       if (index == 2 || index == 3) {
-               if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 2] < 0)
+       if (index == 1 || index == 2) {
+               if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 1] < 0)
                        return 0;
        }
 
-       if (index == 4 || index == 5) {
-               if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 4] < 0)
+       if (index == 3 || index == 4) {
+               if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 3] < 0)
                        return 0;
        }
 
@@ -3083,13 +3067,12 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
  * Any change in order or content must be matched.
  */
 static struct attribute *nct6775_attributes_other[] = {
-       &dev_attr_name.attr,
-       &dev_attr_cpu0_vid.attr,                                /* 1 */
-       &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,        /* 2 */
-       &sensor_dev_attr_intrusion1_alarm.dev_attr.attr,        /* 3 */
-       &sensor_dev_attr_intrusion0_beep.dev_attr.attr,         /* 4 */
-       &sensor_dev_attr_intrusion1_beep.dev_attr.attr,         /* 5 */
-       &sensor_dev_attr_beep_enable.dev_attr.attr,             /* 6 */
+       &dev_attr_cpu0_vid.attr,                                /* 0 */
+       &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,        /* 1 */
+       &sensor_dev_attr_intrusion1_alarm.dev_attr.attr,        /* 2 */
+       &sensor_dev_attr_intrusion0_beep.dev_attr.attr,         /* 3 */
+       &sensor_dev_attr_intrusion1_beep.dev_attr.attr,         /* 4 */
+       &sensor_dev_attr_beep_enable.dev_attr.attr,             /* 5 */
 
        NULL
 };
@@ -3099,27 +3082,6 @@ static const struct attribute_group nct6775_group_other = {
        .is_visible = nct6775_other_is_visible,
 };
 
-/*
- * Driver and device management
- */
-
-static void nct6775_device_remove_files(struct device *dev)
-{
-       struct nct6775_data *data = dev_get_drvdata(dev);
-
-       if (data->group_pwm)
-               sysfs_remove_group(&dev->kobj, data->group_pwm);
-       if (data->group_in)
-               sysfs_remove_group(&dev->kobj, data->group_in);
-       if (data->group_fan)
-               sysfs_remove_group(&dev->kobj, data->group_fan);
-       if (data->group_temp)
-               sysfs_remove_group(&dev->kobj, data->group_temp);
-
-       sysfs_remove_group(&dev->kobj, &nct6775_group_other);
-}
-
-/* Get the monitoring functions started */
 static inline void nct6775_init_device(struct nct6775_data *data)
 {
        int i;
@@ -3296,6 +3258,7 @@ static int nct6775_probe(struct platform_device *pdev)
        int num_reg_temp;
        u8 cr2a;
        struct attribute_group *group;
+       struct device *hwmon_dev;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
        if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
@@ -3870,61 +3833,36 @@ static int nct6775_probe(struct platform_device *pdev)
        /* Register sysfs hooks */
        group = nct6775_create_attr_group(dev, &nct6775_pwm_template_group,
                                          data->pwm_num);
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_pwm = group;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       data->groups[data->num_attr_groups++] = group;
 
        group = nct6775_create_attr_group(dev, &nct6775_in_template_group,
                                          fls(data->have_in));
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_in = group;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       data->groups[data->num_attr_groups++] = group;
 
        group = nct6775_create_attr_group(dev, &nct6775_fan_template_group,
                                          fls(data->has_fan));
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_fan = group;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       data->groups[data->num_attr_groups++] = group;
 
        group = nct6775_create_attr_group(dev, &nct6775_temp_template_group,
                                          fls(data->have_temp));
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_temp = group;
-
-       err = sysfs_create_group(&dev->kobj, &nct6775_group_other);
-       if (err)
-               goto exit_remove;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
 
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
-
-       return 0;
-
-exit_remove:
-       nct6775_device_remove_files(dev);
-       return err;
-}
-
-static int nct6775_remove(struct platform_device *pdev)
-{
-       struct nct6775_data *data = platform_get_drvdata(pdev);
+       data->groups[data->num_attr_groups++] = group;
+       data->groups[data->num_attr_groups++] = &nct6775_group_other;
 
-       hwmon_device_unregister(data->hwmon_dev);
-       nct6775_device_remove_files(&pdev->dev);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, data->name,
+                                                          data, data->groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 #ifdef CONFIG_PM
@@ -4013,7 +3951,6 @@ static struct platform_driver nct6775_driver = {
                .pm     = NCT6775_DEV_PM_OPS,
        },
        .probe          = nct6775_probe,
-       .remove         = nct6775_remove,
 };
 
 static const char * const nct6775_sio_names[] __initconst = {
@@ -4101,7 +4038,7 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
 /*
  * when Super-I/O functions move to a separate file, the Super-I/O
  * bus will manage the lifetime of the device and this module will only keep
- * track of the nct6775 driver. But since we platform_device_alloc(), we
+ * track of the nct6775 driver. But since we use platform_device_alloc(), we
  * must keep track of the device
  */
 static struct platform_device *pdev[2];
index 6a9d6edaacb3947a6080162d2ce9b8ddc0c9d59c..a26b1d1d95146b5fd82a39b5b0ffe6f40910f4af 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
  *
  * Copyright (c) 2011 Ericsson AB.
  * Copyright (c) 2013 Guenter Roeck
@@ -27,7 +27,7 @@
 #include <linux/i2c.h>
 #include "pmbus.h"
 
-enum chips { lm25056, lm25066, lm5064, lm5066 };
+enum chips { lm25056, lm25063, lm25066, lm5064, lm5066 };
 
 #define LM25066_READ_VAUX              0xd0
 #define LM25066_MFR_READ_IIN           0xd1
@@ -52,6 +52,11 @@ enum chips { lm25056, lm25066, lm5064, lm5066 };
 #define LM25056_MFR_STS_VAUX_OV_WARN   (1 << 1)
 #define LM25056_MFR_STS_VAUX_UV_WARN   (1 << 0)
 
+/* LM25063 only */
+
+#define LM25063_READ_VOUT_MAX          0xe5
+#define LM25063_READ_VOUT_MIN          0xe6
+
 struct __coeff {
        short m, b, R;
 };
@@ -59,7 +64,7 @@ struct __coeff {
 #define PSC_CURRENT_IN_L       (PSC_NUM_CLASSES)
 #define PSC_POWER_L            (PSC_NUM_CLASSES + 1)
 
-static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
+static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = {
        [lm25056] = {
                [PSC_VOLTAGE_IN] = {
                        .m = 16296,
@@ -116,6 +121,36 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
                        .m = 16,
                },
        },
+       [lm25063] = {
+               [PSC_VOLTAGE_IN] = {
+                       .m = 16000,
+                       .R = -2,
+               },
+               [PSC_VOLTAGE_OUT] = {
+                       .m = 16000,
+                       .R = -2,
+               },
+               [PSC_CURRENT_IN] = {
+                       .m = 10000,
+                       .R = -2,
+               },
+               [PSC_CURRENT_IN_L] = {
+                       .m = 10000,
+                       .R = -2,
+               },
+               [PSC_POWER] = {
+                       .m = 5000,
+                       .R = -3,
+               },
+               [PSC_POWER_L] = {
+                       .m = 5000,
+                       .R = -3,
+               },
+               [PSC_TEMPERATURE] = {
+                       .m = 15596,
+                       .R = -3,
+               },
+       },
        [lm5064] = {
                [PSC_VOLTAGE_IN] = {
                        .m = 4611,
@@ -178,6 +213,7 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
 
 struct lm25066_data {
        int id;
+       u16 rlimit;                     /* Maximum register value */
        struct pmbus_driver_info info;
 };
 
@@ -200,6 +236,10 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
                        /* VIN: 6.14 mV VAUX: 293 uV LSB */
                        ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
                        break;
+               case lm25063:
+                       /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
+                       ret = DIV_ROUND_CLOSEST(ret * 20, 625);
+                       break;
                case lm25066:
                        /* VIN: 4.54 mV VAUX: 283.2 uV LSB */
                        ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -253,6 +293,24 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
        return ret;
 }
 
+static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
+{
+       int ret;
+
+       switch (reg) {
+       case PMBUS_VIRT_READ_VOUT_MAX:
+               ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
+               break;
+       case PMBUS_VIRT_READ_VOUT_MIN:
+               ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
+               break;
+       default:
+               ret = lm25066_read_word_data(client, page, reg);
+               break;
+       }
+       return ret;
+}
+
 static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
 {
        int ret;
@@ -308,27 +366,34 @@ static int lm25056_read_byte_data(struct i2c_client *client, int page, int reg)
 static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
                                   u16 word)
 {
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       const struct lm25066_data *data = to_lm25066_data(info);
        int ret;
 
        switch (reg) {
+       case PMBUS_POUT_OP_FAULT_LIMIT:
+       case PMBUS_POUT_OP_WARN_LIMIT:
        case PMBUS_VOUT_UV_WARN_LIMIT:
        case PMBUS_OT_FAULT_LIMIT:
        case PMBUS_OT_WARN_LIMIT:
+       case PMBUS_IIN_OC_FAULT_LIMIT:
        case PMBUS_VIN_UV_WARN_LIMIT:
+       case PMBUS_VIN_UV_FAULT_LIMIT:
+       case PMBUS_VIN_OV_FAULT_LIMIT:
        case PMBUS_VIN_OV_WARN_LIMIT:
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0, reg, word);
                pmbus_clear_cache(client);
                break;
        case PMBUS_IIN_OC_WARN_LIMIT:
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25066_MFR_IIN_OC_WARN_LIMIT,
                                            word);
                pmbus_clear_cache(client);
                break;
        case PMBUS_PIN_OP_WARN_LIMIT:
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25066_MFR_PIN_OP_WARN_LIMIT,
                                            word);
@@ -337,7 +402,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
        case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
                /* Adjust from VIN coefficients (for LM25056) */
                word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25056_VAUX_UV_WARN_LIMIT, word);
                pmbus_clear_cache(client);
@@ -345,7 +410,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
        case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
                /* Adjust from VIN coefficients (for LM25056) */
                word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25056_VAUX_OV_WARN_LIMIT, word);
                pmbus_clear_cache(client);
@@ -399,9 +464,16 @@ static int lm25066_probe(struct i2c_client *client,
                info->func[0] |= PMBUS_HAVE_STATUS_VMON;
                info->read_word_data = lm25056_read_word_data;
                info->read_byte_data = lm25056_read_byte_data;
+               data->rlimit = 0x0fff;
+       } else if (data->id == lm25063) {
+               info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+                 | PMBUS_HAVE_POUT;
+               info->read_word_data = lm25063_read_word_data;
+               data->rlimit = 0xffff;
        } else {
                info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
                info->read_word_data = lm25066_read_word_data;
+               data->rlimit = 0x0fff;
        }
        info->write_word_data = lm25066_write_word_data;
 
@@ -432,6 +504,7 @@ static int lm25066_probe(struct i2c_client *client,
 
 static const struct i2c_device_id lm25066_id[] = {
        {"lm25056", lm25056},
+       {"lm25063", lm25063},
        {"lm25066", lm25066},
        {"lm5064", lm5064},
        {"lm5066", lm5066},
@@ -453,5 +526,5 @@ static struct i2c_driver lm25066_driver = {
 module_i2c_driver(lm25066_driver);
 
 MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LM25056/LM25066/LM5064/LM5066");
+MODULE_DESCRIPTION("PMBus driver for LM25066 and compatible chips");
 MODULE_LICENSE("GPL");
index 586a89ef9e0f4f28a6264fd48e00273fad2a1c5a..de3c152a1d9a1f2ed011b679f04ed2f6de74308b 100644 (file)
@@ -1,5 +1,6 @@
 /*
- * Hardware monitoring driver for LTC2974, LTC2978, LTC3880, and LTC3883
+ * Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
+ * and LTC3883
  *
  * Copyright (c) 2011 Ericsson AB.
  * Copyright (c) 2013 Guenter Roeck
@@ -27,7 +28,7 @@
 #include <linux/i2c.h>
 #include "pmbus.h"
 
-enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
+enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
 
 /* Common for all chips */
 #define LTC2978_MFR_VOUT_PEAK          0xdd
@@ -35,7 +36,7 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
 #define LTC2978_MFR_TEMPERATURE_PEAK   0xdf
 #define LTC2978_MFR_SPECIAL_ID         0xe7
 
-/* LTC2974 and LTC2978 */
+/* LTC2974, LTC2977, and LTC2978 */
 #define LTC2978_MFR_VOUT_MIN           0xfb
 #define LTC2978_MFR_VIN_MIN            0xfc
 #define LTC2978_MFR_TEMPERATURE_MIN    0xfd
@@ -53,8 +54,10 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
 #define LTC3883_MFR_IIN_PEAK           0xe1
 
 #define LTC2974_ID                     0x0212
+#define LTC2977_ID                     0x0130
 #define LTC2978_ID_REV1                        0x0121
 #define LTC2978_ID_REV2                        0x0122
+#define LTC2978A_ID                    0x0124
 #define LTC3880_ID                     0x4000
 #define LTC3880_ID_MASK                        0xff00
 #define LTC3883_ID                     0x4300
@@ -363,6 +366,7 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
 
 static const struct i2c_device_id ltc2978_id[] = {
        {"ltc2974", ltc2974},
+       {"ltc2977", ltc2977},
        {"ltc2978", ltc2978},
        {"ltc3880", ltc3880},
        {"ltc3883", ltc3883},
@@ -392,7 +396,10 @@ static int ltc2978_probe(struct i2c_client *client,
 
        if (chip_id == LTC2974_ID) {
                data->id = ltc2974;
-       } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2) {
+       } else if (chip_id == LTC2977_ID) {
+               data->id = ltc2977;
+       } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2 ||
+                  chip_id == LTC2978A_ID) {
                data->id = ltc2978;
        } else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
                data->id = ltc3880;
@@ -438,6 +445,7 @@ static int ltc2978_probe(struct i2c_client *client,
                          | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
                }
                break;
+       case ltc2977:
        case ltc2978:
                info->read_word_data = ltc2978_read_word_data;
                info->pages = LTC2978_NUM_PAGES;
index 9319fcf142d96656040eabcc0d411699f61f4b06..3cbf66e9d861968863e01062902bdd699ca648e9 100644 (file)
@@ -97,6 +97,7 @@ struct pmbus_data {
        int max_attributes;
        int num_attributes;
        struct attribute_group group;
+       const struct attribute_group *groups[2];
 
        struct pmbus_sensor *sensors;
 
@@ -156,7 +157,7 @@ EXPORT_SYMBOL_GPL(pmbus_write_byte);
 
 /*
  * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
- * a device specific mapping funcion exists and calls it if necessary.
+ * a device specific mapping function exists and calls it if necessary.
  */
 static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
 {
@@ -348,7 +349,7 @@ static struct _pmbus_status {
 
 static struct pmbus_data *pmbus_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
+       struct i2c_client *client = to_i2c_client(dev->parent);
        struct pmbus_data *data = i2c_get_clientdata(client);
        const struct pmbus_driver_info *info = data->info;
        struct pmbus_sensor *sensor;
@@ -686,7 +687,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
        if (!s1 && !s2) {
                ret = !!regval;
        } else if (!s1 || !s2) {
-               BUG();
+               WARN(1, "Bad boolean descriptor %p: s1=%p, s2=%p\n", b, s1, s2);
                return 0;
        } else {
                long v1, v2;
@@ -733,7 +734,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
                                struct device_attribute *devattr,
                                const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
+       struct i2c_client *client = to_i2c_client(dev->parent);
        struct pmbus_data *data = i2c_get_clientdata(client);
        struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
        ssize_t rv = count;
@@ -1768,22 +1769,16 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
                goto out_kfree;
        }
 
-       /* Register sysfs hooks */
-       ret = sysfs_create_group(&dev->kobj, &data->group);
-       if (ret) {
-               dev_err(dev, "Failed to create sysfs entries\n");
-               goto out_kfree;
-       }
-       data->hwmon_dev = hwmon_device_register(dev);
+       data->groups[0] = &data->group;
+       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+                                                           data, data->groups);
        if (IS_ERR(data->hwmon_dev)) {
                ret = PTR_ERR(data->hwmon_dev);
                dev_err(dev, "Failed to register hwmon device\n");
-               goto out_hwmon_device_register;
+               goto out_kfree;
        }
        return 0;
 
-out_hwmon_device_register:
-       sysfs_remove_group(&dev->kobj, &data->group);
 out_kfree:
        kfree(data->group.attrs);
        return ret;
@@ -1794,7 +1789,6 @@ int pmbus_do_remove(struct i2c_client *client)
 {
        struct pmbus_data *data = i2c_get_clientdata(client);
        hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &data->group);
        kfree(data->group.attrs);
        return 0;
 }
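
In pmbus_core the attribute files now hang off the hwmon class device created by hwmon_device_register_with_groups() instead of off the i2c device itself, which is why the sysfs callbacks switch to to_i2c_client(dev->parent). A small sketch of that lookup; the helper name is invented for illustration:

	/* dev is the hwmon class device; its parent is the i2c device that
	 * pmbus_do_probe() bound to, and the client data still lives there.
	 */
	static struct pmbus_data *pmbus_data_from_attr_dev(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev->parent);

		return i2c_get_clientdata(client);
	}
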
index dfe6d9527efb4b6489676468d495e56e7841b2a9..7fa6e7d0b9b6f93ea634954cd1d71d0a67b1694b 100644 (file)
@@ -155,7 +155,8 @@ MODULE_DEVICE_TABLE(i2c, tmp401_id);
  */
 
 struct tmp401_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+       const struct attribute_group *groups[3];
        struct mutex update_lock;
        char valid; /* zero until following fields are valid */
        unsigned long last_updated; /* in jiffies */
@@ -231,8 +232,8 @@ static int tmp401_update_device_reg16(struct i2c_client *client,
 
 static struct tmp401_data *tmp401_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct tmp401_data *ret = data;
        int i, val;
        unsigned long next_update;
@@ -350,15 +351,12 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *devattr,
 {
        int nr = to_sensor_dev_attr_2(devattr)->nr;
        int index = to_sensor_dev_attr_2(devattr)->index;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = tmp401_update_device(dev);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        long val;
        u16 reg;
        u8 regaddr;
 
-       if (IS_ERR(data))
-               return PTR_ERR(data);
-
        if (kstrtol(buf, 10, &val))
                return -EINVAL;
 
@@ -405,7 +403,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
        val = clamp_val(val, temp - 255000, temp);
        reg = ((temp - val) + 500) / 1000;
 
-       i2c_smbus_write_byte_data(to_i2c_client(dev), TMP401_TEMP_CRIT_HYST,
+       i2c_smbus_write_byte_data(data->client, TMP401_TEMP_CRIT_HYST,
                                  reg);
 
        data->temp_crit_hyst = reg;
@@ -423,8 +421,8 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
 static ssize_t reset_temp_history(struct device *dev,
        struct device_attribute *devattr, const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        long val;
 
        if (kstrtol(buf, 10, &val))
@@ -447,8 +445,7 @@ static ssize_t reset_temp_history(struct device *dev,
 static ssize_t show_update_interval(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%u\n", data->update_interval);
 }
@@ -457,8 +454,8 @@ static ssize_t set_update_interval(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long val;
        int err, rate;
 
@@ -616,10 +613,10 @@ static const struct attribute_group tmp432_group = {
  * Begin non sysfs callback code (aka Real code)
  */
 
-static void tmp401_init_client(struct i2c_client *client)
+static void tmp401_init_client(struct tmp401_data *data,
+                              struct i2c_client *client)
 {
        int config, config_orig;
-       struct tmp401_data *data = i2c_get_clientdata(client);
 
        /* Set the conversion rate to 2 Hz */
        i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
@@ -705,77 +702,45 @@ static int tmp401_detect(struct i2c_client *client,
        return 0;
 }
 
-static int tmp401_remove(struct i2c_client *client)
-{
-       struct device *dev = &client->dev;
-       struct tmp401_data *data = i2c_get_clientdata(client);
-
-       if (data->hwmon_dev)
-               hwmon_device_unregister(data->hwmon_dev);
-
-       sysfs_remove_group(&dev->kobj, &tmp401_group);
-
-       if (data->kind == tmp411)
-               sysfs_remove_group(&dev->kobj, &tmp411_group);
-
-       if (data->kind == tmp432)
-               sysfs_remove_group(&dev->kobj, &tmp432_group);
-
-       return 0;
-}
-
 static int tmp401_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
+       const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
        struct device *dev = &client->dev;
-       int err;
+       struct device *hwmon_dev;
        struct tmp401_data *data;
-       const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
+       int groups = 0;
 
        data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
        data->kind = id->driver_data;
 
        /* Initialize the TMP401 chip */
-       tmp401_init_client(client);
+       tmp401_init_client(data, client);
 
        /* Register sysfs hooks */
-       err = sysfs_create_group(&dev->kobj, &tmp401_group);
-       if (err)
-               return err;
+       data->groups[groups++] = &tmp401_group;
 
        /* Register additional tmp411 sysfs hooks */
-       if (data->kind == tmp411) {
-               err = sysfs_create_group(&dev->kobj, &tmp411_group);
-               if (err)
-                       goto exit_remove;
-       }
+       if (data->kind == tmp411)
+               data->groups[groups++] = &tmp411_group;
 
        /* Register additional tmp432 sysfs hooks */
-       if (data->kind == tmp432) {
-               err = sysfs_create_group(&dev->kobj, &tmp432_group);
-               if (err)
-                       goto exit_remove;
-       }
+       if (data->kind == tmp432)
+               data->groups[groups++] = &tmp432_group;
 
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               data->hwmon_dev = NULL;
-               goto exit_remove;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
        dev_info(dev, "Detected TI %s chip\n", names[data->kind]);
 
        return 0;
-
-exit_remove:
-       tmp401_remove(client);
-       return err;
 }
 
 static struct i2c_driver tmp401_driver = {
@@ -784,7 +749,6 @@ static struct i2c_driver tmp401_driver = {
                .name   = "tmp401",
        },
        .probe          = tmp401_probe,
-       .remove         = tmp401_remove,
        .id_table       = tmp401_id,
        .detect         = tmp401_detect,
        .address_list   = normal_i2c,
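
Editor's note: the pmbus_core and tmp401 hunks above both move to the attribute-group based hwmon registration API. The sketch below is not part of the patch; it shows the resulting probe shape for a hypothetical driver (all foo_* names are made up). tmp401 uses the devm_ variant, so its remove callback and error unwinding disappear entirely, while pmbus_core keeps hwmon_device_unregister() and the kfree() of its dynamically built attribute array.

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>

/* Hypothetical driver data; only what the registration call needs. */
struct foo_data {
	struct i2c_client *client;
	const struct attribute_group *groups[2];	/* group + NULL terminator */
};

static struct attribute *foo_attrs[] = { NULL };	/* sensor attributes go here */
static const struct attribute_group foo_group = { .attrs = foo_attrs };

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct device *hwmon_dev;
	struct foo_data *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->client = client;

	/* No sysfs_create_group(): the hwmon core creates the groups itself. */
	data->groups[0] = &foo_group;
	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
							    data, data->groups);
	return IS_ERR(hwmon_dev) ? PTR_ERR(hwmon_dev) : 0;
}
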
index a3feee332e200bd1cd1fd38e91b32a2b9ba00e28..bdcf2dce5ec4088e8f37f4c4c62d81f6e469b708 100644 (file)
@@ -1043,7 +1043,7 @@ static struct sensor_device_attribute sda_temp_alarm[] = {
        SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
 };
 
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
 static ssize_t show_alarms_reg(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
index 5febb43cb4c1032de659cbf3ec3ffa652c8c832f..df585808adb65feb2f53c458799010f68ca4bcc2 100644 (file)
@@ -579,7 +579,7 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
 static ssize_t
 show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
 {
index b0c30a546ff2e3079510e4858c6e8e15899ba092..9d63d71214cade1cddad0b8bfbbbbae726c4dda5 100644 (file)
@@ -808,7 +808,7 @@ show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
        if (nr == TEMP_FAN_MAP) {
                val = data->temp_fan_map[index];
        } else if (nr == TEMP_PWM_ENABLE) {
-               /* +2 to transfrom into 2 and 3 to conform with sysfs intf */
+               /* +2 to transform into 2 and 3 to conform with sysfs intf */
                val = ((data->pwm_enable >> index) & 0x01) + 2;
        } else if (nr == TEMP_CRUISE) {
                val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
@@ -1199,7 +1199,8 @@ static void w83793_init_client(struct i2c_client *client)
 
 static int watchdog_set_timeout(struct w83793_data *data, int timeout)
 {
-       int ret, mtimeout;
+       unsigned int mtimeout;
+       int ret;
 
        mtimeout = DIV_ROUND_UP(timeout, 60);
 
index 35a473ba3d81d5afb9cb0188f2141c2dc417c737..3b9bd9a3f2b08a0a43683c87f1801d2f330185c3 100644 (file)
@@ -675,7 +675,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
        p_adap->retries = 3;
 
        rc = peripheral_request_list(
-                       (unsigned short *)dev_get_platdata(&pdev->dev),
+                       dev_get_platdata(&pdev->dev),
                        "i2c-bfin-twi");
        if (rc) {
                dev_err(&pdev->dev, "Can't setup pin mux!\n");
@@ -723,7 +723,7 @@ out_error_add_adapter:
        free_irq(iface->irq, iface);
 out_error_req_irq:
 out_error_no_irq:
-       peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+       peripheral_free_list(dev_get_platdata(&pdev->dev));
 out_error_pin_mux:
        iounmap(iface->regs_base);
 out_error_ioremap:
@@ -739,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&(iface->adap));
        free_irq(iface->irq, iface);
-       peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+       peripheral_free_list(dev_get_platdata(&pdev->dev));
        iounmap(iface->regs_base);
        kfree(iface);
 
index 132369fad4e0fefe09e10dd88ade2eaf9a8b1972..85e8ad6056c4abce8f02bab4afc2e0a9a1cc0e15 100644 (file)
@@ -795,7 +795,7 @@ static struct platform_driver davinci_i2c_driver = {
                .name   = "i2c_davinci",
                .owner  = THIS_MODULE,
                .pm     = davinci_i2c_pm_ops,
-               .of_match_table = of_match_ptr(davinci_i2c_of_match),
+               .of_match_table = davinci_i2c_of_match,
        },
 };
 
index dbecf08399f86dd30baa7fa7b8964e2daf616bcb..5888feef1ac5a959b5f0471a3fb71b9a2b00bada 100644 (file)
@@ -98,6 +98,8 @@
 
 #define DW_IC_ERR_TX_ABRT      0x1
 
+#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+
 /*
  * status codes
  */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
 static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
 {
        struct i2c_msg *msgs = dev->msgs;
-       u32 ic_con;
+       u32 ic_con, ic_tar = 0;
 
        /* Disable the adapter */
        __i2c_dw_enable(dev, false);
 
-       /* set the slave (target) address */
-       dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
-
        /* if the slave address is ten bit address, enable 10BITADDR */
        ic_con = dw_readl(dev, DW_IC_CON);
-       if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+       if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
                ic_con |= DW_IC_CON_10BITADDR_MASTER;
-       else
+               /*
+                * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
+                * mode has to be enabled via bit 12 of the IC_TAR register.
+                * We always set it, as I2C_DYNAMIC_TAR_UPDATE can't be
+                * detected from registers.
+                */
+               ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+       } else {
                ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+       }
+
        dw_writel(dev, ic_con, DW_IC_CON);
 
+       /*
+        * Set the slave (target) address and enable 10-bit addressing mode
+        * if applicable.
+        */
+       dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
+
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
index 4c1b60539a2515c34ee4df0e7c353dbd7860dc56..0aa01136f8d955148d984ac00e06ffb9e8ce1196 100644 (file)
@@ -270,7 +270,8 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
 MODULE_ALIAS("platform:i2c_designware");
 
 static struct platform_driver dw_i2c_driver = {
-       .remove         = dw_i2c_remove,
+       .probe = dw_i2c_probe,
+       .remove = dw_i2c_remove,
        .driver         = {
                .name   = "i2c_designware",
                .owner  = THIS_MODULE,
@@ -282,7 +283,7 @@ static struct platform_driver dw_i2c_driver = {
 
 static int __init dw_i2c_init_driver(void)
 {
-       return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+       return platform_driver_register(&dw_i2c_driver);
 }
 subsys_initcall(dw_i2c_init_driver);
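
Editor's note: the same conversion recurs below for i2c-imx, i2c-mxs and i2c-stu300. platform_driver_probe() binds only devices that already exist at registration time and is incompatible with deferred probing, so the probe callback moves into the driver struct and plain platform_driver_register() is used; these drivers keep their explicit init functions to preserve subsys_initcall() ordering. A minimal sketch of the idiom with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources; may return -EPROBE_DEFER */
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,		/* lives in the struct, not passed to _probe() */
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

/* When init ordering does not matter, this replaces the init/exit boilerplate. */
module_platform_driver(foo_driver);
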
 
index ccf46656bdad9182e2d75d6c1f3dbf42ea0c32c6..1d7efa3169cd772ed2ba580b0c8cfd149eee09d5 100644 (file)
@@ -365,7 +365,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
        clk_disable_unprepare(i2c_imx->clk);
 }
 
-static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
                                                        unsigned int rate)
 {
        struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
@@ -589,7 +589,7 @@ static struct i2c_algorithm i2c_imx_algo = {
        .functionality  = i2c_imx_func,
 };
 
-static int __init i2c_imx_probe(struct platform_device *pdev)
+static int i2c_imx_probe(struct platform_device *pdev)
 {
        const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
                                                           &pdev->dev);
@@ -697,7 +697,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
        return 0;   /* Return OK */
 }
 
-static int __exit i2c_imx_remove(struct platform_device *pdev)
+static int i2c_imx_remove(struct platform_device *pdev)
 {
        struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
 
@@ -715,7 +715,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver i2c_imx_driver = {
-       .remove         = __exit_p(i2c_imx_remove),
+       .probe = i2c_imx_probe,
+       .remove = i2c_imx_remove,
        .driver = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
@@ -726,7 +727,7 @@ static struct platform_driver i2c_imx_driver = {
 
 static int __init i2c_adap_imx_init(void)
 {
-       return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe);
+       return platform_driver_register(&i2c_imx_driver);
 }
 subsys_initcall(i2c_adap_imx_init);
 
index 8ed79a086f858012b0f22b291834a5fb357d075c..1672effbcebb23894b99deac27bb84d595fa28d2 100644 (file)
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
 
        desc = &priv->hw[priv->head];
 
+       /* Initialize the DMA buffer */
+       memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
+
        /* Initialize the descriptor */
        memset(desc, 0, sizeof(struct ismt_desc));
        desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
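
Editor's note: the memset added above wipes the shared DMA buffer before every transaction, so a short or failed transfer can never hand back stale bytes from an earlier, longer one. The same defensive pattern in isolation; the struct, helper and buffer size are made up.

#include <linux/kernel.h>
#include <linux/string.h>

struct foo_priv {
	u8 dma_buffer[40];	/* reused for every transaction */
};

static void foo_fill_txn(struct foo_priv *priv, const u8 *payload, size_t len)
{
	/* Clear the whole buffer first; only then copy in the new payload. */
	memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
	memcpy(priv->dma_buffer, payload, min(len, sizeof(priv->dma_buffer)));
}
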
index 7f3a4744349476f941a4500c9edc6a65a6da0fe2..8be7e42aa4de88ba3f00ef2275455b27fa5cdf96 100644 (file)
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
                ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
                    (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
 
-               writel_relaxed(data_reg_lo,
+               writel(data_reg_lo,
                        drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
-               writel_relaxed(data_reg_hi,
+               writel(data_reg_hi,
                        drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
 
        } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
 MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
 
 #ifdef CONFIG_OF
+#ifdef CONFIG_HAVE_CLK
 static int
 mv64xxx_calc_freq(const int tclk, const int n, const int m)
 {
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
                return false;
        return true;
 }
+#endif /* CONFIG_HAVE_CLK */
 
 static int
 mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
                  struct device *dev)
 {
-       const struct of_device_id *device;
-       struct device_node *np = dev->of_node;
-       u32 bus_freq, tclk;
-       int rc = 0;
-
        /* CLK is mandatory when using DT to describe the i2c bus. We
         * need to know tclk in order to calculate bus clock
         * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
        /* Have OF but no CLK */
        return -ENODEV;
 #else
+       const struct of_device_id *device;
+       struct device_node *np = dev->of_node;
+       u32 bus_freq, tclk;
+       int rc = 0;
+
        if (IS_ERR(drv_data->clk)) {
                rc = -ENODEV;
                goto out;
@@ -909,7 +911,7 @@ static struct platform_driver mv64xxx_i2c_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = MV64XXX_I2C_CTLR_NAME,
-               .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table),
+               .of_match_table = mv64xxx_i2c_of_match_table,
        },
 };
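
Editor's note: the mv64xxx hunks above fence the baud-factor helpers with CONFIG_HAVE_CLK and move the locals of mv64xxx_of_config() into the #else branch, so builds without clock support warn neither about unused statics nor about unused locals. A stripped-down sketch of that structure; the names and the placeholder formula are hypothetical.

#include <linux/errno.h>
#include <linux/types.h>

#ifdef CONFIG_HAVE_CLK
/* Only meaningful when a clock rate can actually be queried. */
static u32 foo_calc_freq(u32 tclk, u32 n, u32 m)
{
	return tclk / ((m + 1) << n);	/* placeholder formula, not the driver's */
}
#endif /* CONFIG_HAVE_CLK */

static int foo_of_config(void)
{
#ifndef CONFIG_HAVE_CLK
	/* Have OF but no CLK: the bus clock factors cannot be computed. */
	return -ENODEV;
#else
	u32 bus_freq = 100000;		/* locals live inside the #else branch */

	if (foo_calc_freq(25000000, 0, 4) > bus_freq)
		return -EINVAL;
	return 0;
#endif
}
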
 
index f4a01675fa71b4a18ac2b35714425be363442528..99fe86e24fba0428adcf7a7ae27549be2138d8a7 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Freescale MXS I2C bus driver
  *
+ * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de>
  * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K.
  *
  * based on a (non-working) driver which was:
 
 #define MXS_I2C_CTRL0          (0x00)
 #define MXS_I2C_CTRL0_SET      (0x04)
+#define MXS_I2C_CTRL0_CLR      (0x08)
 
 #define MXS_I2C_CTRL0_SFTRST                   0x80000000
 #define MXS_I2C_CTRL0_RUN                      0x20000000
 #define MXS_I2C_CTRL0_SEND_NAK_ON_LAST         0x02000000
+#define MXS_I2C_CTRL0_PIO_MODE                 0x01000000
 #define MXS_I2C_CTRL0_RETAIN_CLOCK             0x00200000
 #define MXS_I2C_CTRL0_POST_SEND_STOP           0x00100000
 #define MXS_I2C_CTRL0_PRE_SEND_START           0x00080000
 #define MXS_I2C_CTRL1_SLAVE_IRQ                        0x01
 
 #define MXS_I2C_STAT           (0x50)
+#define MXS_I2C_STAT_GOT_A_NAK                 0x10000000
 #define MXS_I2C_STAT_BUS_BUSY                  0x00000800
 #define MXS_I2C_STAT_CLK_GEN_BUSY              0x00000400
 
-#define MXS_I2C_DATA           (0xa0)
+#define MXS_I2C_DATA(i2c)      ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0)
 
-#define MXS_I2C_DEBUG0         (0xb0)
-#define MXS_I2C_DEBUG0_CLR     (0xb8)
+#define MXS_I2C_DEBUG0_CLR(i2c)        ((i2c->dev_type == MXS_I2C_V1) ? 0x78 : 0xb8)
 
 #define MXS_I2C_DEBUG0_DMAREQ  0x80000000
 
 #define MXS_CMD_I2C_READ       (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
                                 MXS_I2C_CTRL0_MASTER_MODE)
 
+enum mxs_i2c_devtype {
+       MXS_I2C_UNKNOWN = 0,
+       MXS_I2C_V1,
+       MXS_I2C_V2,
+};
+
 /**
  * struct mxs_i2c_dev - per device, private MXS-I2C data
  *
  * @dev: driver model device node
+ * @dev_type: distinguish i.MX23/i.MX28 features
  * @regs: IO registers pointer
  * @cmd_complete: completion object for transaction wait
  * @cmd_err: error code for last transaction
  */
 struct mxs_i2c_dev {
        struct device *dev;
+       enum mxs_i2c_devtype dev_type;
        void __iomem *regs;
        struct completion cmd_complete;
        int cmd_err;
@@ -291,48 +302,11 @@ write_init_pio_fail:
        return -EINVAL;
 }
 
-static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c)
 {
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 
-       while (!(readl(i2c->regs + MXS_I2C_DEBUG0) &
-               MXS_I2C_DEBUG0_DMAREQ)) {
-               if (time_after(jiffies, timeout))
-                       return -ETIMEDOUT;
-               cond_resched();
-       }
-
-       return 0;
-}
-
-static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-       /*
-        * We do not use interrupts in the PIO mode. Due to the
-        * maximum transfer length being 8 bytes in PIO mode, the
-        * overhead of interrupt would be too large and this would
-        * neglect the gain from using the PIO mode.
-        */
-
-       while (!(readl(i2c->regs + MXS_I2C_CTRL1) &
-               MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) {
-               if (time_after(jiffies, timeout))
-                       return -ETIMEDOUT;
-               cond_resched();
-       }
-
-       writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
-               i2c->regs + MXS_I2C_CTRL1_CLR);
-
-       /*
-        * When ending a transfer with a stop, we have to wait for the bus to
-        * go idle before we report the transfer as completed. Otherwise the
-        * start of the next transfer may race with the end of the current one.
-        */
-       while (last && (readl(i2c->regs + MXS_I2C_STAT) &
-                       (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) {
+       while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cond_resched();
@@ -370,106 +344,215 @@ static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd)
        writel(reg, i2c->regs + MXS_I2C_CTRL0);
 }
 
+/*
+ * Start a WRITE transaction on the I2C bus. The i.MX23 datasheet's
+ * CTRL0::PIO_MODE bit description clarifies the order in which the registers
+ * must be written during PIO mode operation. First, the CTRL0 register has
+ * to be programmed with all the necessary bits but the RUN bit. Then the
+ * payload has to be written into the DATA register. Finally, the transmission
+ * is executed by setting the RUN bit in CTRL0.
+ */
+static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd,
+                                         u32 data)
+{
+       writel(cmd, i2c->regs + MXS_I2C_CTRL0);
+
+       if (i2c->dev_type == MXS_I2C_V1)
+               writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET);
+
+       writel(data, i2c->regs + MXS_I2C_DATA(i2c));
+       writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET);
+}
+
 static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
                        struct i2c_msg *msg, uint32_t flags)
 {
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
        uint32_t addr_data = msg->addr << 1;
        uint32_t data = 0;
-       int i, shifts_left, ret;
+       int i, ret, xlen = 0, xmit = 0;
+       uint32_t start;
 
        /* Mute IRQs coming from this block. */
        writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR);
 
+       /*
+        * MX23 idea:
+        * - Enable CTRL0::PIO_MODE (1 << 24)
+        * - Enable CTRL1::ACK_MODE (1 << 27)
+        *
+        * WARNING! The MX23 is broken in some way; even though it claims
+        * to support PIO, when we try to transfer any amount of data
+        * that is not aligned to 4 bytes, the DMA engine will have
+        * bits in DEBUG1::DMA_BYTES_ENABLES still set even after the
+        * transfer. This in turn will mess up the next transfer, as
+        * the block will emit a one-byte write onto the bus terminated
+        * with a NAK+STOP. A possible workaround is to reset the IP
+        * block after every PIO transmission, which might just work.
+        *
+        * NOTE: The CTRL0::PIO_MODE description is important, since
+        * it outlines how the PIO mode is really supposed to work.
+        */
        if (msg->flags & I2C_M_RD) {
+               /*
+                * PIO READ transfer:
+                *
+                * This transfer MUST be limited to 4 bytes maximum. It is not
+                * possible to transfer more than four bytes via PIO, since we
+                * can not in any way make sure we can read the data from the
+                * DATA register fast enough. Besides, the RX FIFO is only four
+                * bytes deep, thus we can only really read up to four bytes at
+                * a time. Finally, there is no bit telling us that new data
+                * has arrived in the FIFO and can thus be fetched from the DATA
+                * register.
+                */
+               BUG_ON(msg->len > 4);
+
                addr_data |= I2C_SMBUS_READ;
 
                /* SELECT command. */
-               mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT);
-
-               ret = mxs_i2c_pio_wait_dmareq(i2c);
-               if (ret)
-                       return ret;
-
-               writel(addr_data, i2c->regs + MXS_I2C_DATA);
-               writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
+               mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT,
+                                             addr_data);
 
-               ret = mxs_i2c_pio_wait_cplt(i2c, 0);
-               if (ret)
-                       return ret;
-
-               if (mxs_i2c_pio_check_error_state(i2c))
+               ret = mxs_i2c_pio_wait_xfer_end(i2c);
+               if (ret) {
+                       dev_err(i2c->dev,
+                               "PIO: Failed to send SELECT command!\n");
                        goto cleanup;
+               }
 
                /* READ command. */
                mxs_i2c_pio_trigger_cmd(i2c,
                                        MXS_CMD_I2C_READ | flags |
                                        MXS_I2C_CTRL0_XFER_COUNT(msg->len));
 
+               ret = mxs_i2c_pio_wait_xfer_end(i2c);
+               if (ret) {
+                       dev_err(i2c->dev,
+                               "PIO: Failed to send READ command!\n");
+                       goto cleanup;
+               }
+
+               data = readl(i2c->regs + MXS_I2C_DATA(i2c));
                for (i = 0; i < msg->len; i++) {
-                       if ((i & 3) == 0) {
-                               ret = mxs_i2c_pio_wait_dmareq(i2c);
-                               if (ret)
-                                       return ret;
-                               data = readl(i2c->regs + MXS_I2C_DATA);
-                               writel(MXS_I2C_DEBUG0_DMAREQ,
-                                      i2c->regs + MXS_I2C_DEBUG0_CLR);
-                       }
                        msg->buf[i] = data & 0xff;
                        data >>= 8;
                }
        } else {
+               /*
+                * PIO WRITE transfer:
+                *
+                * The code below implements clock stretching to circumvent
+                * the possibility of the kernel not being able to supply data
+                * fast enough. It is possible to transfer an arbitrary amount
+                * of data using PIO write.
+                */
                addr_data |= I2C_SMBUS_WRITE;
 
-               /* WRITE command. */
-               mxs_i2c_pio_trigger_cmd(i2c,
-                                       MXS_CMD_I2C_WRITE | flags |
-                                       MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1));
-
                /*
                 * The LSB of data buffer is the first byte blasted across
                 * the bus. Higher order bytes follow. Thus the following
                 * filling schematic.
                 */
+
                data = addr_data << 24;
+
+               /* Start the transfer with START condition. */
+               start = MXS_I2C_CTRL0_PRE_SEND_START;
+
+               /* If the transfer is long, use clock stretching. */
+               if (msg->len > 3)
+                       start |= MXS_I2C_CTRL0_RETAIN_CLOCK;
+
                for (i = 0; i < msg->len; i++) {
                        data >>= 8;
                        data |= (msg->buf[i] << 24);
-                       if ((i & 3) == 2) {
-                               ret = mxs_i2c_pio_wait_dmareq(i2c);
-                               if (ret)
-                                       return ret;
-                               writel(data, i2c->regs + MXS_I2C_DATA);
-                               writel(MXS_I2C_DEBUG0_DMAREQ,
-                                      i2c->regs + MXS_I2C_DEBUG0_CLR);
+
+                       xmit = 0;
+
+                       /* This is the last transfer of the message. */
+                       if (i + 1 == msg->len) {
+                               /* Add optional STOP flag. */
+                               start |= flags;
+                               /* Remove RETAIN_CLOCK bit. */
+                               start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK;
+                               xmit = 1;
                        }
-               }
 
-               shifts_left = 24 - (i & 3) * 8;
-               if (shifts_left) {
-                       data >>= shifts_left;
-                       ret = mxs_i2c_pio_wait_dmareq(i2c);
-                       if (ret)
-                               return ret;
-                       writel(data, i2c->regs + MXS_I2C_DATA);
+                       /* Four bytes are ready in the "data" variable. */
+                       if ((i & 3) == 2)
+                               xmit = 1;
+
+                       /* Nothing interesting happened, continue stuffing. */
+                       if (!xmit)
+                               continue;
+
+                       /*
+                        * Compute the size of the transfer and shift the
+                        * data accordingly.
+                        *
+                        * i = (4k + 0) .... xlen = 2
+                        * i = (4k + 1) .... xlen = 3
+                        * i = (4k + 2) .... xlen = 4
+                        * i = (4k + 3) .... xlen = 1
+                        */
+
+                       if ((i % 4) == 3)
+                               xlen = 1;
+                       else
+                               xlen = (i % 4) + 2;
+
+                       data >>= (4 - xlen) * 8;
+
+                       dev_dbg(i2c->dev,
+                               "PIO: len=%i pos=%i total=%i [W%s%s%s]\n",
+                               xlen, i, msg->len,
+                               start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "",
+                               start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "",
+                               start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : "");
+
                        writel(MXS_I2C_DEBUG0_DMAREQ,
-                              i2c->regs + MXS_I2C_DEBUG0_CLR);
+                              i2c->regs + MXS_I2C_DEBUG0_CLR(i2c));
+
+                       mxs_i2c_pio_trigger_write_cmd(i2c,
+                               start | MXS_I2C_CTRL0_MASTER_MODE |
+                               MXS_I2C_CTRL0_DIRECTION |
+                               MXS_I2C_CTRL0_XFER_COUNT(xlen), data);
+
+                       /* The START condition is sent only once. */
+                       start &= ~MXS_I2C_CTRL0_PRE_SEND_START;
+
+                       /* Wait for the end of the transfer. */
+                       ret = mxs_i2c_pio_wait_xfer_end(i2c);
+                       if (ret) {
+                               dev_err(i2c->dev,
+                                       "PIO: Failed to finish WRITE cmd!\n");
+                               break;
+                       }
+
+                       /* Check NAK here. */
+                       ret = readl(i2c->regs + MXS_I2C_STAT) &
+                                   MXS_I2C_STAT_GOT_A_NAK;
+                       if (ret) {
+                               ret = -ENXIO;
+                               goto cleanup;
+                       }
                }
        }
 
-       ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP);
-       if (ret)
-               return ret;
-
        /* make sure we capture any occurred error into cmd_err */
-       mxs_i2c_pio_check_error_state(i2c);
+       ret = mxs_i2c_pio_check_error_state(i2c);
 
 cleanup:
        /* Clear any dangling IRQs and re-enable interrupts. */
        writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
        writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
 
-       return 0;
+       /* Clear the PIO_MODE on i.MX23 */
+       if (i2c->dev_type == MXS_I2C_V1)
+               writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR);
+
+       return ret;
 }
 
 /*
@@ -479,8 +562,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                                int stop)
 {
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
-       int ret, err;
+       int ret;
        int flags;
+       int use_pio = 0;
 
        flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
 
@@ -491,19 +575,21 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                return -EINVAL;
 
        /*
-        * The current boundary to select between PIO/DMA transfer method
-        * is set to 8 bytes, transfers shorter than 8 bytes are transfered
-        * using PIO mode while longer transfers use DMA. The 8 byte border is
-        * based on this empirical measurement and a lot of previous frobbing.
+        * The MX28 I2C IP block can only do a PIO READ for transfers of up to
+        * 4 bytes in length. The write transfer is not limited, as it can use
+        * clock stretching to avoid FIFO underruns.
         */
+       if ((msg->flags & I2C_M_RD) && (msg->len <= 4))
+               use_pio = 1;
+       if (!(msg->flags & I2C_M_RD) && (msg->len < 7))
+               use_pio = 1;
+
        i2c->cmd_err = 0;
-       if (0) {        /* disable PIO mode until a proper fix is made */
+       if (use_pio) {
                ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
-               if (ret) {
-                       err = mxs_i2c_reset(i2c);
-                       if (err)
-                               return err;
-               }
+               /* No need to reset the block if NAK was received. */
+               if (ret && (ret != -ENXIO))
+                       mxs_i2c_reset(i2c);
        } else {
                INIT_COMPLETION(i2c->cmd_complete);
                ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -514,9 +600,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                                                msecs_to_jiffies(1000));
                if (ret == 0)
                        goto timeout;
+
+               ret = i2c->cmd_err;
        }
 
-       if (i2c->cmd_err == -ENXIO) {
+       if (ret == -ENXIO) {
                /*
                 * If the transfer fails with a NAK from the slave the
                 * controller halts until it gets told to return to idle state.
@@ -525,7 +613,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                       i2c->regs + MXS_I2C_CTRL1_SET);
        }
 
-       ret = i2c->cmd_err;
+       /*
+        * WARNING!
+        * The i.MX23 is strange. After each and every operation, its I2C IP
+        * block must be reset, otherwise the IP block will misbehave. This can
+        * be observed on the bus by the block sending out one single byte onto
+        * the bus. In case such an error happens, bit 27 will be set in the
+        * DEBUG0 register. This bit is not documented in the i.MX23 datasheet
+        * and is marked as "TBD" instead. To reset this bit to a correct state,
+        * reset the whole block. Since the block reset does not take long,
+        * reset the block after every transfer to play it safe.
+        */
+       if (i2c->dev_type == MXS_I2C_V1)
+               mxs_i2c_reset(i2c);
 
        dev_dbg(i2c->dev, "Done with err=%d\n", ret);
 
@@ -680,8 +780,28 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
        return 0;
 }
 
+static struct platform_device_id mxs_i2c_devtype[] = {
+       {
+               .name = "imx23-i2c",
+               .driver_data = MXS_I2C_V1,
+       }, {
+               .name = "imx28-i2c",
+               .driver_data = MXS_I2C_V2,
+       }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_i2c_devtype);
+
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+       { .compatible = "fsl,imx23-i2c", .data = &mxs_i2c_devtype[0], },
+       { .compatible = "fsl,imx28-i2c", .data = &mxs_i2c_devtype[1], },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
 static int mxs_i2c_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *of_id =
+                               of_match_device(mxs_i2c_dt_ids, &pdev->dev);
        struct device *dev = &pdev->dev;
        struct mxs_i2c_dev *i2c;
        struct i2c_adapter *adap;
@@ -693,6 +813,11 @@ static int mxs_i2c_probe(struct platform_device *pdev)
        if (!i2c)
                return -ENOMEM;
 
+       if (of_id) {
+               const struct platform_device_id *device_id = of_id->data;
+               i2c->dev_type = device_id->driver_data;
+       }
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
 
@@ -768,24 +893,19 @@ static int mxs_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id mxs_i2c_dt_ids[] = {
-       { .compatible = "fsl,imx28-i2c", },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
-
 static struct platform_driver mxs_i2c_driver = {
        .driver = {
                   .name = DRIVER_NAME,
                   .owner = THIS_MODULE,
                   .of_match_table = mxs_i2c_dt_ids,
                   },
+       .probe = mxs_i2c_probe,
        .remove = mxs_i2c_remove,
 };
 
 static int __init mxs_i2c_init(void)
 {
-       return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe);
+       return platform_driver_register(&mxs_i2c_driver);
 }
 subsys_initcall(mxs_i2c_init);
 
@@ -795,6 +915,7 @@ static void __exit mxs_i2c_exit(void)
 }
 module_exit(mxs_i2c_exit);
 
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
 MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
 MODULE_DESCRIPTION("MXS I2C Bus Driver");
 MODULE_LICENSE("GPL");
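
Editor's note: the PIO write path introduced above packs the address byte and payload into a 32-bit word and flushes it whenever the word fills up or the message ends, following the xlen schedule spelled out in the comment inside mxs_i2c_pio_setup_xfer(). The stand-alone sketch below is not driver code; it only reproduces that schedule. For an 8-byte message it yields chunks of 4, 4 and 1 bytes on the wire, the first chunk including the address byte.

#include <linux/kernel.h>

/* Print the per-chunk XFER_COUNT values the PIO write path would program. */
static void foo_trace_pio_chunks(int len)
{
	int i, xlen;

	for (i = 0; i < len; i++) {
		/* Flush only on a full 32-bit word or on the last byte. */
		if (i + 1 != len && (i & 3) != 2)
			continue;

		/* Same schedule as the comment in mxs_i2c_pio_setup_xfer(). */
		if ((i % 4) == 3)
			xlen = 1;
		else
			xlen = (i % 4) + 2;

		pr_info("chunk ending at byte %d: %d bytes on the wire\n",
			i, xlen);
	}
}
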
index 6d8308d5dc4e915c4584e2d5d9b3e5690882a2b9..9967a6f9c2ffba2fe4521a8abf19b49e978110a4 100644 (file)
@@ -939,6 +939,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                /*
                 * ProDB0017052: Clear ARDY bit twice
                 */
+               if (stat & OMAP_I2C_STAT_ARDY)
+                       omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
+
                if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
                                        OMAP_I2C_STAT_AL)) {
                        omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
index d2fe11da5e82a43cdc80c9fc9446aeb1e7c7fa0b..8603f5e805aa9741b142141f37575faeb1a3007e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/i2c/i2c-rcar.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
@@ -226,15 +227,16 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
                                    u32 bus_speed,
                                    struct device *dev)
 {
-       struct clk *clkp = clk_get(NULL, "peripheral_clk");
+       struct clk *clkp = clk_get(dev, NULL);
        u32 scgd, cdf;
        u32 round, ick;
        u32 scl;
        u32 cdf_width;
+       unsigned long rate;
 
-       if (!clkp) {
-               dev_err(dev, "there is no peripheral_clk\n");
-               return -EIO;
+       if (IS_ERR(clkp)) {
+               dev_err(dev, "couldn't get clock\n");
+               return PTR_ERR(clkp);
        }
 
        switch (priv->devtype) {
@@ -264,15 +266,14 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
         * clkp : peripheral_clk
         * F[]  : integer up-valuation
         */
-       for (cdf = 0; cdf < (1 << cdf_width); cdf++) {
-               ick = clk_get_rate(clkp) / (1 + cdf);
-               if (ick < 20000000)
-                       goto ick_find;
+       rate = clk_get_rate(clkp);
+       cdf = rate / 20000000;
+       if (cdf >= 1 << cdf_width) {
+               dev_err(dev, "Input clock %lu too high\n", rate);
+               return -EIO;
        }
-       dev_err(dev, "there is no best CDF\n");
-       return -EIO;
+       ick = rate / (cdf + 1);
 
-ick_find:
        /*
         * it is impossible to calculate large scale
         * number on u32. separate it
@@ -290,6 +291,12 @@ ick_find:
         *
         * Calculation result (= SCL) should be less than
         * bus_speed for hardware safety
+        *
+        * We could use something along the lines of
+        *      div = ick / (bus_speed + 1) + 1;
+        *      scgd = (div - 20 - round + 7) / 8;
+        *      scl = ick / (20 + (scgd * 8) + round);
+        * (not fully verified) but that would get pretty involved
         */
        for (scgd = 0; scgd < 0x40; scgd++) {
                scl = ick / (20 + (scgd * 8) + round);
@@ -306,7 +313,7 @@ scgd_find:
        /*
         * keep icccr value
         */
-       priv->icccr = (scgd << (cdf_width) | cdf);
+       priv->icccr = scgd << cdf_width | cdf;
 
        return 0;
 }
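
Editor's note: to make the replacement of the CDF search loop easier to follow, here is a stand-alone sketch of the whole divider derivation. The clock rate, bus speed and the round term are placeholders passed in by the caller, and the scgd loop mirrors the one kept in the driver; this is an illustration, not the driver's exact code.

#include <linux/types.h>

/* Returns the value that would be kept in ICCCR; 0 if the clock is too fast. */
static u32 foo_rcar_pick_icccr(unsigned long rate, u32 bus_speed,
			       u32 cdf_width, u32 round)
{
	u32 cdf, ick, scgd, scl;

	cdf = rate / 20000000;			/* keep ick below 20 MHz */
	if (cdf >= 1U << cdf_width)
		return 0;			/* input clock too high */
	ick = rate / (cdf + 1);

	/* Walk scgd until SCL no longer exceeds the requested bus speed. */
	for (scgd = 0; scgd < 0x40; scgd++) {
		scl = ick / (20 + scgd * 8 + round);
		if (scl <= bus_speed)
			break;
	}

	return scgd << cdf_width | cdf;
}
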
@@ -632,6 +639,15 @@ static const struct i2c_algorithm rcar_i2c_algo = {
        .functionality  = rcar_i2c_func,
 };
 
+static const struct of_device_id rcar_i2c_dt_ids[] = {
+       { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_H1 },
+       { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_H1 },
+       { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_H1 },
+       { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_H2 },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
+
 static int rcar_i2c_probe(struct platform_device *pdev)
 {
        struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -649,10 +665,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        }
 
        bus_speed = 100000; /* default 100 kHz */
-       if (pdata && pdata->bus_speed)
+       ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
+       if (ret < 0 && pdata && pdata->bus_speed)
                bus_speed = pdata->bus_speed;
 
-       priv->devtype = platform_get_device_id(pdev)->driver_data;
+       if (pdev->dev.of_node)
+               priv->devtype = (long)of_match_device(rcar_i2c_dt_ids,
+                                                     dev)->data;
+       else
+               priv->devtype = platform_get_device_id(pdev)->driver_data;
 
        ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
        if (ret < 0)
@@ -673,6 +694,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        adap->class             = I2C_CLASS_HWMON | I2C_CLASS_SPD;
        adap->retries           = 3;
        adap->dev.parent        = dev;
+       adap->dev.of_node       = dev->of_node;
        i2c_set_adapdata(adap, priv);
        strlcpy(adap->name, pdev->name, sizeof(adap->name));
 
@@ -720,6 +742,7 @@ static struct platform_driver rcar_i2c_driver = {
        .driver = {
                .name   = "i2c-rcar",
                .owner  = THIS_MODULE,
+               .of_match_table = rcar_i2c_dt_ids,
        },
        .probe          = rcar_i2c_probe,
        .remove         = rcar_i2c_remove,
index 3535f3c0f7b43233b09123c89b4727fc302cba3b..3747b9bf67d6440f684bcff945a2a85b93f6fe6c 100644 (file)
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&i2c->adap);
 
-       clk_disable_unprepare(i2c->clk);
-
        if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
                s3c24xx_i2c_dt_gpio_free(i2c);
 
index f8f6f2e552db29e344b41f4e42d0cbc1ac9fc000..04a17b9b38bbbb9f54cd8cb91ce2888aac95a478 100644 (file)
@@ -859,8 +859,7 @@ static const struct i2c_algorithm stu300_algo = {
        .functionality  = stu300_func,
 };
 
-static int __init
-stu300_probe(struct platform_device *pdev)
+static int stu300_probe(struct platform_device *pdev)
 {
        struct stu300_dev *dev;
        struct i2c_adapter *adap;
@@ -966,8 +965,7 @@ static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
 #define STU300_I2C_PM  NULL
 #endif
 
-static int __exit
-stu300_remove(struct platform_device *pdev)
+static int stu300_remove(struct platform_device *pdev)
 {
        struct stu300_dev *dev = platform_get_drvdata(pdev);
 
@@ -989,13 +987,14 @@ static struct platform_driver stu300_i2c_driver = {
                .pm     = STU300_I2C_PM,
                .of_match_table = stu300_dt_match,
        },
-       .remove         = __exit_p(stu300_remove),
+       .probe = stu300_probe,
+       .remove = stu300_remove,
 
 };
 
 static int __init stu300_init(void)
 {
-       return platform_driver_probe(&stu300_i2c_driver, stu300_probe);
+       return platform_driver_register(&stu300_i2c_driver);
 }
 
 static void __exit stu300_exit(void)
index 4c8b368d463b7c77517fb3ed314e4234bd6dafbc..6e7b09c1804e324ead3503e06c5f042cddf1f9b3 100644 (file)
@@ -702,7 +702,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
        if (irq < 0)
                goto resource_missing;
 
-       pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev);
+       pdata = dev_get_platdata(&pdev->dev);
 
        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
index 29d3f045a2bfbc688beeb79f888cb72d10c3bcd2..5923cfa390c86de6528559c229ace0a4b39b402c 100644 (file)
@@ -248,16 +248,17 @@ static int i2c_device_probe(struct device *dev)
        driver = to_i2c_driver(dev->driver);
        if (!driver->probe || !driver->id_table)
                return -ENODEV;
-       client->driver = driver;
+
        if (!device_can_wakeup(&client->dev))
                device_init_wakeup(&client->dev,
                                        client->flags & I2C_CLIENT_WAKE);
        dev_dbg(dev, "probe\n");
 
+       acpi_dev_pm_attach(&client->dev, true);
        status = driver->probe(client, i2c_match_id(driver->id_table, client));
        if (status) {
-               client->driver = NULL;
                i2c_set_clientdata(client, NULL);
+               acpi_dev_pm_detach(&client->dev, true);
        }
        return status;
 }
@@ -279,10 +280,9 @@ static int i2c_device_remove(struct device *dev)
                dev->driver = NULL;
                status = 0;
        }
-       if (status == 0) {
-               client->driver = NULL;
+       if (status == 0)
                i2c_set_clientdata(client, NULL);
-       }
+       acpi_dev_pm_detach(&client->dev, true);
        return status;
 }
 
@@ -1111,8 +1111,10 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
        if (ret < 0 || !info.addr)
                return AE_OK;
 
+       adev->power.flags.ignore_parent = true;
        strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
        if (!i2c_new_device(adapter, &info)) {
+               adev->power.flags.ignore_parent = false;
                dev_err(&adapter->dev,
                        "failed to add I2C device %s from ACPI\n",
                        dev_name(&adev->dev));
@@ -1134,6 +1136,9 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
        acpi_handle handle;
        acpi_status status;
 
+       if (!adap->dev.parent)
+               return;
+
        handle = ACPI_HANDLE(adap->dev.parent);
        if (!handle)
                return;
@@ -1606,9 +1611,14 @@ static int i2c_cmd(struct device *dev, void *_arg)
 {
        struct i2c_client       *client = i2c_verify_client(dev);
        struct i2c_cmd_arg      *arg = _arg;
+       struct i2c_driver       *driver;
+
+       if (!client || !client->dev.driver)
+               return 0;
 
-       if (client && client->driver && client->driver->command)
-               client->driver->command(client, arg->cmd, arg->arg);
+       driver = to_i2c_driver(client->dev.driver);
+       if (driver->command)
+               driver->command(client, arg->cmd, arg->arg);
        return 0;
 }
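
Editor's note: the i2c-core hunks above (and the i2c-smbus one further down) stop relying on the cached client->driver pointer; whenever the i2c_driver is needed it is recovered from the driver core instead. The sketch below shows that idiom on its own; the foo_ helper is hypothetical, while to_i2c_driver(), device_lock() and the two-argument alert callback match the code in this diff.

#include <linux/device.h>
#include <linux/i2c.h>

static void foo_call_alert(struct i2c_client *client, unsigned int flag)
{
	struct i2c_driver *driver;

	device_lock(&client->dev);
	if (client->dev.driver) {
		/* Recover the containing i2c_driver from the device_driver. */
		driver = to_i2c_driver(client->dev.driver);
		if (driver->alert)
			driver->alert(client, flag);
	}
	device_unlock(&client->dev);
}
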
 
index c3ccdea3d18059c4dd199f533932ff17f0b4f448..80b47e8ce030cef7ff6f9ab9f058b15e63f7329e 100644 (file)
@@ -102,8 +102,8 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev)
        kfree(i2c_dev);
 }
 
-static ssize_t show_adapter_name(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));
 
@@ -111,7 +111,13 @@ static ssize_t show_adapter_name(struct device *dev,
                return -ENODEV;
        return sprintf(buf, "%s\n", i2c_dev->adap->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *i2c_attrs[] = {
+       &dev_attr_name.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(i2c);
 
 /* ------------------------------------------------------------------------- */
 
@@ -562,15 +568,10 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
                res = PTR_ERR(i2c_dev->dev);
                goto error;
        }
-       res = device_create_file(i2c_dev->dev, &dev_attr_name);
-       if (res)
-               goto error_destroy;
 
        pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
                 adap->name, adap->nr);
        return 0;
-error_destroy:
-       device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 error:
        return_i2c_dev(i2c_dev);
        return res;
@@ -589,7 +590,6 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
        if (!i2c_dev) /* attach_adapter must have failed */
                return 0;
 
-       device_remove_file(i2c_dev->dev, &dev_attr_name);
        return_i2c_dev(i2c_dev);
        device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 
@@ -637,6 +637,7 @@ static int __init i2c_dev_init(void)
                res = PTR_ERR(i2c_dev_class);
                goto out_unreg_chrdev;
        }
+       i2c_dev_class->dev_groups = i2c_groups;
 
        /* Keep track of adapters which will be added or removed later */
        res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
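
Editor's note: the i2c-dev hunks replace the per-device device_create_file()/device_remove_file() pair with a class-wide attribute group. DEVICE_ATTR_RO(name) pairs with a show routine called name_show(), ATTRIBUTE_GROUPS(i2c) generates the i2c_groups array, and assigning it to dev_groups makes the core create and remove the files automatically. A minimal sketch for a hypothetical read-only attribute (the "demo" names are made up):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(foo);			/* pairs with foo_show() above */

static struct attribute *demo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);				/* emits demo_groups[] */

/* At class creation time, before any device is added:
 *	my_class->dev_groups = demo_groups;
 */
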
index 44d4c6071c15096e19d16433092b7428c113f34f..c99b229873665b56656a7cabd95c7dfd6acb3fb1 100644 (file)
@@ -46,6 +46,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
 {
        struct i2c_client *client = i2c_verify_client(dev);
        struct alert_data *data = addrp;
+       struct i2c_driver *driver;
 
        if (!client || client->addr != data->addr)
                return 0;
@@ -54,12 +55,13 @@ static int smbus_do_alert(struct device *dev, void *addrp)
 
        /*
         * Drivers should either disable alerts, or provide at least
-        * a minimal handler.  Lock so client->driver won't change.
+        * a minimal handler.  Lock so the driver won't change.
         */
        device_lock(dev);
-       if (client->driver) {
-               if (client->driver->alert)
-                       client->driver->alert(client, data->flag);
+       if (client->dev.driver) {
+               driver = to_i2c_driver(client->dev.driver);
+               if (driver->alert)
+                       driver->alert(client, data->flag);
                else
                        dev_warn(&client->dev, "no driver alert()!\n");
        } else
index 74b41ae690f3e6dab7271319ed62c436ac30935a..c58e093b6032480a316c1725db5f6879842cd2ef 100644 (file)
@@ -200,7 +200,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
        arb->parent = of_find_i2c_adapter_by_node(parent_np);
        if (!arb->parent) {
                dev_err(dev, "Cannot find parent bus\n");
-               return -EINVAL;
+               return -EPROBE_DEFER;
        }
 
        /* Actually add the mux adapter */
@@ -238,7 +238,7 @@ static struct platform_driver i2c_arbitrator_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "i2c-arb-gpio-challenge",
-               .of_match_table = of_match_ptr(i2c_arbitrator_of_match),
+               .of_match_table = i2c_arbitrator_of_match,
        },
 };
 
index 5d4a99ba743e39b21a9483ce2a15aabb41a84934..d72841f8010ca6709e52a12e2e8b566b9dcd9544 100644 (file)
@@ -66,7 +66,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
        struct device_node *adapter_np, *child;
        struct i2c_adapter *adapter;
        unsigned *values, *gpios;
-       int i = 0;
+       int i = 0, ret;
 
        if (!np)
                return -ENODEV;
@@ -79,7 +79,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
        adapter = of_find_i2c_adapter_by_node(adapter_np);
        if (!adapter) {
                dev_err(&pdev->dev, "Cannot find parent bus\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
        mux->data.parent = i2c_adapter_id(adapter);
        put_device(&adapter->dev);
@@ -116,8 +116,12 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
                return -ENOMEM;
        }
 
-       for (i = 0; i < mux->data.n_gpios; i++)
-               gpios[i] = of_get_named_gpio(np, "mux-gpios", i);
+       for (i = 0; i < mux->data.n_gpios; i++) {
+               ret = of_get_named_gpio(np, "mux-gpios", i);
+               if (ret < 0)
+                       return ret;
+               gpios[i] = ret;
+       }
 
        mux->data.gpios = gpios;
 
@@ -177,7 +181,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
        if (!parent) {
                dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
                        mux->data.parent);
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        mux->parent = parent;
@@ -279,7 +283,7 @@ static struct platform_driver i2c_mux_gpio_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "i2c-mux-gpio",
-               .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
+               .of_match_table = i2c_mux_gpio_of_match,
        },
 };
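
Editor's note: all three mux/arbitrator hunks above trade a hard failure for -EPROBE_DEFER when the parent adapter has not been registered yet, letting the driver core retry the probe once more devices have appeared. The isolated pattern looks like this; the helper name and bus number are made up, while i2c_get_adapter()/i2c_put_adapter() are the standard API.

#include <linux/i2c.h>
#include <linux/platform_device.h>

static int foo_mux_probe(struct platform_device *pdev)
{
	struct i2c_adapter *parent;

	parent = i2c_get_adapter(0);	/* hypothetical parent bus number */
	if (!parent) {
		/* Parent may simply not have probed yet; ask to be retried. */
		return -EPROBE_DEFER;
	}

	/* ... register the mux channels on top of "parent" ... */

	i2c_put_adapter(parent);
	return 0;
}
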
 
index 69a91732ae6561560ca6286007a10602e0e09ba4..68a37157377df12797b1b122ca4568d121559112 100644 (file)
@@ -113,7 +113,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
        adapter = of_find_i2c_adapter_by_node(adapter_np);
        if (!adapter) {
                dev_err(mux->dev, "Cannot find parent bus\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
        mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
        put_device(&adapter->dev);
@@ -211,7 +211,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
        if (!mux->parent) {
                dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
                        mux->pdata->parent_bus_num);
-               ret = -ENODEV;
+               ret = -EPROBE_DEFER;
                goto err;
        }
 
index 02906ca99b41c4583ca6c3adb18f309ec88ffad5..5dba90a8a27cbc8445cffbf621971ce5d9c6fb43 100644 (file)
@@ -722,13 +722,6 @@ config BLK_DEV_IDE_RAPIDE
          Say Y here if you want to support the Yellowstone RapIDE controller
          manufactured for use with Acorn computers.
 
-config IDE_H8300
-       tristate "H8300 IDE support"
-       depends on H8300
-       default y
-       help
-         Enables the H8300 IDE driver.
-
 config BLK_DEV_GAYLE
        tristate "Amiga Gayle IDE interface support"
        depends on AMIGA
index af8d016c37eaae0dc4f5c97b9aecce840d6fec57..a04ee82f1c8f5bf5bd712f90dd73744a37762e2a 100644 (file)
@@ -78,8 +78,6 @@ obj-$(CONFIG_BLK_DEV_CMD640)          += cmd640.o
 
 obj-$(CONFIG_BLK_DEV_IDE_PMAC)         += pmac.o
 
-obj-$(CONFIG_IDE_H8300)                        += ide-h8300.o
-
 obj-$(CONFIG_IDE_GENERIC)              += ide-generic.o
 obj-$(CONFIG_BLK_DEV_IDEPNP)           += ide-pnp.o
 
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
deleted file mode 100644 (file)
index 520f42c..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * H8/300 generic IDE interface
- */
-
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define DRV_NAME "ide-h8300"
-
-#define bswap(d) \
-({                                     \
-       u16 r;                          \
-       __asm__("mov.b %w1,r1h\n\t"     \
-               "mov.b %x1,r1l\n\t"     \
-               "mov.w r1,%0"           \
-               :"=r"(r)                \
-               :"r"(d)                 \
-               :"er1");                \
-       (r);                            \
-})
-
-static void mm_outsw(unsigned long addr, void *buf, u32 len)
-{
-       unsigned short *bp = (unsigned short *)buf;
-       for (; len > 0; len--, bp++)
-               *(volatile u16 *)addr = bswap(*bp);
-}
-
-static void mm_insw(unsigned long addr, void *buf, u32 len)
-{
-       unsigned short *bp = (unsigned short *)buf;
-       for (; len > 0; len--, bp++)
-               *bp = bswap(*(volatile u16 *)addr);
-}
-
-static void h8300_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
-                            void *buf, unsigned int len)
-{
-       mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static void h8300_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
-                             void *buf, unsigned int len)
-{
-       mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static const struct ide_tp_ops h8300_tp_ops = {
-       .exec_command           = ide_exec_command,
-       .read_status            = ide_read_status,
-       .read_altstatus         = ide_read_altstatus,
-       .write_devctl           = ide_write_devctl,
-
-       .dev_select             = ide_dev_select,
-       .tf_load                = ide_tf_load,
-       .tf_read                = ide_tf_read,
-
-       .input_data             = h8300_input_data,
-       .output_data            = h8300_output_data,
-};
-
-#define H8300_IDE_GAP (2)
-
-static inline void hw_setup(struct ide_hw *hw)
-{
-       int i;
-
-       memset(hw, 0, sizeof(*hw));
-       for (i = 0; i <= 7; i++)
-               hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
-       hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
-       hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
-}
-
-static const struct ide_port_info h8300_port_info = {
-       .tp_ops                 = &h8300_tp_ops,
-       .host_flags             = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
-       .chipset                = ide_generic,
-};
-
-static int __init h8300_ide_init(void)
-{
-       struct ide_hw hw, *hws[] = { &hw };
-
-       printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
-
-       if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
-               goto out_busy;
-       if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) {
-               release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8);
-               goto out_busy;
-       }
-
-       hw_setup(&hw);
-
-       return ide_host_add(&h8300_port_info, hws, 1, NULL);
-
-out_busy:
-       printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
-
-       return -EBUSY;
-}
-
-module_init(h8300_ide_init);
-
-MODULE_LICENSE("GPL");
index fa6964d8681a0d126fcf7c4845896b3f06298f8f..3a449f65eb2d3beb889217893a0e9d677214c1d9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * intel_idle.c - native hardware idle loop for modern Intel processors
  *
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2013, Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
  */
-static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
        {
                .name = "C1-NHM",
                .desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
        {
                .name = "C1-SNB",
                .desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
        {
                .name = "C1-IVB",
                .desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
        {
                .name = "C1-HSW",
                .desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
        {
                .name = "C1E-ATM",
                .desc = "MWAIT 0x00",
@@ -329,6 +329,36 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
        {
                .enter = NULL }
 };
+static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
+       {
+               .name = "C1-AVN",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       {
+               .name = "C1E-AVN",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 5,
+               .target_residency = 10,
+               .enter = &intel_idle },
+       {
+               .name = "C6NS-AVN",     /* No Cache Shrink */
+               .desc = "MWAIT 0x51",
+               .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 15,
+               .target_residency = 45,
+               .enter = &intel_idle },
+       {
+               .name = "C6FS-AVN",     /* Full Cache shrink */
+               .desc = "MWAIT 0x52",
+               .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 150,            /* fake penalty added due to cold cache */
+               .target_residency = 100000,     /* fake penalty added due to cold cache */
+               .enter = &intel_idle },
+};
 
 /**
  * intel_idle
@@ -462,6 +492,11 @@ static const struct idle_cpu idle_cpu_hsw = {
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_avn = {
+       .state_table = avn_cstates,
+       .disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -483,6 +518,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
        ICPU(0x3f, idle_cpu_hsw),
        ICPU(0x45, idle_cpu_hsw),
        ICPU(0x46, idle_cpu_hsw),
+       ICPU(0x4D, idle_cpu_avn),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -490,7 +526,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
 /*
  * intel_idle_probe()
  */
-static int intel_idle_probe(void)
+static int __init intel_idle_probe(void)
 {
        unsigned int eax, ebx, ecx;
        const struct x86_cpu_id *id;
@@ -558,7 +594,7 @@ static void intel_idle_cpuidle_devices_uninit(void)
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
  */
-static int intel_idle_cpuidle_driver_init(void)
+static int __init intel_idle_cpuidle_driver_init(void)
 {
        int cstate;
        struct cpuidle_driver *drv = &intel_idle_driver;
@@ -628,7 +664,7 @@ static int intel_idle_cpu_init(int cpu)
                int num_substates, mwait_hint, mwait_cstate, mwait_substate;
 
                if (cpuidle_state_table[cstate].enter == NULL)
-                       continue;
+                       break;
 
                if (cstate + 1 > max_cstate) {
                        printk(PREFIX "max_cstate %d reached\n", max_cstate);
index 12e32e6b41037c334c1eb8b3cef0e70145f8d228..81e3dc260993124981d45cacb9e472e9221d7b8a 100644 (file)
@@ -620,7 +620,7 @@ static int bma180_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int bma180_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bma180_data *data = iio_priv(indio_dev);
        int ret;
 
@@ -633,7 +633,7 @@ static int bma180_suspend(struct device *dev)
 
 static int bma180_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bma180_data *data = iio_priv(indio_dev);
        int ret;
 
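Note: the same substitution of dev_to_iio_dev(dev) with i2c_get_clientdata(to_i2c_client(dev)) recurs below for mcp4725 and tmp006. In these suspend/resume callbacks the struct device is the I2C client's device, not the IIO device's, so the IIO instance has to be recovered from the client data set at probe time. A minimal sketch of the pattern, assuming the probe routine called i2c_set_clientdata(client, indio_dev); the demo_* names are hypothetical:

    #include <linux/i2c.h>
    #include <linux/iio/iio.h>

    struct demo_data {
            bool powered;                   /* hypothetical driver state */
    };

    static int demo_power_down(struct demo_data *data)
    {
            data->powered = false;
            return 0;
    }

    static int demo_suspend(struct device *dev)
    {
            struct i2c_client *client = to_i2c_client(dev);         /* dev is the client's device */
            struct iio_dev *indio_dev = i2c_get_clientdata(client); /* stored by probe */
            struct demo_data *data = iio_priv(indio_dev);

            return demo_power_down(data);
    }
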
index 84be63bdf0382b6b44dfa14ff818e72377baf779..0f16b553e063f602e4e83104382827bb55cc72b2 100644 (file)
@@ -556,7 +556,7 @@ static const struct iio_info at91_adc_info = {
 
 static int at91_adc_probe(struct platform_device *pdev)
 {
-       unsigned int prsc, mstrclk, ticks, adc_clk, shtim;
+       unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
        int ret;
        struct iio_dev *idev;
        struct at91_adc_state *st;
@@ -649,6 +649,7 @@ static int at91_adc_probe(struct platform_device *pdev)
         */
        mstrclk = clk_get_rate(st->clk);
        adc_clk = clk_get_rate(st->adc_clk);
+       adc_clk_khz = adc_clk / 1000;
        prsc = (mstrclk / (2 * adc_clk)) - 1;
 
        if (!st->startup_time) {
@@ -662,15 +663,15 @@ static int at91_adc_probe(struct platform_device *pdev)
         * defined in the electrical characteristics of the board, divided by 8.
         * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock
         */
-       ticks = round_up((st->startup_time * adc_clk /
-                         1000000) - 1, 8) / 8;
+       ticks = round_up((st->startup_time * adc_clk_khz /
+                         1000) - 1, 8) / 8;
        /*
         * a minimal Sample and Hold Time is necessary for the ADC to guarantee
         * the best converted final value between two channels selection
         * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
         */
-       shtim = round_up((st->sample_hold_time * adc_clk /
-                         1000000) - 1, 1);
+       shtim = round_up((st->sample_hold_time * adc_clk_khz /
+                         1000) - 1, 1);
 
        reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
        reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
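
Note: the comments above keep the same formulas (Startup Time = (ticks + 1) * 8 / ADC Clock, Sample and Hold Time = (shtim + 1) / ADC Clock); the hunk only rescales the intermediate arithmetic from Hz to kHz so that startup_time * clock stays within 32 bits. A small standalone sketch with hypothetical numbers, assuming the startup time is in microseconds as the original division by 1000000 implies:

    #include <stdio.h>

    int main(void)
    {
            unsigned int startup_us  = 512;          /* hypothetical board value */
            unsigned int adc_clk     = 20000000;     /* 20 MHz */
            unsigned int adc_clk_khz = adc_clk / 1000;
            unsigned int ticks;

            /* Old form: 512 * 20000000 = 10,240,000,000, which overflows a
             * 32-bit unsigned. New form: 512 * 20000 = 10,240,000, in range. */
            ticks = ((startup_us * adc_clk_khz / 1000) - 1 + 7) / 8;  /* round_up(x, 8) / 8 */

            printf("ticks = %u\n", ticks);
            return 0;
    }
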
index d0a79a4bce1c8f5d83410392e3343d779b015e2a..ba6f6a91dfffc63459ca9e92299832e946f740e3 100644 (file)
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi)
 
        iio_device_unregister(indio_dev);
 
-       if (!IS_ERR(reg)) {
+       if (!IS_ERR(reg))
                regulator_disable(reg);
-               regulator_put(reg);
-       }
 
        return 0;
 }
index 9d19ba74f22bd92125501e2827ef51bbae59da13..415f3c6efd7293087cc1d5fa9313e9de308e6245 100644 (file)
@@ -41,6 +41,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
                goto error_ret;
        }
 
+       iio_buffer_init(&cb_buff->buffer);
+
        cb_buff->private = private;
        cb_buff->cb = cb;
        cb_buff->buffer.access = &iio_cb_access;
index 1f4a48e6a82c33f29b8985ad556698f0630e21af..1397b6e0e414c25835aebd829dc2bede15dedee3 100644 (file)
@@ -37,21 +37,21 @@ struct mcp4725_data {
 
 static int mcp4725_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       struct mcp4725_data *data = iio_priv(indio_dev);
+       struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+               to_i2c_client(dev)));
        u8 outbuf[2];
 
        outbuf[0] = (data->powerdown_mode + 1) << 4;
        outbuf[1] = 0;
        data->powerdown = true;
 
-       return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+       return i2c_master_send(data->client, outbuf, 2);
 }
 
 static int mcp4725_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       struct mcp4725_data *data = iio_priv(indio_dev);
+       struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+               to_i2c_client(dev)));
        u8 outbuf[2];
 
        /* restore previous DAC value */
@@ -59,7 +59,7 @@ static int mcp4725_resume(struct device *dev)
        outbuf[1] = data->dac_value & 0xff;
        data->powerdown = false;
 
-       return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+       return i2c_master_send(data->client, outbuf, 2);
 }
 
 #ifdef CONFIG_PM_SLEEP
index a7b30be86ae06cb7ae59a077aa45ee4e58f52051..52605c0ea3a69fce7c1d312d4fa1610de43174fd 100644 (file)
@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
        }
 
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-       if (indio_dev == NULL)
-               return -ENOMEM;
+       if (indio_dev == NULL) {
+               ret =  -ENOMEM;
+               goto error_disable_clk;
+       }
 
        st = iio_priv(indio_dev);
 
index 05c1b74502a37265c86a23edaf97d4e602ead575..9b32253b824be454bf2f6c46bf02acb022b3be46 100644 (file)
@@ -49,11 +49,15 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 #define iio_buffer_poll_addr (&iio_buffer_poll)
 #define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
 
+void iio_disable_all_buffers(struct iio_dev *indio_dev);
+
 #else
 
 #define iio_buffer_poll_addr NULL
 #define iio_buffer_read_first_n_outer_addr NULL
 
+static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
+
 #endif
 
 int iio_device_register_eventset(struct iio_dev *indio_dev);
index e73033f3839a5e1eceba486cadf2825b30fb7027..2db7dcd826b9db6500e354498b385cbb3ebd9f7d 100644 (file)
@@ -460,6 +460,28 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
        return bytes;
 }
 
+void iio_disable_all_buffers(struct iio_dev *indio_dev)
+{
+       struct iio_buffer *buffer, *_buffer;
+
+       if (list_empty(&indio_dev->buffer_list))
+               return;
+
+       if (indio_dev->setup_ops->predisable)
+               indio_dev->setup_ops->predisable(indio_dev);
+
+       list_for_each_entry_safe(buffer, _buffer,
+                       &indio_dev->buffer_list, buffer_list)
+               list_del_init(&buffer->buffer_list);
+
+       indio_dev->currentmode = INDIO_DIRECT_MODE;
+       if (indio_dev->setup_ops->postdisable)
+               indio_dev->setup_ops->postdisable(indio_dev);
+
+       if (indio_dev->available_scan_masks == NULL)
+               kfree(indio_dev->active_scan_mask);
+}
+
 int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
@@ -528,8 +550,15 @@ int iio_update_buffers(struct iio_dev *indio_dev,
                         * Note can only occur when adding a buffer.
                         */
                        list_del(&insert_buffer->buffer_list);
-                       indio_dev->active_scan_mask = old_mask;
-                       success = -EINVAL;
+                       if (old_mask) {
+                               indio_dev->active_scan_mask = old_mask;
+                               success = -EINVAL;
+                       }
+                       else {
+                               kfree(compound_mask);
+                               ret = -EINVAL;
+                               goto error_ret;
+                       }
                }
        } else {
                indio_dev->active_scan_mask = compound_mask;
index 97f0297b120f41e7f73d9b5a5cf27887da146a92..f95c6979efd8f58fdf42e2a6d11a645b4191c17d 100644 (file)
@@ -848,13 +848,10 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 static void iio_dev_release(struct device *device)
 {
        struct iio_dev *indio_dev = dev_to_iio_dev(device);
-       if (indio_dev->chrdev.dev)
-               cdev_del(&indio_dev->chrdev);
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                iio_device_unregister_trigger_consumer(indio_dev);
        iio_device_unregister_eventset(indio_dev);
        iio_device_unregister_sysfs(indio_dev);
-       iio_device_unregister_debugfs(indio_dev);
 
        ida_simple_remove(&iio_ida, indio_dev->id);
        kfree(indio_dev);
@@ -970,6 +967,8 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
        if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
                return -EBUSY;
 
+       iio_device_get(indio_dev);
+
        filp->private_data = indio_dev;
 
        return 0;
@@ -983,6 +982,8 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
        struct iio_dev *indio_dev = container_of(inode->i_cdev,
                                                struct iio_dev, chrdev);
        clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
+       iio_device_put(indio_dev);
+
        return 0;
 }
 
@@ -1052,18 +1053,20 @@ int iio_device_register(struct iio_dev *indio_dev)
                indio_dev->setup_ops == NULL)
                indio_dev->setup_ops = &noop_ring_setup_ops;
 
-       ret = device_add(&indio_dev->dev);
-       if (ret < 0)
-               goto error_unreg_eventset;
        cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
        indio_dev->chrdev.owner = indio_dev->info->driver_module;
+       indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
        ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
        if (ret < 0)
-               goto error_del_device;
-       return 0;
+               goto error_unreg_eventset;
 
-error_del_device:
-       device_del(&indio_dev->dev);
+       ret = device_add(&indio_dev->dev);
+       if (ret < 0)
+               goto error_cdev_del;
+
+       return 0;
+error_cdev_del:
+       cdev_del(&indio_dev->chrdev);
 error_unreg_eventset:
        iio_device_unregister_eventset(indio_dev);
 error_free_sysfs:
@@ -1078,9 +1081,17 @@ EXPORT_SYMBOL(iio_device_register);
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
        mutex_lock(&indio_dev->info_exist_lock);
+
+       device_del(&indio_dev->dev);
+
+       if (indio_dev->chrdev.dev)
+               cdev_del(&indio_dev->chrdev);
+       iio_device_unregister_debugfs(indio_dev);
+
+       iio_disable_all_buffers(indio_dev);
+
        indio_dev->info = NULL;
        mutex_unlock(&indio_dev->info_exist_lock);
-       device_del(&indio_dev->dev);
 }
 EXPORT_SYMBOL(iio_device_unregister);
 subsys_initcall(iio_init);
index 10aa9ef86cece1b80fa09c57d2a52e344e4dfc12..6be65ef5faa9b6e46716857af7ed67d8eb7ea004 100644 (file)
@@ -72,7 +72,8 @@ EXPORT_SYMBOL(iio_push_event);
 static unsigned int iio_event_poll(struct file *filep,
                             struct poll_table_struct *wait)
 {
-       struct iio_event_interface *ev_int = filep->private_data;
+       struct iio_dev *indio_dev = filep->private_data;
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
        unsigned int events = 0;
 
        poll_wait(filep, &ev_int->wait, wait);
@@ -90,7 +91,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
                                     size_t count,
                                     loff_t *f_ps)
 {
-       struct iio_event_interface *ev_int = filep->private_data;
+       struct iio_dev *indio_dev = filep->private_data;
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
        unsigned int copied;
        int ret;
 
@@ -121,7 +123,8 @@ error_unlock:
 
 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 {
-       struct iio_event_interface *ev_int = filep->private_data;
+       struct iio_dev *indio_dev = filep->private_data;
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
 
        spin_lock_irq(&ev_int->wait.lock);
        __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -133,6 +136,8 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
        kfifo_reset_out(&ev_int->det_events);
        spin_unlock_irq(&ev_int->wait.lock);
 
+       iio_device_put(indio_dev);
+
        return 0;
 }
 
@@ -158,12 +163,15 @@ int iio_event_getfd(struct iio_dev *indio_dev)
                return -EBUSY;
        }
        spin_unlock_irq(&ev_int->wait.lock);
-       fd = anon_inode_getfd("iio:event",
-                               &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+       iio_device_get(indio_dev);
+
+       fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
+                               indio_dev, O_RDONLY);
        if (fd < 0) {
                spin_lock_irq(&ev_int->wait.lock);
                __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
                spin_unlock_irq(&ev_int->wait.lock);
+               iio_device_put(indio_dev);
        }
        return fd;
 }
@@ -276,7 +284,7 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
                        goto error_ret;
                }
                if (chan->modified)
-                       mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+                       mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
                                                  i/IIO_EV_DIR_MAX,
                                                  i%IIO_EV_DIR_MAX);
                else if (chan->differential)
index e8d2849cc81d9110356691caaba587c80afff57e..cab3bc7494a2260214a85874310ac80deca87c0a 100644 (file)
@@ -29,9 +29,9 @@
 #define ST_MAGN_NUMBER_DATA_CHANNELS           3
 
 /* DEFAULT VALUE FOR SENSORS */
-#define ST_MAGN_DEFAULT_OUT_X_L_ADDR           0X04
-#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR           0X08
-#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR           0X06
+#define ST_MAGN_DEFAULT_OUT_X_H_ADDR           0X03
+#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR           0X07
+#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR           0X05
 
 /* FULLSCALE */
 #define ST_MAGN_FS_AVL_1300MG                  1300
 static const struct iio_chan_spec st_magn_16bit_channels[] = {
        ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-                       ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
-                       ST_MAGN_DEFAULT_OUT_X_L_ADDR),
+                       ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
+                       ST_MAGN_DEFAULT_OUT_X_H_ADDR),
        ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-                       ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
-                       ST_MAGN_DEFAULT_OUT_Y_L_ADDR),
+                       ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
+                       ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
        ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-                       ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
-                       ST_MAGN_DEFAULT_OUT_Z_L_ADDR),
+                       ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
+                       ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
        IIO_CHAN_SOFT_TIMESTAMP(3)
 };
 
index 64ccde3f1f7a60ae6693d1e91f117e370bd0141b..6d63883da1ab0e56be798538e1d965b1032eacb7 100644 (file)
@@ -255,12 +255,14 @@ static int tmp006_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int tmp006_suspend(struct device *dev)
 {
-       return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev)));
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+       return tmp006_powerdown(iio_priv(indio_dev));
 }
 
 static int tmp006_resume(struct device *dev)
 {
-       struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev));
+       struct tmp006_data *data = iio_priv(i2c_get_clientdata(
+               to_i2c_client(dev)));
        return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
                data->config | TMP006_CONFIG_MOD_MASK);
 }
index 5ceda710f516bc4721e14961a504fa7e2c3054a9..b84791f03a27f5d174b60680f77c59de06bb542a 100644 (file)
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
          libibverbs, libibcm and a hardware driver library from
          <http://www.openfabrics.org/git/>.
 
+config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+       bool "Experimental and unstable ABI for userspace access to flow steering verbs"
+       depends on INFINIBAND_USER_ACCESS
+       depends on STAGING
+       ---help---
+         The final ABI for userspace access to flow steering verbs
+         has not been defined.  To use the current ABI, *WHICH WILL
+         CHANGE IN THE FUTURE*, say Y here.
+
+         If unsure, say N.
+
 config INFINIBAND_USER_MEM
        bool
        depends on INFINIBAND_USER_ACCESS != n
index dab4b41f1715846bd2a26c03fd90f0175766980e..a082fd9e7ebe009465f961eab3ad8b840839f7f9 100644 (file)
@@ -2294,7 +2294,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
        int low, high, remaining;
        unsigned int rover;
 
-       inet_get_local_port_range(&low, &high);
+       inet_get_local_port_range(&init_net, &low, &high);
        remaining = (high - low) + 1;
        rover = net_random() % remaining + low;
 retry:
index d040b877475f3f04f9f6c2fea63ba772fbd9586c..d8f9c6c272d732a898b334be0c3034aadf37b275 100644 (file)
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 IB_UVERBS_DECLARE_CMD(create_flow);
 IB_UVERBS_DECLARE_CMD(destroy_flow);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 #endif /* UVERBS_H */
index f2b81b9ee0d6849c67693e50c351721c64df27fe..2f0f01b70e3bd22c538cd3a4081be0a9ec3f790f 100644 (file)
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class  = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                      \
        do {                                                            \
@@ -2599,6 +2601,7 @@ out_put:
        return ret ? ret : in_len;
 }
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
                                union ib_flow_spec *ib_spec)
 {
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
                                struct ib_uverbs_create_xsrq *cmd,
index 75ad86c4abf82a86572d9ca8b35d52a7f9d2d4ce..2df31f68ea0905ef06ac1ed054348cf669cfaa61 100644 (file)
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
        [IB_USER_VERBS_CMD_CLOSE_XRCD]          = ib_uverbs_close_xrcd,
        [IB_USER_VERBS_CMD_CREATE_XSRQ]         = ib_uverbs_create_xsrq,
        [IB_USER_VERBS_CMD_OPEN_QP]             = ib_uverbs_open_qp,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        [IB_USER_VERBS_CMD_CREATE_FLOW]         = ib_uverbs_create_flow,
        [IB_USER_VERBS_CMD_DESTROY_FLOW]        = ib_uverbs_destroy_flow
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
                return -ENOSYS;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
                struct ib_uverbs_cmd_hdr_ex hdr_ex;
 
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                     (hdr_ex.out_words +
                                                      hdr_ex.provider_out_words) * 4);
        } else {
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
                if (hdr.in_words * 4 != count)
                        return -EINVAL;
 
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                     buf + sizeof(hdr),
                                                     hdr.in_words * 4,
                                                     hdr.out_words * 4);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 }
 
 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
index d5d1929753e4fdc95bbdc67625f5f0521ffeb32f..cedda25232be2454b3b2f0902152d96a7b1dfd75 100644 (file)
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
                return "C2_QP_STATE_ERROR";
        default:
                return "<invalid QP state>";
-       };
+       }
 }
 
 void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
index d6c5a73becf40ecfc0f177422f34de1f14247f32..f0612645de998f6bcba2dfa0ae34a2b51483af38 100644 (file)
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
                ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
                ibdev->ib_dev.uverbs_cmd_mask   |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
        }
 
        mlx4_ib_alloc_eqs(dev, ibdev);
index 3f831de9a4d8a901607afc26c81827cc5c1e3bd5..b1a6cb3a2809282fbfab1b44b09c53e315a93466 100644 (file)
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+       char name[MLX5_MAX_EQ_NAME];
        struct mlx5_eq *eq, *n;
        int ncomp_vec;
        int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
                        goto clean;
                }
 
-               snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(&dev->mdev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        eq->name,
-                                        &dev->mdev.priv.uuari.uars[0]);
+                                        name, &dev->mdev.priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
        props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-       props->atomic_cap          = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-               IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-       props->masked_atomic_cap   = IB_ATOMIC_HCA;
+       props->atomic_cap          = IB_ATOMIC_NONE;
+       props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
        props->max_mcast_grp       = 1 << dev->mdev.caps.log_max_mcg;
        props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
        ibev.device           = &ibdev->ib_dev;
        ibev.element.port_num = port;
 
+       if (port < 1 || port > ibdev->num_ports) {
+               mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+               return;
+       }
+
        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);
 }
index bd41df95b6f0214deb445ab228be8d157373074f..3453580b1eb2cc45800b8ee697157a24cf269970 100644 (file)
@@ -42,6 +42,10 @@ enum {
        DEF_CACHE_SIZE  = 10,
 };
 
+enum {
+       MLX5_UMR_ALIGN  = 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
        unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
-       int size = sizeof(u64) * npages;
        int err = 0;
        int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                }
                mr->order = ent->order;
                mr->umred = 1;
-               mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-               if (!mr->pas) {
-                       kfree(mr);
-                       err = -ENOMEM;
-                       goto out;
-               }
-               mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-                                        DMA_TO_DEVICE);
-               if (dma_mapping_error(ddev, mr->dma)) {
-                       kfree(mr->pas);
-                       kfree(mr);
-                       err = -ENOMEM;
-                       goto out;
-               }
-
                in->seg.status = 1 << 6;
                in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
                in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                                            sizeof(*in));
                if (err) {
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
                        kfree(mr);
                        goto out;
                }
@@ -129,11 +114,9 @@ out:
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
-       int size;
        int err;
        int i;
 
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-               if (err) {
+               if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
-               } else {
-                       size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
+               else
                        kfree(mr);
-               }
        }
 }
 
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
-       int size;
        int err;
 
+       cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock(&ent->lock);
                if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-               if (err) {
+               if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
-               } else {
-                       size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
+               else
                        kfree(mr);
-               }
        }
 }
 
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
        int i;
 
        dev->cache.stopped = 1;
-       destroy_workqueue(dev->cache.wq);
+       flush_workqueue(dev->cache.wq);
 
        mlx5_mr_cache_debugfs_cleanup(dev);
 
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);
 
+       destroy_workqueue(dev->cache.wq);
+
        return 0;
 }
 
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  int page_shift, int order, int access_flags)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct ib_send_wr wr, *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
+       int size = sizeof(u64) * npages;
        int err;
        int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        if (!mr)
                return ERR_PTR(-EAGAIN);
 
-       mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+       mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+       if (!mr->pas) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       mlx5_ib_populate_pas(dev, umem, page_shift,
+                            mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+       mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(ddev, mr->dma)) {
+               kfree(mr->pas);
+               err = -ENOMEM;
+               goto error;
+       }
 
        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        wait_for_completion(&mr->done);
        up(&umrc->sem);
 
+       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+       kfree(mr->pas);
+
        if (mr->status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "reg umr failed\n");
                err = -EFAULT;
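
Note: the reg_umr() hunk moves the page-list buffer to a per-call allocation: it over-allocates by MLX5_UMR_ALIGN - 1 bytes, rounds the pointer up with mr_align() (the usual mask = align - 1 idiom, requiring a power-of-two alignment, 2048 here), maps the aligned pointer for DMA, and still frees the original pointer afterwards. A standalone sketch of that over-allocate-and-align pattern; it mirrors the idiom rather than the driver code itself:

    #include <stdint.h>
    #include <stdlib.h>

    /* Round ptr up to the next 'align' boundary; align must be a power of two. */
    static void *align_up(void *ptr, uintptr_t align)
    {
            uintptr_t mask = align - 1;

            return (void *)(((uintptr_t)ptr + mask) & ~mask);
    }

    int main(void)
    {
            size_t size = 4096;
            void *raw = malloc(size + 2048 - 1);            /* like kmalloc(size + MLX5_UMR_ALIGN - 1) */
            void *aligned = raw ? align_up(raw, 2048) : NULL;

            /* ... hand 'aligned' to the DMA-style user, then free the original pointer ... */
            free(raw);
            return aligned ? 0 : 1;
    }
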
index 045f8cdbd303deba81e60c5f1f52330b536b9617..5659ea88074108e60d9253085ce89f962326b808 100644 (file)
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
        switch (qp_type) {
        case IB_QPT_XRC_INI:
-               size = sizeof(struct mlx5_wqe_xrc_seg);
+               size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;
 
+       case IB_QPT_XRC_TGT:
+               return 0;
+
        case IB_QPT_UC:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;
 
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;
 
        case MLX5_IB_QPT_REG_UMR:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                return wqe_size;
 
        if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-               mlx5_ib_dbg(dev, "\n");
+               mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+                           wqe_size, dev->mdev.caps.max_sq_desc_sz);
                return -EINVAL;
        }
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+       if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+               mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+                           qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+               return -ENOMEM;
+       }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = attr->cap.max_send_sge;
-       qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+       qp->sq.max_post = wq_size / wqe_size;
+       attr->cap.max_send_wr = qp->sq.max_post;
 
        return wq_size;
 }
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
                                          MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX    |
                                           MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+                                         MLX5_QP_OPTPAR_RRE            |
+                                         MLX5_QP_OPTPAR_RAE            |
+                                         MLX5_QP_OPTPAR_RWE            |
+                                         MLX5_QP_OPTPAR_PKEY_INDEX,
                },
        },
        [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_UD]  = MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_UC]  = MLX5_QP_OPTPAR_RWE,
+                       [MLX5_QP_ST_RC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT   |
+                                          MLX5_QP_OPTPAR_RWE           |
+                                          MLX5_QP_OPTPAR_RAE           |
+                                          MLX5_QP_OPTPAR_RRE,
                },
        },
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
        rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-       if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-               aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
-       } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-               aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-       } else {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-               aseg->compare  = 0;
-       }
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-                                 struct ib_send_wr *wr)
-{
-       aseg->swap_add          = cpu_to_be64(wr->wr.atomic.swap);
-       aseg->swap_add_mask     = cpu_to_be64(wr->wr.atomic.swap_mask);
-       aseg->compare           = cpu_to_be64(wr->wr.atomic.compare_add);
-       aseg->compare_mask      = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
                             struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
-                               set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-                                             wr->wr.atomic.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-                               set_atomic_seg(seg, wr);
-                               seg  += sizeof(struct mlx5_wqe_atomic_seg);
-
-                               size += (sizeof(struct mlx5_wqe_raddr_seg) +
-                                        sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-                               break;
-
                        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-                               set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-                                             wr->wr.atomic.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-                               set_masked_atomic_seg(seg, wr);
-                               seg  += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-                               size += (sizeof(struct mlx5_wqe_raddr_seg) +
-                                        sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-                               break;
+                               mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+                               err = -ENOSYS;
+                               *bad_wr = wr;
+                               goto out;
 
                        case IB_WR_LOCAL_INV:
                                next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
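
Note: the sq_overhead() hunk converts the remaining '=' assignments to '+=' so every arm of the switch accumulates into size, which is what a fall-through chain such as IB_QPT_XRC_INI -> IB_QPT_RC relies on, and it gives IB_QPT_XRC_TGT an explicit zero-overhead return. A standalone sketch of why accumulation matters once cases fall through; the segment sizes are illustrative only:

    #include <stdio.h>

    enum demo_qp_type { DEMO_XRC_INI, DEMO_RC };

    static int demo_overhead(enum demo_qp_type type)
    {
            int size = 0;

            switch (type) {
            case DEMO_XRC_INI:
                    size += 8;              /* xrc segment */
                    /* fall through */
            case DEMO_RC:
                    size += 16 + 12;        /* ctrl + raddr segments; assigning ('=') here
                                             * would throw away the xrc segment added above */
                    break;
            }
            return size;
    }

    int main(void)
    {
            printf("XRC_INI = %d, RC = %d\n",
                   demo_overhead(DEMO_XRC_INI), demo_overhead(DEMO_RC));
            return 0;
    }
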
index 84d297afd6a9889642007a4b3d6b2e0e6e25be74..0aa478bc291ae39aec0ed8c243cce72fe7ed2bdd 100644 (file)
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        mlx5_vfree(in);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
-               goto err_srq;
+               goto err_usr_kern_srq;
        }
 
        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 err_core:
        mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
        if (pd->uobject)
                destroy_srq_user(pd, srq);
        else
index 7c9d35f39d756950b9470d50201f882ca6fba1e3..69020173899397ee5889287ee79f0c3525ab8991 100644 (file)
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
-               };
+               }
 
                set_eqe_hw(eqe);
                ++eq->cons_index;
index 4ed8235d2d36d818360cce7e2d43010e0840ab62..50219ab2279d56ae6599e7189b06034bf12049d9 100644 (file)
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
                return IB_QPS_SQE;
        case OCRDMA_QPS_ERR:
                return IB_QPS_ERR;
-       };
+       }
        return IB_QPS_ERR;
 }
 
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
                return OCRDMA_QPS_SQE;
        case IB_QPS_ERR:
                return OCRDMA_QPS_ERR;
-       };
+       }
        return OCRDMA_QPS_ERR;
 }
 
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
                break;
        default:
                return -EINVAL;
-       };
+       }
 
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
        if (!cmd)
index 56e004940f1806c9203e788985395ea4ea5bdddb..0ce7674621eaba2aa310fef332695b67f3057971 100644 (file)
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
        case BE_DEV_DOWN:
                ocrdma_close(dev);
                break;
-       };
+       }
 }
 
 static struct ocrdma_driver ocrdma_drv = {
index 6e982bb43c3172d2cc36b844c1b97f8bb0960237..69f1d1221a6bea07039fc37b5ead33f941fc74a4 100644 (file)
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
-       };
+       }
 }
 
 
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
        default:
                ibwc_status = IB_WC_GENERAL_ERR;
                break;
-       };
+       }
        return ibwc_status;
 }
 
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
                pr_err("%s() invalid opcode received = 0x%x\n",
                       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
                break;
-       };
+       }
 }
 
 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
index 653ac6bfc57a61147bbeb72fbc598a0d1884b734..6c923c7039a156d10eeaa7a39f339ecd84a360ac 100644 (file)
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
        int resp_data_len;
        int resp_len;
 
-       resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+       resp_data_len = 4;
        resp_len = sizeof(*srp_rsp) + resp_data_len;
 
        srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
                                    + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
 
-       if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
-               srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
-               srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
-               srp_rsp->data[3] = rsp_code;
-       }
+       srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+       srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+       srp_rsp->data[3] = rsp_code;
 
        return resp_len;
 }
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w)
        transport_deregister_session(se_sess);
        ch->sess = NULL;
 
+       ib_destroy_cm_id(ch->cm_id);
+
        srpt_destroy_ch_ib(ch);
 
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w)
        list_del(&ch->list);
        spin_unlock_irq(&sdev->spinlock);
 
-       ib_destroy_cm_id(ch->cm_id);
-
        if (ch->release_done)
                complete(ch->release_done);
 
index c0446992892533b24d3853d043cf088451db5662..e75d015024a15c7fb6eb3074299e234a69916594 100644 (file)
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
  */
 struct input_dev *input_allocate_device(void)
 {
+       static atomic_t input_no = ATOMIC_INIT(0);
        struct input_dev *dev;
 
        dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
                device_initialize(&dev->dev);
                mutex_init(&dev->mutex);
                spin_lock_init(&dev->event_lock);
+               init_timer(&dev->timer);
                INIT_LIST_HEAD(&dev->h_list);
                INIT_LIST_HEAD(&dev->node);
 
+               dev_set_name(&dev->dev, "input%ld",
+                            (unsigned long) atomic_inc_return(&input_no) - 1);
+
                __module_get(THIS_MODULE);
        }
 
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
  */
 int input_register_device(struct input_dev *dev)
 {
-       static atomic_t input_no = ATOMIC_INIT(0);
        struct input_devres *devres = NULL;
        struct input_handler *handler;
        unsigned int packet_size;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
         * If delay and period are pre-set by the driver, then autorepeating
         * is handled by the driver itself and we don't do it in input.c.
         */
-       init_timer(&dev->timer);
        if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
                dev->timer.data = (long) dev;
                dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
        if (!dev->setkeycode)
                dev->setkeycode = input_default_setkeycode;
 
-       dev_set_name(&dev->dev, "input%ld",
-                    (unsigned long) atomic_inc_return(&input_no) - 1);
-
        error = device_add(&dev->dev);
        if (error)
                goto err_free_vals;
index 134c3b404a54d22a5b7b425c22fc88ea4949cecf..a2e758d27584c0117a5c941d36b8042ab20f425f 100644 (file)
@@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
        input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
        input_set_capability(input_dev, EV_MSC, MSC_SCAN);
 
-       if (pdata)
+       if (pdata) {
                error = pxa27x_keypad_build_keycode(keypad);
-       else
+       } else {
                error = pxa27x_keypad_build_keycode_from_dt(keypad);
+               /*
+                * Data that we get from DT resides in dynamically
+                * allocated memory so we need to update our pdata
+                * pointer.
+                */
+               pdata = keypad->pdata;
+       }
        if (error) {
                dev_err(&pdev->dev, "failed to build keycode\n");
                goto failed_put_clk;
index 082684e7f390f7d9f50201df3e5ddd325f2d0256..9365535ba7f157b98138f6f0defb14d9e1ae782a 100644 (file)
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
        if (status) {
                if (status == -ESHUTDOWN)
                        return;
-               dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+               dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+                                   __func__, status);
+               goto out;
        }
 
        /* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
             dev->ctl_data->byte[2],
             dev->ctl_data->byte[3]);
 
-       if (status)
-               dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+       if (status) {
+               if (status == -ESHUTDOWN)
+                       return;
+               dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+                                   __func__, status);
+       }
 
        spin_lock(&dev->ctl_submit_lock);
 
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
 
        if (likely(!dev->shutdown)) {
 
-               if (dev->buzzer_pending) {
+               if (dev->buzzer_pending || status) {
                        dev->buzzer_pending = 0;
                        dev->ctl_urb_pending = 1;
                        cm109_submit_buzz_toggle(dev);
index 78e4de42efaacec53c29f740fafe490dead2af84..52c9ebf94729ff5bf6531cc7773902ece632d03b 100644 (file)
@@ -223,21 +223,26 @@ static int i8042_flush(void)
 {
        unsigned long flags;
        unsigned char data, str;
-       int i = 0;
+       int count = 0;
+       int retval = 0;
 
        spin_lock_irqsave(&i8042_lock, flags);
 
-       while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) {
-               udelay(50);
-               data = i8042_read_data();
-               i++;
-               dbg("%02x <- i8042 (flush, %s)\n",
-                   data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+       while ((str = i8042_read_status()) & I8042_STR_OBF) {
+               if (count++ < I8042_BUFFER_SIZE) {
+                       udelay(50);
+                       data = i8042_read_data();
+                       dbg("%02x <- i8042 (flush, %s)\n",
+                           data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+               } else {
+                       retval = -EIO;
+                       break;
+               }
        }
 
        spin_unlock_irqrestore(&i8042_lock, flags);
 
-       return i;
+       return retval;
 }
 
 /*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
 
 static int i8042_controller_check(void)
 {
-       if (i8042_flush() == I8042_BUFFER_SIZE) {
+       if (i8042_flush()) {
                pr_err("No controller found\n");
                return -ENODEV;
        }
index 79b69ea47f747abd035d2eb44cad9d724699ddd7..e53416a4d7f3275ea2fd8f53f6a55dcbb6de02ad 100644 (file)
@@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
 }
 
 static enum power_supply_property wacom_battery_props[] = {
+       POWER_SUPPLY_PROP_SCOPE,
        POWER_SUPPLY_PROP_CAPACITY
 };
 
@@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
        int ret = 0;
 
        switch (psp) {
+               case POWER_SUPPLY_PROP_SCOPE:
+                       val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+                       break;
                case POWER_SUPPLY_PROP_CAPACITY:
                        val->intval =
                                wacom->wacom_wac.battery_capacity * 100 / 31;
index b2aa503c16b1fb08c7d94817c533794d6d9375bd..c59b797eeafaa4f6258552389437cb8d113e1642 100644 (file)
@@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 =
 static const struct wacom_features wacom_features_0x10D =
        { "Wacom ISDv4 10D",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
          0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10E =
+       { "Wacom ISDv4 10E",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+         0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10F =
+       { "Wacom ISDv4 10F",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+         0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x4001 =
        { "Wacom ISDv4 4001",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
          0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x100) },
        { USB_DEVICE_WACOM(0x101) },
        { USB_DEVICE_WACOM(0x10D) },
+       { USB_DEVICE_WACOM(0x10E) },
+       { USB_DEVICE_WACOM(0x10F) },
        { USB_DEVICE_WACOM(0x300) },
        { USB_DEVICE_WACOM(0x301) },
        { USB_DEVICE_WACOM(0x304) },
index fe302e33f72e7b2da024180e697422461b017bc7..c880ebaf155372eaa7460491258502c9c2d35dcd 100644 (file)
@@ -52,7 +52,7 @@ config AMD_IOMMU
        select PCI_PRI
        select PCI_PASID
        select IOMMU_API
-       depends on X86_64 && PCI && ACPI && X86_IO_APIC
+       depends on X86_64 && PCI && ACPI
        ---help---
          With this option you can enable support for AMD IOMMU hardware in
          your system. An IOMMU is a hardware component which provides
index f417e89e1e7e47e812fef6e16faf9a19432ce0b4..181c9ba929cdfa131123639862cdefb2587c8cf6 100644 (file)
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
        u32                             cbar;
        pgd_t                           *pgd;
 };
+#define INVALID_IRPTNDX                        0xff
 
 #define ARM_SMMU_CB_ASID(cfg)          ((cfg)->cbndx)
 #define ARM_SMMU_CB_VMID(cfg)          ((cfg)->cbndx + 1)
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (IS_ERR_VALUE(ret)) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        root_cfg->irptndx, irq);
-               root_cfg->irptndx = -1;
+               root_cfg->irptndx = INVALID_IRPTNDX;
                goto out_free_context;
        }
 
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
        arm_smmu_tlb_inv_context(root_cfg);
 
-       if (root_cfg->irptndx != -1) {
+       if (root_cfg->irptndx != INVALID_IRPTNDX) {
                irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
                free_irq(irq, domain);
        }
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
                goto out_put_parent;
        }
 
-       arm_smmu_device_reset(smmu);
-
        for (i = 0; i < smmu->num_global_irqs; ++i) {
                err = request_irq(smmu->irqs[i],
                                  arm_smmu_global_fault,
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        spin_lock(&arm_smmu_devices_lock);
        list_add(&smmu->list, &arm_smmu_devices);
        spin_unlock(&arm_smmu_devices_lock);
+
+       arm_smmu_device_reset(smmu);
        return 0;
 
 out_free_irqs:
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void)
                return ret;
 
        /* Oh, for a proper bus abstraction */
-       if (!iommu_present(&platform_bus_type));
+       if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
 
-       if (!iommu_present(&amba_bustype));
+       if (!iommu_present(&amba_bustype))
                bus_set_iommu(&amba_bustype, &arm_smmu_ops);
 
        return 0;
index d0e948084eaf7e2211c323f5976ecc08e5681136..9031171c141b52c5e9175fdbf6eec9bd0c4224b3 100644 (file)
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
+       raw_spin_lock(&irq_controller_lock);
        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;
-
-       raw_spin_lock(&irq_controller_lock);
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
        int cpu;
-       unsigned long map = 0;
+       unsigned long flags, map = 0;
+
+       raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
@@ -666,7 +667,149 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
        /* this always happens on GIC0 */
        writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send an SGI directly to a given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send an SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+       BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+       cpu_id = 1 << cpu_id;
+       /* this always happens on GIC0 */
+       writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+       unsigned int cpu_bit;
+
+       if (cpu >= NR_GIC_CPU_IF)
+               return -1;
+       cpu_bit = gic_cpu_map[cpu];
+       if (cpu_bit & (cpu_bit - 1))
+               return -1;
+       return __ffs(cpu_bit);
 }
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+       unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+       void __iomem *dist_base;
+       int i, ror_val, cpu = smp_processor_id();
+       u32 val, cur_target_mask, active_mask;
+
+       if (gic_nr >= MAX_GIC_NR)
+               BUG();
+
+       dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+       if (!dist_base)
+               return;
+       gic_irqs = gic_data[gic_nr].gic_irqs;
+
+       cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+       cur_target_mask = 0x01010101 << cur_cpu_id;
+       ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+       raw_spin_lock(&irq_controller_lock);
+
+       /* Update the target interface for this logical CPU */
+       gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+       /*
+        * Find all the peripheral interrupts targeting the current
+        * CPU interface and migrate them to the new CPU interface.
+        * We skip DIST_TARGET 0 to 7 as they are read-only.
+        */
+       for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+               val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+               active_mask = val & cur_target_mask;
+               if (active_mask) {
+                       val &= ~active_mask;
+                       val |= ror32(active_mask, ror_val);
+                       writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+               }
+       }
+
+       raw_spin_unlock(&irq_controller_lock);
+
+       /*
+        * Now let's migrate and clear any potential SGIs that might be
+        * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+        * is a banked register, we can only forward the SGI using
+        * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+        * doesn't use that information anyway.
+        *
+        * For the same reason we do not adjust SGI source information
+        * for previously sent SGIs by us to other CPUs either.
+        */
+       for (i = 0; i < 16; i += 4) {
+               int j;
+               val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+               if (!val)
+                       continue;
+               writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+               for (j = i; j < i + 4; j++) {
+                       if (val & 0xff)
+                               writel_relaxed((1 << (new_cpu_id + 16)) | j,
+                                               dist_base + GIC_DIST_SOFTINT);
+                       val >>= 8;
+               }
+       }
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+       if (!gic_dist_physaddr)
+               return 0;
+       return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+       struct resource res;
+       if (of_address_to_resource(node, 0, &res) == 0) {
+               gic_dist_physaddr = res.start;
+               pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+       }
+}
+
+#else
+#define gic_init_physaddr(node)  do { } while (0)
 #endif
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -850,6 +993,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
                percpu_offset = 0;
 
        gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+       if (!gic_cnt)
+               gic_init_physaddr(node);
 
        if (parent) {
                irq = irq_of_parse_and_map(node, 0);
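
Two details in the new BL switcher code above are easy to miss: gic_get_cpu_id() uses the classic cpu_bit & (cpu_bit - 1) test to reject maps with more than one bit set, and gic_migrate_target() moves only the per-IRQ target bytes that match the old CPU by masking them out and rotating them into the new CPU's bit position. A standalone C illustration of that rotation step (values are made up; this is not the driver code):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t ror32(uint32_t word, unsigned int shift)
	{
		return (word >> shift) | (word << ((32 - shift) & 31));
	}

	int main(void)
	{
		unsigned int cur_cpu_id = 0, new_cpu_id = 2;
		uint32_t cur_target_mask = 0x01010101u << cur_cpu_id;
		unsigned int ror_val = (cur_cpu_id - new_cpu_id) & 31;
		uint32_t val = 0x03010101;	/* four per-IRQ target bytes */
		uint32_t active = val & cur_target_mask;

		/* move only the bits targeting cur_cpu_id over to new_cpu_id */
		val = (val & ~active) | ror32(active, ror_val);
		printf("%08x\n", val);	/* 06040404: CPU0 targets became CPU2, CPU1 bit untouched */
		return 0;
	}
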
index 52377b4bf039e0cbe19249ee3361acbd6ce85014..a2e0ed6c9a4d367db910d9493974bcd116a369e1 100644 (file)
@@ -481,7 +481,7 @@ void __inline__ outpp(void __iomem *addr, word p)
 int diva_os_register_irq(void *context, byte irq, const char *name)
 {
        int result = request_irq(irq, diva_os_irq_wrapper,
-                                IRQF_DISABLED | IRQF_SHARED, name, context);
+                                IRQF_SHARED, name, context);
        return (result);
 }
 
index 7cab5c3276c2e16f9e91fb97a635e7ccdf6640b5..e1519718ce67e3ed4363078eb3e8207b6b9f1db6 100644 (file)
@@ -288,9 +288,9 @@ int divas_um_idi_delete_entity(int adapter_nr, void *entity)
        cleanup_entity(e);
        diva_os_free(0, e->os_context);
        memset(e, 0x00, sizeof(*e));
-       diva_os_free(0, e);
 
        DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
+       diva_os_free(0, e);
 
        return (0);
 }
index ca997bd4e818c6c1606ddea03d53d8d5eb51cdd4..92acc81f844d161a7e47d9477c2e49e941929d58 100644 (file)
@@ -336,7 +336,7 @@ static int __init sc_init(void)
                 */
                sc_adapter[cinst]->interrupt = irq[b];
                if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
-                               IRQF_DISABLED, interface->id,
+                               0, interface->id,
                                (void *)(unsigned long) cinst))
                {
                        kfree(sc_adapter[cinst]->channel);
index eba380d7b17f18653818bdeedaa9f56386460af1..42d2b893ea67fff1096ce4b7567a247ed194c9c0 100644 (file)
@@ -325,7 +325,6 @@ static int omap2_mbox_remove(struct platform_device *pdev)
        kfree(privblk);
        kfree(mboxblk);
        kfree(list);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
index b39f6f0b45f27b89f29e580844e41d8d315d9c18..0f12382aa35d6c939b53967b205639134e181eae 100644 (file)
@@ -498,7 +498,7 @@ struct cached_dev {
         */
        atomic_t                has_dirty;
 
-       struct ratelimit        writeback_rate;
+       struct bch_ratelimit    writeback_rate;
        struct delayed_work     writeback_rate_update;
 
        /*
@@ -507,10 +507,9 @@ struct cached_dev {
         */
        sector_t                last_read;
 
-       /* Number of writeback bios in flight */
-       atomic_t                in_flight;
+       /* Limit number of writeback bios in flight */
+       struct semaphore        in_flight;
        struct closure_with_timer writeback;
-       struct closure_waitlist writeback_wait;
 
        struct keybuf           writeback_keys;
 
index 8010eed06a51c8320786a1c49463c98c8aa70555..22d1ae72c2826a53b67656a59ea717a62a3b95a4 100644 (file)
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
 
 /* Mergesort */
 
+static void sort_key_next(struct btree_iter *iter,
+                         struct btree_iter_set *i)
+{
+       i->k = bkey_next(i->k);
+
+       if (i->k == i->end)
+               *i = iter->data[--iter->used];
+}
+
 static void btree_sort_fixup(struct btree_iter *iter)
 {
        while (iter->used > 1) {
                struct btree_iter_set *top = iter->data, *i = top + 1;
-               struct bkey *k;
 
                if (iter->used > 2 &&
                    btree_iter_cmp(i[0], i[1]))
                        i++;
 
-               for (k = i->k;
-                    k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
-                    k = bkey_next(k))
-                       if (top->k > i->k)
-                               __bch_cut_front(top->k, k);
-                       else if (KEY_SIZE(k))
-                               bch_cut_back(&START_KEY(k), top->k);
-
-               if (top->k < i->k || k == i->k)
+               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
                        break;
 
-               heap_sift(iter, i - top, btree_iter_cmp);
+               if (!KEY_SIZE(i->k)) {
+                       sort_key_next(iter, i);
+                       heap_sift(iter, i - top, btree_iter_cmp);
+                       continue;
+               }
+
+               if (top->k > i->k) {
+                       if (bkey_cmp(top->k, i->k) >= 0)
+                               sort_key_next(iter, i);
+                       else
+                               bch_cut_front(top->k, i->k);
+
+                       heap_sift(iter, i - top, btree_iter_cmp);
+               } else {
+                       /* can't happen because of comparison func */
+                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+                       bch_cut_back(&START_KEY(i->k), top->k);
+               }
        }
 }
 
index f9764e61978b5749487862b5ab88eb73b13f18ba..f42fc7ed9cd63b14fd4cf54a879dfd9d046584d1 100644 (file)
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
 
        return;
 err:
-       bch_cache_set_error(b->c, "io error reading bucket %lu",
+       bch_cache_set_error(b->c, "io error reading bucket %zu",
                            PTR_BUCKET_NR(b->c, &b->key, 0));
 }
 
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
                return SHRINK_STOP;
 
        /* Return -1 if we can't do anything right now */
-       if (sc->gfp_mask & __GFP_WAIT)
+       if (sc->gfp_mask & __GFP_IO)
                mutex_lock(&c->bucket_lock);
        else if (!mutex_trylock(&c->bucket_lock))
                return -1;
index ba95ab84b2be5a5a32292625d229bb29e051a53d..8435f81e5d858012e8aca6be8204e923a34b1d01 100644 (file)
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);
 
-               /* Read journal buckets ordered by golden ratio hash to quickly
+               /*
+                * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
                                goto bsearch;
                }
 
-               /* If that fails, check all the buckets we haven't checked
+               /*
+                * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");
 
-               for (l = 0; l < ca->sb.njournal_buckets; l++) {
-                       if (test_bit(l, bitmap))
-                               continue;
-
+               for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+                    l < ca->sb.njournal_buckets;
+                    l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;
-               }
+
+               if (list_empty(list))
+                       continue;
 bsearch:
                /* Binary search */
                m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
                                r = m;
                }
 
-               /* Read buckets in reverse order until we stop finding more
+               /*
+                * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
-               pr_debug("finishing up");
+               pr_debug("finishing up: m %u njournal_buckets %u",
+                        m, ca->sb.njournal_buckets);
                l = m;
 
                while (1) {
@@ -228,9 +233,10 @@ bsearch:
                        }
        }
 
-       c->journal.seq = list_entry(list->prev,
-                                   struct journal_replay,
-                                   list)->j.seq;
+       if (!list_empty(list))
+               c->journal.seq = list_entry(list->prev,
+                                           struct journal_replay,
+                                           list)->j.seq;
 
        return 0;
 #undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
                return;
        }
 
-       switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
+       switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;
 
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
                if (cl)
                        BUG_ON(!closure_wait(&w->wait, cl));
 
+               closure_flush(&c->journal.io);
                __journal_try_write(c, true);
        }
 }
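
The journal-read loop above now walks only the unread buckets with find_first_zero_bit()/find_next_zero_bit() instead of testing every bit, and gives up cleanly if the list is still empty. A small userspace sketch of the same clear-bit iteration pattern, using a plain unsigned long bitmap rather than the kernel bitops:

	#include <stdio.h>

	#define NBUCKETS 16

	/* Sketch: return the next index >= from whose bit is still clear. */
	static int next_zero_bit(unsigned long seen, int from, int size)
	{
		int i;

		for (i = from; i < size; i++)
			if (!(seen & (1UL << i)))
				return i;
		return size;
	}

	int main(void)
	{
		unsigned long seen = 0x0f0fUL;	/* buckets 0-3 and 8-11 already read */
		int l;

		for (l = next_zero_bit(seen, 0, NBUCKETS);
		     l < NBUCKETS;
		     l = next_zero_bit(seen, l + 1, NBUCKETS))
			printf("read bucket %d\n", l);	/* prints 4-7 and 12-15 */
		return 0;
	}
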
index 786a1a4f74d853fafe3ab2ae263d2ecf780134aa..2a7f0dd6abab4d6bb21ba028b5b3a25721080a2c 100644 (file)
@@ -996,17 +996,19 @@ static void request_write(struct cached_dev *dc, struct search *s)
                closure_bio_submit(bio, cl, s->d);
        } else {
                bch_writeback_add(dc);
+               s->op.cache_bio = bio;
 
-               if (s->op.flush_journal) {
+               if (bio->bi_rw & REQ_FLUSH) {
                        /* Also need to send a flush to the backing device */
-                       s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-                                                          dc->disk.bio_split);
-
-                       bio->bi_size = 0;
-                       bio->bi_vcnt = 0;
-                       closure_bio_submit(bio, cl, s->d);
-               } else {
-                       s->op.cache_bio = bio;
+                       struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
+                                                            dc->disk.bio_split);
+
+                       flush->bi_rw    = WRITE_FLUSH;
+                       flush->bi_bdev  = bio->bi_bdev;
+                       flush->bi_end_io = request_endio;
+                       flush->bi_private = cl;
+
+                       closure_bio_submit(flush, cl, s->d);
                }
        }
 out:
index 4fe6ab2fbe2ede59644441521aa4cc2f27f5d312..924dcfdae11102256e1ce193eefc01d82cc173cc 100644 (file)
@@ -223,8 +223,13 @@ STORE(__cached_dev)
        }
 
        if (attr == &sysfs_label) {
-               /* note: endlines are preserved */
-               memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+               if (size > SB_LABEL_SIZE)
+                       return -EINVAL;
+               memcpy(dc->sb.label, buf, size);
+               if (size < SB_LABEL_SIZE)
+                       dc->sb.label[size] = '\0';
+               if (size && dc->sb.label[size - 1] == '\n')
+                       dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
index 98eb81159a22ba9f9e88fec53127e2e338ae3d4d..420dad545c7d8a01e8b5c18d26db4b25677fd334 100644 (file)
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
        stats->last = now ?: 1;
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+/**
+ * bch_next_delay() - increment @d by the amount of work done, and return how
+ * long to delay until the next time to do some work.
+ *
+ * @d - the struct bch_ratelimit to update
+ * @done - the amount of work done, in arbitrary units
+ *
+ * Returns the amount of time to delay by, in jiffies
+ */
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
        uint64_t now = local_clock();
 
index 1ae2a73ad85f5628b5292b769d79b0f28252f4ca..ea345c6896f47777942b64f88810c99d4cbc278e 100644 (file)
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
        (ewma) >> factor;                                               \
 })
 
-struct ratelimit {
+struct bch_ratelimit {
+       /* Next time we want to do some work, in nanoseconds */
        uint64_t                next;
+
+       /*
+        * Rate at which we want to do work, in units per nanosecond
+        * The units here correspond to the units passed to bch_next_delay()
+        */
        unsigned                rate;
 };
 
-static inline void ratelimit_reset(struct ratelimit *d)
+static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
 {
        d->next = local_clock();
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
 
 #define __DIV_SAFE(n, d, zero)                                         \
 ({                                                                     \
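
Given the bch_ratelimit fields documented above (next is the next time work is allowed, in nanoseconds; rate is work units per nanosecond), the delay helper only has to credit the work just done against next and report how far in the future next now lies. A minimal sketch under those assumptions; the real bch_next_delay() returns jiffies, and the writeback hunk below additionally clamps the result to HZ:

	#include <stdint.h>

	struct ratelimit_sketch {
		uint64_t next;		/* next time work may be done, in ns */
		unsigned int rate;	/* allowed work units per ns (assumed non-zero) */
	};

	/* Sketch: account for 'done' units of work and return how many
	 * nanoseconds to wait before doing more (0 means no wait needed). */
	static uint64_t next_delay_ns(struct ratelimit_sketch *d,
				      uint64_t done, uint64_t now_ns)
	{
		d->next += done / d->rate;	/* time this much work is allowed to take */

		return d->next > now_ns ? d->next - now_ns : 0;
	}
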
index 22cbff551628f3c9cff87ccc35563c09974f03b3..ba3ee48320f2a38509adb2603f766c55e67f1da1 100644 (file)
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
+       uint64_t ret;
+
        if (atomic_read(&dc->disk.detaching) ||
            !dc->writeback_percent)
                return 0;
 
-       return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+       ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+
+       return min_t(uint64_t, ret, HZ);
 }
 
 /* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
 
        up_write(&dc->writeback_lock);
 
-       ratelimit_reset(&dc->writeback_rate);
+       bch_ratelimit_reset(&dc->writeback_rate);
 
        /* Punt to workqueue only so we don't recurse and blow the stack */
        continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
        }
 
        bch_keybuf_del(&dc->writeback_keys, w);
-       atomic_dec_bug(&dc->in_flight);
-
-       closure_wake_up(&dc->writeback_wait);
+       up(&dc->in_flight);
 
        closure_return_with_destructor(cl, dirty_io_destructor);
 }
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
 
        closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-       continue_at(cl, write_dirty_finish, dirty_wq);
+       continue_at(cl, write_dirty_finish, system_wq);
 }
 
 static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
 
        closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-       continue_at(cl, write_dirty, dirty_wq);
+       continue_at(cl, write_dirty, system_wq);
 }
 
 static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
 
                if (delay > 0 &&
                    (KEY_START(&w->key) != dc->last_read ||
-                    jiffies_to_msecs(delay) > 50)) {
-                       w->private = NULL;
-
-                       closure_delay(&dc->writeback, delay);
-                       continue_at(cl, read_dirty, dirty_wq);
-               }
+                    jiffies_to_msecs(delay) > 50))
+                       delay = schedule_timeout_uninterruptible(delay);
 
                dc->last_read   = KEY_OFFSET(&w->key);
 
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
 
                trace_bcache_writeback(&w->key);
 
-               closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+               down(&dc->in_flight);
+               closure_call(&io->cl, read_dirty_submit, NULL, cl);
 
                delay = writeback_delay(dc, KEY_SIZE(&w->key));
-
-               atomic_inc(&dc->in_flight);
-
-               if (!closure_wait_event(&dc->writeback_wait, cl,
-                                       atomic_read(&dc->in_flight) < 64))
-                       continue_at(cl, read_dirty, dirty_wq);
        }
 
        if (0) {
@@ -442,7 +435,11 @@ err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }
 
-       refill_dirty(cl);
+       /*
+        * Wait for outstanding writeback IOs to finish (and keybuf slots to be
+        * freed) before refilling again
+        */
+       continue_at(cl, refill_dirty, dirty_wq);
 }
 
 /* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
+       sema_init(&dc->in_flight, 64);
        closure_init_unlocked(&dc->writeback);
        init_rwsem(&dc->writeback_lock);
 
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
 
 int __init bch_writeback_init(void)
 {
-       dirty_wq = create_singlethread_workqueue("bcache_writeback");
+       dirty_wq = create_workqueue("bcache_writeback");
        if (!dirty_wq)
                return -ENOMEM;
 
index ea49834377c8e17b6e8b221e4bd58389a8d7c32e..2a20986a2fec9701cd25e443c990f2b7a8479f9f 100644 (file)
@@ -19,8 +19,6 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS      BITS_PER_LONG
-#define MIN_IOS                16
-#define MIN_BIOS       16
 
 struct dm_io_client {
        mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
 struct dm_io_client *dm_io_client_create(void)
 {
        struct dm_io_client *client;
+       unsigned min_ios = dm_get_reserved_bio_based_ios();
 
        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);
 
-       client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
+       client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
        if (!client->pool)
                goto bad;
 
-       client->bios = bioset_create(MIN_BIOS, 0);
+       client->bios = bioset_create(min_ios, 0);
        if (!client->bios)
                goto bad;
 
index b759a127f9c3718bbfffe2d16ca258fe4afe15e2..de570a55876451a0326d071da4cf68fd772eec2e 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/device-mapper.h>
 
+#include "dm.h"
 #include "dm-path-selector.h"
 #include "dm-uevent.h"
 
@@ -116,8 +117,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-#define MIN_IOS 256    /* Mempool size */
-
 static struct kmem_cache *_mpio_cache;
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
 static struct multipath *alloc_multipath(struct dm_target *ti)
 {
        struct multipath *m;
+       unsigned min_ios = dm_get_reserved_rq_based_ios();
 
        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);
-               m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
+               m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
                        return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
        case -EREMOTEIO:
        case -EILSEQ:
        case -ENODATA:
+       case -ENOSPC:
                return 1;
        }
 
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
        if (!error && !clone->errors)
                return 0;       /* I/O complete */
 
-       if (noretry_error(error))
+       if (noretry_error(error)) {
+               if ((clone->cmd_flags & REQ_WRITE_SAME) &&
+                   !clone->q->limits.max_write_same_sectors) {
+                       struct queue_limits *limits;
+
+                       /* device doesn't really support WRITE SAME, disable it */
+                       limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
+                       limits->max_write_same_sectors = 0;
+               }
                return error;
+       }
 
        if (mpio->pgpath)
                fail_path(mpio->pgpath);
index 3ac415675b6c778b5dd22aaaf4ee6c2dc4ca48eb..2d2b1b7588d7e476b7fc814a63c82bad2edb1ed6 100644 (file)
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
-       flush_work(&req.work);
+       flush_workqueue(ps->metadata_wq);
 
        return req.result;
 }
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 
+static void skip_metadata(struct pstore *ps)
+{
+       uint32_t stride = ps->exceptions_per_area + 1;
+       chunk_t next_free = ps->next_free;
+       if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
+               ps->next_free++;
+}
+
 /*
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
 
        ps->current_area--;
 
+       skip_metadata(ps);
+
        return 0;
 }
 
@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
 {
        struct pstore *ps = get_info(store);
-       uint32_t stride;
-       chunk_t next_free;
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
 
        /* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
         * Move onto the next free pending, making sure to take
         * into account the location of the metadata chunks.
         */
-       stride = (ps->exceptions_per_area + 1);
-       next_free = ++ps->next_free;
-       if (sector_div(next_free, stride) == 1)
-               ps->next_free++;
+       ps->next_free++;
+       skip_metadata(ps);
 
        atomic_inc(&ps->pending_count);
        return 0;
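
skip_metadata() above factors out the stride check that both read_exceptions() and persistent_prepare_exception() need: within every group of exceptions_per_area + 1 chunks, the slot at offset NUM_SNAPSHOT_HDR_CHUNKS holds area metadata and must never be handed out as a data chunk. A plain-C sketch of the check, with sector_div() written as the % operator and NUM_SNAPSHOT_HDR_CHUNKS assumed to be 1:

	#include <stdint.h>

	#define HDR_CHUNKS 1	/* assumed stand-in for NUM_SNAPSHOT_HDR_CHUNKS */

	/* Sketch: if next_free falls on the reserved metadata slot of its
	 * stride, step past it so exceptions never overwrite area metadata. */
	static uint64_t skip_metadata_sketch(uint64_t next_free,
					     uint32_t exceptions_per_area)
	{
		uint32_t stride = exceptions_per_area + 1;

		if (next_free % stride == HDR_CHUNKS)
			next_free++;
		return next_free;
	}
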
index c434e5aab2dfc9e6a63ca7700e5ac1c1deacd025..aec57d76db5d616c8e692fa95cee58a8f62a0573 100644 (file)
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
  */
 static int init_hash_tables(struct dm_snapshot *s)
 {
-       sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+       sector_t hash_size, cow_dev_size, max_buckets;
 
        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
-       origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();
 
-       hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
+       hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);
 
        if (hash_size < 64)
index 8ae31e8d3d64964652fd1bd8d31d35d3ed49e879..3d404c1371ed2d7e6f4fa052fd4379fbc89ec388 100644 (file)
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
        struct dm_stat_percpu *p;
 
        /*
-        * For strict correctness we should use local_irq_disable/enable
+        * For strict correctness we should use local_irq_save/restore
         * instead of preempt_disable/enable.
         *
-        * This is racy if the driver finishes bios from non-interrupt
-        * context as well as from interrupt context or from more different
-        * interrupts.
+        * preempt_disable/enable is racy if the driver finishes bios
+        * from non-interrupt context as well as from interrupt context
+        * or from multiple different interrupts.
         *
-        * However, the race only results in not counting some events,
-        * so it is acceptable.
+        * On 64-bit architectures the race only results in not counting some
+        * events, so it is acceptable.  On 32-bit architectures the race could
+        * cause the counter going off by 2^32, so we need to do proper locking
+        * there.
         *
         * part_stat_lock()/part_stat_unlock() have this race too.
         */
+#if BITS_PER_LONG == 32
+       unsigned long flags;
+       local_irq_save(flags);
+#else
        preempt_disable();
+#endif
        p = &s->stat_percpu[smp_processor_id()][entry];
 
        if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
                p->ticks[idx] += duration;
        }
 
+#if BITS_PER_LONG == 32
+       local_irq_restore(flags);
+#else
        preempt_enable();
+#endif
 }
 
 static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
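
The rewritten comment above is the whole story: on 32-bit a 64-bit counter update is two stores, so an interrupt landing between them that touches the same counter can leave it off by 2^32, while on 64-bit the worst a race can do is drop one event. A kernel-context sketch of that compile-time split (illustrative helper, not the dm-stats code itself):

	static void counter_add(unsigned long long *counter, unsigned long long delta)
	{
	#if BITS_PER_LONG == 32
		unsigned long flags;

		local_irq_save(flags);	/* an IRQ must not see a half-written value */
		*counter += delta;
		local_irq_restore(flags);
	#else
		preempt_disable();	/* single store; a racing IRQ at most drops one event */
		*counter += delta;
		preempt_enable();
	#endif
	}
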
index ed063427d676f64b53f3d9569449fc0daaf174b0..2c0cf511ec2385fa5a558b5d2e1e1ed0c874c9f6 100644 (file)
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         * them down to the data device.  The thin device's discard
         * processing will cause mappings to be removed from the btree.
         */
+       ti->discard_zeroes_data_unsupported = true;
        if (pf.discard_enabled && pf.discard_passdown) {
                ti->num_discard_bios = 1;
 
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                 * thin devices' discard limits consistent).
                 */
                ti->discards_supported = true;
-               ti->discard_zeroes_data_unsupported = true;
        }
        ti->private = pt;
 
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
         * They get transferred to the live pool in bind_control_target()
         * called from pool_preresume().
         */
-       if (!pt->adjusted_pf.discard_enabled)
+       if (!pt->adjusted_pf.discard_enabled) {
+               /*
+                * Must explicitly disallow stacking discard limits, otherwise the
+                * block layer will stack them if the pool's data device has support.
+                * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
+                * user to see that, so make sure to set all discard limits to 0.
+                */
+               limits->discard_granularity = 0;
                return;
+       }
 
        disable_passdown_if_not_supported(pt);
 
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
+       ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
-               ti->discard_zeroes_data_unsupported = true;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }
index 6a5e9ed2fcc3eb775268e2e5fc401efa55d6da3d..b3e26c7d141771c74d24726f8b5a56ea2134b156 100644 (file)
@@ -211,10 +211,55 @@ struct dm_md_mempools {
        struct bio_set *bs;
 };
 
-#define MIN_IOS 256
+#define RESERVED_BIO_BASED_IOS         16
+#define RESERVED_REQUEST_BASED_IOS     256
+#define RESERVED_MAX_IOS               1024
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_rq_tio_cache;
 
+/*
+ * Bio-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+
+/*
+ * Request-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+                                     unsigned def, unsigned max)
+{
+       unsigned ios = ACCESS_ONCE(*reserved_ios);
+       unsigned modified_ios = 0;
+
+       if (!ios)
+               modified_ios = def;
+       else if (ios > max)
+               modified_ios = max;
+
+       if (modified_ios) {
+               (void)cmpxchg(reserved_ios, ios, modified_ios);
+               ios = modified_ios;
+       }
+
+       return ios;
+}
+
+unsigned dm_get_reserved_bio_based_ios(void)
+{
+       return __dm_get_reserved_ios(&reserved_bio_based_ios,
+                                    RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+       return __dm_get_reserved_ios(&reserved_rq_based_ios,
+                                    RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
 static int __init local_init(void)
 {
        int r = -ENOMEM;
@@ -2277,6 +2322,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
        return md->immutable_target_type;
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'.
+ */
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+       BUG_ON(!atomic_read(&md->holders));
+       return &md->queue->limits;
+}
+EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
 /*
  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
  */
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 
        if (type == DM_TYPE_BIO_BASED) {
                cachep = _io_cache;
-               pool_size = 16;
+               pool_size = dm_get_reserved_bio_based_ios();
                front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
        } else if (type == DM_TYPE_REQUEST_BASED) {
                cachep = _rq_tio_cache;
-               pool_size = MIN_IOS;
+               pool_size = dm_get_reserved_rq_based_ios();
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
                /* per_bio_data_size is not used. See __bind_mempools(). */
                WARN_ON(per_bio_data_size != 0);
        } else
                goto out;
 
-       pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+       pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
        if (!pools->io_pool)
                goto out;
 
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
 
 module_param(major, uint, 0);
 MODULE_PARM_DESC(major, "The major number of the device mapper");
+
+module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
index 5e604cc7b4aa26c41db96c84c1fa0f306bcb698e..1d1ad7b7e527e671d1265007b25e2e04217509ab 100644 (file)
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
 /*
  * Helpers that are used by DM core
  */
+unsigned dm_get_reserved_bio_based_ios(void);
+unsigned dm_get_reserved_rq_based_ios(void);
+
 static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
 {
        return !maxlen || strlen(result) + 1 >= maxlen;
index adf4d7e1d5e15233a67e7108dc24b8a7bb6efeb1..561a65f82e26af05c8210f94b49367b6fa51e55b 100644 (file)
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
        u64 *p;
        int lo, hi;
        int rv = 1;
+       unsigned long flags;
 
        if (bb->shift < 0)
                /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
                sectors = next - s;
        }
 
-       write_seqlock_irq(&bb->lock);
+       write_seqlock_irqsave(&bb->lock, flags);
 
        p = bb->page;
        lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
        bb->changed = 1;
        if (!acknowledged)
                bb->unacked_exist = 1;
-       write_sequnlock_irq(&bb->lock);
+       write_sequnlock_irqrestore(&bb->lock, flags);
 
        return rv;
 }
index d60412c7f9952a350e85002f6578bd66ee416a44..aacf6bf352d87978a15633b05a0d51d3579ce16d 100644 (file)
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
                        }
                }
                if (rdev
+                   && rdev->recovery_offset == MaxSector
                    && !test_bit(Faulty, &rdev->flags)
                    && !test_and_set_bit(In_sync, &rdev->flags)) {
                        count++;
index df7b0a06b0ea2d820aec6ad5a752a13bb64e13e8..73dc8a377522e19af92c9bed26519a479f36dbea 100644 (file)
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
                        }
                        sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
                } else if (tmp->rdev
+                          && tmp->rdev->recovery_offset == MaxSector
                           && !test_bit(Faulty, &tmp->rdev->flags)
                           && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        count++;
index 7ff4f252ca1a42943252a9e19b975da5b2f8ce9b..f8b9068439267a3539d671e6e59ceb0b47b90547 100644 (file)
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
+                       /*
+                        * If this is a discard request, set bi_vcnt to 0. We don't
+                        * want to confuse SCSI because SCSI will replace the payload
+                        */
+                       if (rw & REQ_DISCARD)
+                               bi->bi_vcnt = 0;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
+                       /*
+                        * If this is a discard request, set bi_vcnt to 0. We don't
+                        * want to confuse SCSI because SCSI will replace the payload
+                        */
+                       if (rw & REQ_DISCARD)
+                               rbi->bi_vcnt = 0;
                        if (conf->mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
                                                      rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                }
                /* now that discard is done we can proceed with any sync */
                clear_bit(STRIPE_DISCARD, &sh->state);
+               /*
+                * SCSI discard will change some bio fields and the stripe has
+                * no updated data, so remove it from the hash list and the stripe
+                * will be reinitialized
+                */
+               spin_lock_irq(&conf->device_lock);
+               remove_hash(sh);
+               spin_unlock_irq(&conf->device_lock);
                if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
                        set_bit(STRIPE_HANDLE, &sh->state);
 
index 2521f7e23018f70a33a9ad43e1bb26e79e037ab8..e79749cfec814b75f0ac84d2f5ad881399a79a75 100644 (file)
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
                { 0xd5, 0x03, 0x03 },
        };
 
-       /* firmware status */
-       ret = tda10071_rd_reg(priv, 0x51, &tmp);
-       if (ret)
-               goto error;
-
-       if (!tmp) {
+       if (priv->warm) {
                /* warm state - wake up device from sleep */
-               priv->warm = 1;
 
                for (i = 0; i < ARRAY_SIZE(tab); i++) {
                        ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
                        goto error;
        } else {
                /* cold state - try to download firmware */
-               priv->warm = 0;
 
                /* request the firmware, this will block and timeout */
                ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
index bb0c99d7a4f169d25c642b5143efcf36c087c397..b06a7e54ee0d25100f1f01abcac7ddc70e8489df 100644 (file)
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
 
 static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 170000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+               V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
index 7a576097471f352939cc703d6fbac368cf02f532..7c8d971f1f613206b1821bfa023ea5326ba48028 100644 (file)
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
 
 static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = ADV7511_MAX_WIDTH,
-               .max_height = ADV7511_MAX_HEIGHT,
-               .min_pixelclock = ADV7511_MIN_PIXELCLOCK,
-               .max_pixelclock = ADV7511_MAX_PIXELCLOCK,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+               ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
        state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
        if (state->i2c_edid == NULL) {
                v4l2_err(sd, "failed to register edid i2c client\n");
+               err = -ENOMEM;
                goto err_entity;
        }
 
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
        state->work_queue = create_singlethread_workqueue(sd->name);
        if (state->work_queue == NULL) {
                v4l2_err(sd, "could not create workqueue\n");
+               err = -ENOMEM;
                goto err_unreg_cec;
        }
 
index d1748901337cd08be083e9a6a7d35958f6aeca7b..22f729d66a9696522e18d42932ae383594cbca5f 100644 (file)
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
 
 static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 170000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 225000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static inline const struct v4l2_dv_timings_cap *
index b76ec0e7e685ee8b1fa4d3fd99180a96b595e8d0..1083890ac5a9fa198195ad931f8943f1b67fa340 100644 (file)
@@ -1581,7 +1581,7 @@ static int s5c73m3_probe(struct i2c_client *client,
        oif_sd = &state->oif_sd;
 
        v4l2_subdev_init(sd, &s5c73m3_subdev_ops);
-       sd->owner = client->driver->driver.owner;
+       sd->owner = client->dev.driver->owner;
        v4l2_set_subdevdata(sd, state);
        strlcpy(sd->name, "S5C73M3", sizeof(sd->name));
 
index a58a8f663ffbb3a028540fc8763ce5521e55e828..d9f65d7e3e58b64dea5b930d5800e20921fd8778 100644 (file)
@@ -46,14 +46,10 @@ struct ths8200_state {
 
 static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1080,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 148500000,
-               .standards = V4L2_DV_BT_STD_CEA861,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
-       },
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+               V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
 };
 
 static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
index e12bbd8c3f0b8da63f8418e6a00bc803f883963a..fb60da85bc2c039e3ab7a6db22fa1182c4e977bb 100644 (file)
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
 
        /* stop video capture */
        if (res_check(fh, RESOURCE_VIDEO)) {
+               pm_qos_remove_request(&dev->qos_request);
                videobuf_streamoff(&fh->cap);
                res_free(dev,fh,RESOURCE_VIDEO);
        }
index a8351127831726bea95214d5e55d7f75535ac6d2..7a4ee4c0449deea95dc86e82a85af8412a1d5aad 100644 (file)
@@ -411,8 +411,8 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
 
        device_lock(&client->dev);
 
-       if (!client->driver ||
-           !try_module_get(client->driver->driver.owner)) {
+       if (!client->dev.driver ||
+           !try_module_get(client->dev.driver->owner)) {
                ret = -EPROBE_DEFER;
                v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n",
                                                node->full_name);
@@ -442,7 +442,7 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
        fmd->num_sensors++;
 
 mod_put:
-       module_put(client->driver->driver.owner);
+       module_put(client->dev.driver->owner);
 dev_put:
        device_unlock(&client->dev);
        put_device(&client->dev);
index 5184887b155c7098415b1a3470619be336023f87..32fab30a910590ba290ef987aaad8ed1df78e74f 100644 (file)
@@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 {
        struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
-       struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
        struct mcam_dma_desc *desc = mvb->dma_desc;
        struct scatterlist *sg;
        int i;
 
-       mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
-                       DMA_FROM_DEVICE);
+       mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
+                       sg_table->nents, DMA_FROM_DEVICE);
        if (mvb->dma_desc_nent <= 0)
                return -EIO;  /* Not sure what's right here */
-       for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+       for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
                desc->dma_addr = sg_dma_address(sg);
                desc->segment_len = sg_dma_len(sg);
                desc++;
@@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
 {
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
-       struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
 
-       dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+       if (sg_table)
+               dma_unmap_sg(cam->dev, sg_table->sgl,
+                               sg_table->nents, DMA_FROM_DEVICE);
        return 0;
 }
 
index df3a0ec7fd2c68f83bf2d287efd7a75378766980..1c3608039663e281d23541f17aedcbce044c87fb 100644 (file)
@@ -2182,9 +2182,9 @@ static int isp_probe(struct platform_device *pdev)
        isp->pdata = pdata;
        isp->ref_count = 0;
 
-       isp->raw_dmamask = DMA_BIT_MASK(32);
-       isp->dev->dma_mask = &isp->raw_dmamask;
-       isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        platform_set_drvdata(pdev, isp);
 
index cd3eff45ae7d50d0a8b28f4cf5c95dab80abb913..ce65d3ae1aa7b8070f5161bad3caf9d4581b1cb9 100644 (file)
@@ -152,7 +152,6 @@ struct isp_xclk {
  * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
  *                  regions.
  * @mmio_size: Array with ISP register regions size in bytes.
- * @raw_dmamask: Raw DMA mask
  * @stat_lock: Spinlock for handling statistics
  * @isp_mutex: Mutex for serializing requests to ISP.
  * @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@ struct isp_device {
        unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
        resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
 
-       u64 raw_dmamask;
-
        /* ISP Obj */
        spinlock_t stat_lock;   /* common lock for statistic drivers */
        struct mutex isp_mutex; /* For handling ref_count field */
index 15d23968d1de8392a61deb4a4bcd2b0489ce689d..9b88a46010072fbe3b4aeefa46bbc8ee0f04ea7e 100644 (file)
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
        jpeg->vfd_decoder->release      = video_device_release;
        jpeg->vfd_decoder->lock         = &jpeg->lock;
        jpeg->vfd_decoder->v4l2_dev     = &jpeg->v4l2_dev;
+       jpeg->vfd_decoder->vfl_dir      = VFL_DIR_M2M;
 
        ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
        if (ret) {
index 7a9c5e9329f2655327c2c3d6a22e660a11728d38..4f30341dc2ab239ed4b736292c1b7ccb6d208b5e 100644 (file)
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
        v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
                              &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
 
-       for (i = 0; ARRAY_SIZE(vou_fmt); i++)
+       for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
                if (vou_fmt[i].pfmt == pix->pixelformat)
                        return 0;
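
Note: the one-character fix above matters because without "i <" the condition is the non-zero constant ARRAY_SIZE(vou_fmt), so a non-matching pixel format would walk past the end of the table. The corrected bounded-scan idiom, sketched with a hypothetical format table:

	#include <linux/errno.h>
	#include <linux/types.h>

	static int example_find_fmt(const u32 *tbl, unsigned int n, u32 fourcc)
	{
		unsigned int i;

		for (i = 0; i < n; i++)		/* the bound is the table size */
			if (tbl[i] == fourcc)
				return i;
		return -EINVAL;
	}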
 
index 8f9f6211c52e1e052f0ce4db84b3bcd951260d72..f975b70086922c866dc415f9eca4506aef3c19ea 100644 (file)
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
        struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
        struct idmac_video_param *video = &ichan->params.video;
        const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
-       unsigned long flags;
        dma_cookie_t cookie;
        size_t new_size;
 
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
                memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
 #endif
 
-       spin_lock_irqsave(&mx3_cam->lock, flags);
+       spin_lock_irq(&mx3_cam->lock);
        list_add_tail(&buf->queue, &mx3_cam->capture);
 
        if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
        if (mx3_cam->active == buf)
                mx3_cam->active = NULL;
 
-       spin_unlock_irqrestore(&mx3_cam->lock, flags);
+       spin_unlock_irq(&mx3_cam->lock);
 error:
        vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
 }
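
Note: dropping the irqsave flags above relies on the queue callback always running with interrupts enabled; in that case the plain spin_lock_irq()/spin_unlock_irq() pair is sufficient. A minimal sketch with hypothetical types, illustrating the locking only:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_cam {			/* hypothetical, for illustration */
		spinlock_t lock;
		struct list_head capture;
	};

	struct example_buf {			/* hypothetical */
		struct list_head queue;
	};

	static void example_queue_buf(struct example_cam *cam, struct example_buf *buf)
	{
		/* caller is assumed to run with IRQs enabled, so no flags needed */
		spin_lock_irq(&cam->lock);
		list_add_tail(&buf->queue, &cam->capture);
		spin_unlock_irq(&cam->lock);
	}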
index ad9309da4a9102779f67cbd999aee72db465c51d..6c96e4898777f29f5c9ae4457d488e9424b32f2b 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 #include "e4000_priv.h"
+#include <linux/math64.h>
 
 /* write multiple registers */
 static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
         * or more.
         */
        f_vco = c->frequency * e4000_pll_lut[i].mul;
-       sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock;
+       sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
        buf[0] = f_vco / priv->cfg->clock;
        buf[1] = (sigma_delta >> 0) & 0xff;
        buf[2] = (sigma_delta >> 8) & 0xff;
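
Note: div_u64() above keeps the 16.16 sigma-delta computation in 64 bits without emitting a native 64-by-32 division, which 32-bit kernels cannot link (__udivdi3 is not provided). A sketch of the same scaling, with illustrative parameter names:

	#include <linux/math64.h>
	#include <linux/types.h>

	static u32 example_sigma_delta(u32 f_vco, u32 clock)
	{
		/* 0x10000 * remainder / clock, done as a 64-bit divide helper */
		return div_u64(0x10000ULL * (f_vco % clock), clock);
	}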
index 38714df31ac49878d13d64a9e626f26f1813041b..2e15c80d6e3d11b953d210b553cbc6878110427f 100644 (file)
@@ -783,7 +783,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
index 064b53043b153359d70b2b6f3abdbc31a9131d6f..f23df4a9d8c56e460840082bfd5bb9ebcafce511 100644 (file)
@@ -1553,9 +1553,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
                sd->params.format.videoSize = VIDEOSIZE_CIF;
 
        sd->params.roi.colEnd = sd->params.roi.colStart +
-                               (gspca_dev->width >> 3);
+                               (gspca_dev->pixfmt.width >> 3);
        sd->params.roi.rowEnd = sd->params.roi.rowStart +
-                               (gspca_dev->height >> 2);
+                               (gspca_dev->pixfmt.height >> 2);
 
        /* And now set the camera to a known state */
        ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode,
index 048507b27bb2388573ea8d76077e32a5eb1c708e..f3a7ace0fac9cc8bd4e2b2c8b82038dc85aa7068 100644 (file)
@@ -504,8 +504,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
        unsigned int frsz;
        int i;
 
-       i = gspca_dev->curr_mode;
-       frsz = gspca_dev->cam.cam_mode[i].sizeimage;
+       frsz = gspca_dev->pixfmt.sizeimage;
        PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
        frsz = PAGE_ALIGN(frsz);
        if (count >= GSPCA_MAX_FRAMES)
@@ -627,16 +626,14 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
 static u32 which_bandwidth(struct gspca_dev *gspca_dev)
 {
        u32 bandwidth;
-       int i;
 
        /* get the (max) image size */
-       i = gspca_dev->curr_mode;
-       bandwidth = gspca_dev->cam.cam_mode[i].sizeimage;
+       bandwidth = gspca_dev->pixfmt.sizeimage;
 
        /* if the image is compressed, estimate its mean size */
        if (!gspca_dev->cam.needs_full_bandwidth &&
-           bandwidth < gspca_dev->cam.cam_mode[i].width *
-                               gspca_dev->cam.cam_mode[i].height)
+           bandwidth < gspca_dev->pixfmt.width *
+                               gspca_dev->pixfmt.height)
                bandwidth = bandwidth * 3 / 8;  /* 0.375 */
 
        /* estimate the frame rate */
@@ -650,7 +647,7 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev)
 
                /* don't hope more than 15 fps with USB 1.1 and
                 * image resolution >= 640x480 */
-               if (gspca_dev->width >= 640
+               if (gspca_dev->pixfmt.width >= 640
                 && gspca_dev->dev->speed == USB_SPEED_FULL)
                        bandwidth *= 15;                /* 15 fps */
                else
@@ -982,9 +979,7 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
 
        i = gspca_dev->cam.nmodes - 1;  /* take the highest mode */
        gspca_dev->curr_mode = i;
-       gspca_dev->width = gspca_dev->cam.cam_mode[i].width;
-       gspca_dev->height = gspca_dev->cam.cam_mode[i].height;
-       gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat;
+       gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i];
 
        /* does nothing if ctrl_handler == NULL */
        v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
@@ -1105,10 +1100,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
                            struct v4l2_format *fmt)
 {
        struct gspca_dev *gspca_dev = video_drvdata(file);
-       int mode;
 
-       mode = gspca_dev->curr_mode;
-       fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+       fmt->fmt.pix = gspca_dev->pixfmt;
        /* some drivers use priv internally, zero it before giving it to
           userspace */
        fmt->fmt.pix.priv = 0;
@@ -1140,6 +1133,12 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
                        mode = mode2;
        }
        fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+       if (gspca_dev->sd_desc->try_fmt) {
+               /* pass original resolution to subdriver try_fmt */
+               fmt->fmt.pix.width = w;
+               fmt->fmt.pix.height = h;
+               gspca_dev->sd_desc->try_fmt(gspca_dev, fmt);
+       }
        /* some drivers use priv internally, zero it before giving it to
           userspace */
        fmt->fmt.pix.priv = 0;
@@ -1178,19 +1177,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
                goto out;
        }
 
-       if (ret == gspca_dev->curr_mode) {
-               ret = 0;
-               goto out;                       /* same mode */
-       }
-
        if (gspca_dev->streaming) {
                ret = -EBUSY;
                goto out;
        }
-       gspca_dev->width = fmt->fmt.pix.width;
-       gspca_dev->height = fmt->fmt.pix.height;
-       gspca_dev->pixfmt = fmt->fmt.pix.pixelformat;
        gspca_dev->curr_mode = ret;
+       if (gspca_dev->sd_desc->try_fmt)
+               /* subdriver try_fmt can modify format parameters */
+               gspca_dev->pixfmt = fmt->fmt.pix;
+       else
+               gspca_dev->pixfmt = gspca_dev->cam.cam_mode[ret];
 
        ret = 0;
 out:
@@ -1205,6 +1201,9 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
        int i;
        __u32 index = 0;
 
+       if (gspca_dev->sd_desc->enum_framesizes)
+               return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize);
+
        for (i = 0; i < gspca_dev->cam.nmodes; i++) {
                if (fsize->pixel_format !=
                                gspca_dev->cam.cam_mode[i].pixelformat)
@@ -1471,8 +1470,9 @@ static int vidioc_streamon(struct file *file, void *priv,
                if (ret < 0)
                        goto out;
        }
-       PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", gspca_dev->pixfmt,
-                   gspca_dev->width, gspca_dev->height);
+       PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK",
+                   gspca_dev->pixfmt.pixelformat,
+                   gspca_dev->pixfmt.width, gspca_dev->pixfmt.height);
        ret = 0;
 out:
        mutex_unlock(&gspca_dev->queue_lock);
index ac0b11f46f5037d0cb29eaeb8896b445537fa76b..300642dc1a177207c1e4b9952f20404e19667c8e 100644 (file)
@@ -88,6 +88,10 @@ typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev,
 typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev,
                                u8 *data,
                                int len);
+typedef void (*cam_format_op) (struct gspca_dev *gspca_dev,
+                               struct v4l2_format *fmt);
+typedef int (*cam_frmsize_op) (struct gspca_dev *gspca_dev,
+                               struct v4l2_frmsizeenum *fsize);
 
 /* subdriver description */
 struct sd_desc {
@@ -109,6 +113,8 @@ struct sd_desc {
        cam_set_jpg_op set_jcomp;
        cam_streamparm_op get_streamparm;
        cam_streamparm_op set_streamparm;
+       cam_format_op try_fmt;
+       cam_frmsize_op enum_framesizes;
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        cam_set_reg_op set_register;
        cam_get_reg_op get_register;
@@ -183,9 +189,7 @@ struct gspca_dev {
        __u8 streaming;                 /* protected by both mutexes (*) */
 
        __u8 curr_mode;                 /* current camera mode */
-       __u32 pixfmt;                   /* current mode parameters */
-       __u16 width;
-       __u16 height;
+       struct v4l2_pix_format pixfmt;  /* current mode parameters */
        __u32 sequence;                 /* frame sequence number */
 
        wait_queue_head_t wq;           /* wait queue */
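
Note: this header change is the core of the gspca conversion seen throughout the hunks above: the separate width/height/pixfmt fields are folded into a single struct v4l2_pix_format, so subdrivers read the negotiated geometry from one place. A minimal sketch, assuming the gspca.h header from this directory and a hypothetical helper name:

	#include <linux/kernel.h>
	#include "gspca.h"

	static void example_log_mode(struct gspca_dev *gspca_dev)
	{
		u32 fourcc = gspca_dev->pixfmt.pixelformat;

		/* read everything from the consolidated pixfmt member */
		pr_debug("streaming %ux%u (%4.4s)\n",
			 gspca_dev->pixfmt.width, gspca_dev->pixfmt.height,
			 (char *)&fourcc);
	}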
index 8da3dde383853d9c0faaad80a7f0c53eadaf449b..19736e237b37d6a14e50141c8d24104141783f16 100644 (file)
@@ -378,11 +378,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *dev = (struct sd *) gspca_dev;
 
        /* create the JPEG header */
-       jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
        jpeg_set_qual(dev->jpeg_hdr, dev->quality);
        PDEBUG(D_STREAM, "Start streaming at %dx%d",
-               gspca_dev->height, gspca_dev->width);
+               gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
        jlj_start(gspca_dev);
        return gspca_dev->usb_err;
 }
index fdaeeb14453fbb42d82d9d31c0d64e088333bed2..5b481fa430992a7b37b9d14f9df108498f0c4d86 100644 (file)
@@ -455,7 +455,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
        sd->cap_mode = gspca_dev->cam.cam_mode;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 640:
                PDEBUG(D_STREAM, "Start streaming at vga resolution");
                jl2005c_stream_start_vga_lg(gspca_dev);
index cfa4663f8934ebea47eb7d8d116a4405e875c17d..27fcef11aef42998b0132b26f3b3b4790ee06dd7 100644 (file)
@@ -266,7 +266,7 @@ static int mt9m111_set_hvflip(struct gspca_dev *gspca_dev)
                return err;
 
        data[0] = MT9M111_RMB_OVER_SIZED;
-       if (gspca_dev->width == 640) {
+       if (gspca_dev->pixfmt.width == 640) {
                data[1] = MT9M111_RMB_ROW_SKIP_2X |
                          MT9M111_RMB_COLUMN_SKIP_2X |
                          (hflip << 1) | vflip;
index ff2c5abf115ba635d59a363f190d04acb21ceb66..779a8785f421bbcb67c62840c8a67478ee0a44a2 100644 (file)
@@ -254,7 +254,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        int i;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
@@ -270,8 +271,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        data[0] = 0x00;         /* address */
        data[1] = 0x0c | 0x01;  /* reg 0 */
        data[2] = 0x01;         /* reg 1 */
-       data[3] = gspca_dev->width / 8;         /* h_size , reg 2 */
-       data[4] = gspca_dev->height / 8;        /* v_size , reg 3 */
+       data[3] = gspca_dev->pixfmt.width / 8;  /* h_size , reg 2 */
+       data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */
        data[5] = 0x30;         /* reg 4, MI, PAS5101 :
                                 *      0x30 for 24mhz , 0x28 for 12mhz */
        data[6] = 0x02;         /* reg 5, H start - was 0x04 */
index 68bb2f35966656e8005bb9ef6cb08707ef266bbb..f006e29ca0197b66ec62dfd821dcb520e24635bf 100644 (file)
@@ -521,7 +521,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
        if (sd->sensor_type)
                data[5] = 0xbb;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                data[9] |= 0x04;  /* reg 8, 2:1 scale down from 320 */
                /* fall thru */
@@ -618,7 +618,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
                data[10] = 0x18;
        }
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                data[9] |= 0x0c;  /* reg 8, 4:1 scale down */
                /* fall thru */
@@ -847,7 +847,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv)
                u8 clockdiv = (60 * expo + 7999) / 8000;
 
                /* Limit framerate to not exceed usb bandwidth */
-               if (clockdiv < min_clockdiv && gspca_dev->width >= 320)
+               if (clockdiv < min_clockdiv && gspca_dev->pixfmt.width >= 320)
                        clockdiv = min_clockdiv;
                else if (clockdiv < 2)
                        clockdiv = 2;
index 44c9964b1b3e206085ac1ea3dea8e23048aaead2..599f755e75b86513c28228468bc19e15278a414a 100644 (file)
@@ -1708,7 +1708,7 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
 
        reg_r(gspca_dev, 0x1004, 1);
        if (gspca_dev->usb_buf[0] & 0x04) {     /* if AE_FULL_FRM */
-               sd->ae_res = gspca_dev->width * gspca_dev->height;
+               sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        } else {                                /* get the AE window size */
                reg_r(gspca_dev, 0x1011, 8);
                w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]
@@ -1717,7 +1717,8 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
                  - (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6];
                sd->ae_res = h * w;
                if (sd->ae_res == 0)
-                       sd->ae_res = gspca_dev->width * gspca_dev->height;
+                       sd->ae_res = gspca_dev->pixfmt.width *
+                                       gspca_dev->pixfmt.height;
        }
 }
 
@@ -1856,21 +1857,21 @@ static int sd_start(struct gspca_dev *gspca_dev)
        reg_w_buf(gspca_dev, cmd);
        switch (sd->webcam) {
        case P35u:
-               if (gspca_dev->width == 320)
+               if (gspca_dev->pixfmt.width == 320)
                        reg_w_buf(gspca_dev, nw801_start_qvga);
                else
                        reg_w_buf(gspca_dev, nw801_start_vga);
                reg_w_buf(gspca_dev, nw801_start_2);
                break;
        case Kr651us:
-               if (gspca_dev->width == 320)
+               if (gspca_dev->pixfmt.width == 320)
                        reg_w_buf(gspca_dev, kr651_start_qvga);
                else
                        reg_w_buf(gspca_dev, kr651_start_vga);
                reg_w_buf(gspca_dev, kr651_start_2);
                break;
        case Proscope:
-               if (gspca_dev->width == 320)
+               if (gspca_dev->pixfmt.width == 320)
                        reg_w_buf(gspca_dev, proscope_start_qvga);
                else
                        reg_w_buf(gspca_dev, proscope_start_vga);
index 8937d79fd1762bcea5ebd863d2eecf6d0d69c0cc..c95f32a0c02b4283d0f4c66b253e0a7e402f392d 100644 (file)
@@ -3468,7 +3468,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
 
        switch (sd->bridge) {
        case BRIDGE_OVFX2:
-               if (gspca_dev->width != 800)
+               if (gspca_dev->pixfmt.width != 800)
                        gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
                else
                        gspca_dev->cam.bulk_size = 7 * 4096;
@@ -3507,8 +3507,8 @@ static void ov511_mode_init_regs(struct sd *sd)
        /* Here I'm assuming that snapshot size == image size.
         * I hope that's always true. --claudio
         */
-       hsegs = (sd->gspca_dev.width >> 3) - 1;
-       vsegs = (sd->gspca_dev.height >> 3) - 1;
+       hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1;
+       vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1;
 
        reg_w(sd, R511_CAM_PXCNT, hsegs);
        reg_w(sd, R511_CAM_LNCNT, vsegs);
@@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd)
        case SEN_OV7640:
        case SEN_OV7648:
        case SEN_OV76BE:
-               if (sd->gspca_dev.width == 320)
+               if (sd->gspca_dev.pixfmt.width == 320)
                        interlaced = 1;
                /* Fall through */
        case SEN_OV6630:
@@ -3551,7 +3551,7 @@ static void ov511_mode_init_regs(struct sd *sd)
                case 30:
                case 25:
                        /* Not enough bandwidth to do 640x480 @ 30 fps */
-                       if (sd->gspca_dev.width != 640) {
+                       if (sd->gspca_dev.pixfmt.width != 640) {
                                sd->clockdiv = 0;
                                break;
                        }
@@ -3584,7 +3584,8 @@ static void ov511_mode_init_regs(struct sd *sd)
 
        /* Check if we have enough bandwidth to disable compression */
        fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
-       needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+       needed = fps * sd->gspca_dev.pixfmt.width *
+                       sd->gspca_dev.pixfmt.height * 3 / 2;
        /* 1000 isoc packets/sec */
        if (needed > 1000 * packet_size) {
                /* Enable Y and UV quantization and compression */
@@ -3646,8 +3647,8 @@ static void ov518_mode_init_regs(struct sd *sd)
                reg_w(sd, 0x38, 0x80);
        }
 
-       hsegs = sd->gspca_dev.width / 16;
-       vsegs = sd->gspca_dev.height / 4;
+       hsegs = sd->gspca_dev.pixfmt.width / 16;
+       vsegs = sd->gspca_dev.pixfmt.height / 4;
 
        reg_w(sd, 0x29, hsegs);
        reg_w(sd, 0x2a, vsegs);
@@ -3686,7 +3687,8 @@ static void ov518_mode_init_regs(struct sd *sd)
                         * happened to be with revision < 2 cams using an
                         * OV7620 and revision 2 cams using an OV7620AE.
                         */
-                       if (sd->revision > 0 && sd->gspca_dev.width == 640) {
+                       if (sd->revision > 0 &&
+                                       sd->gspca_dev.pixfmt.width == 640) {
                                reg_w(sd, 0x20, 0x60);
                                reg_w(sd, 0x21, 0x1f);
                        } else {
@@ -3812,8 +3814,8 @@ static void ov519_mode_init_regs(struct sd *sd)
                break;
        }
 
-       reg_w(sd, OV519_R10_H_SIZE,     sd->gspca_dev.width >> 4);
-       reg_w(sd, OV519_R11_V_SIZE,     sd->gspca_dev.height >> 3);
+       reg_w(sd, OV519_R10_H_SIZE,     sd->gspca_dev.pixfmt.width >> 4);
+       reg_w(sd, OV519_R11_V_SIZE,     sd->gspca_dev.pixfmt.height >> 3);
        if (sd->sensor == SEN_OV7670 &&
            sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
                reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
@@ -3947,14 +3949,16 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
            }
        case SEN_OV3610:
                if (qvga) {
-                       xstart = (1040 - gspca_dev->width) / 2 + (0x1f << 4);
-                       ystart = (776 - gspca_dev->height) / 2;
+                       xstart = (1040 - gspca_dev->pixfmt.width) / 2 +
+                               (0x1f << 4);
+                       ystart = (776 - gspca_dev->pixfmt.height) / 2;
                } else {
-                       xstart = (2076 - gspca_dev->width) / 2 + (0x10 << 4);
-                       ystart = (1544 - gspca_dev->height) / 2;
+                       xstart = (2076 - gspca_dev->pixfmt.width) / 2 +
+                               (0x10 << 4);
+                       ystart = (1544 - gspca_dev->pixfmt.height) / 2;
                }
-               xend = xstart + gspca_dev->width;
-               yend = ystart + gspca_dev->height;
+               xend = xstart + gspca_dev->pixfmt.width;
+               yend = ystart + gspca_dev->pixfmt.height;
                /* Writing to the COMH register resets the other windowing regs
                   to their default values, so we must do this first. */
                i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0);
@@ -4229,8 +4233,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        /* Default for most bridges, allow bridge_mode_init_regs to override */
-       sd->sensor_width = sd->gspca_dev.width;
-       sd->sensor_height = sd->gspca_dev.height;
+       sd->sensor_width = sd->gspca_dev.pixfmt.width;
+       sd->sensor_height = sd->gspca_dev.pixfmt.height;
 
        switch (sd->bridge) {
        case BRIDGE_OV511:
@@ -4345,12 +4349,13 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
                ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1);
                if (in[8] & 0x80) {
                        /* Frame end */
-                       if ((in[9] + 1) * 8 != gspca_dev->width ||
-                           (in[10] + 1) * 8 != gspca_dev->height) {
+                       if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
+                           (in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
                                PERR("Invalid frame size, got: %dx%d,"
                                        " requested: %dx%d\n",
                                        (in[9] + 1) * 8, (in[10] + 1) * 8,
-                                       gspca_dev->width, gspca_dev->height);
+                                       gspca_dev->pixfmt.width,
+                                       gspca_dev->pixfmt.height);
                                gspca_dev->last_packet_type = DISCARD_PACKET;
                                return;
                        }
@@ -4470,7 +4475,8 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
                if (sd->first_frame) {
                        sd->first_frame--;
                        if (gspca_dev->image_len <
-                                 sd->gspca_dev.width * sd->gspca_dev.height)
+                                 sd->gspca_dev.pixfmt.width *
+                                       sd->gspca_dev.pixfmt.height)
                                gspca_dev->last_packet_type = DISCARD_PACKET;
                }
                gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
index 03a33c46ca2c0ede859867d00b0652f911566be9..90f0d637cd9d05f66450ef645859cf7f5f7fb381 100644 (file)
@@ -1440,9 +1440,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
                /* If this packet is marked as EOF, end the frame */
                } else if (data[1] & UVC_STREAM_EOF) {
                        sd->last_pts = 0;
-                       if (gspca_dev->pixfmt == V4L2_PIX_FMT_YUYV
+                       if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV
                         && gspca_dev->image_len + len - 12 !=
-                                  gspca_dev->width * gspca_dev->height * 2) {
+                                  gspca_dev->pixfmt.width *
+                                       gspca_dev->pixfmt.height * 2) {
                                PDEBUG(D_PACK, "wrong sized frame");
                                goto discard;
                        }
index 83519be94e58c704b7b50c50b69cce558645cb1b..cd79c180f67b87e84689a8a162b4962bf5691cde 100644 (file)
@@ -299,7 +299,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
        pac207_write_regs(gspca_dev, 0x0042, pac207_sensor_init[3], 8);
 
        /* Compression Balance */
-       if (gspca_dev->width == 176)
+       if (gspca_dev->pixfmt.width == 176)
                pac207_write_reg(gspca_dev, 0x4a, 0xff);
        else
                pac207_write_reg(gspca_dev, 0x4a, 0x30);
@@ -317,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
                mode = 0x00;
        else
                mode = 0x02;
-       if (gspca_dev->width == 176) {  /* 176x144 */
+       if (gspca_dev->pixfmt.width == 176) {   /* 176x144 */
                mode |= 0x01;
                PDEBUG(D_STREAM, "pac207_start mode 176x144");
        } else {                                /* 352x288 */
index 1a5bdc853a80dbc2fb9a3c15360f2243a7f77f91..25f86b1e74a80b9c9d6f4d856b8156a216d47102 100644 (file)
@@ -326,7 +326,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
         *  640x480 mode and page 4 reg 2 <= 3 then it must be 9
         */
        reg_w(gspca_dev, 0xff, 0x01);
-       if (gspca_dev->width != 640 && val <= 3)
+       if (gspca_dev->pixfmt.width != 640 && val <= 3)
                reg_w(gspca_dev, 0x08, 0x09);
        else
                reg_w(gspca_dev, 0x08, 0x08);
@@ -337,7 +337,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
         * camera to use higher compression or we may run out of
         * bandwidth.
         */
-       if (gspca_dev->width == 640 && val == 2)
+       if (gspca_dev->pixfmt.width == 640 && val == 2)
                reg_w(gspca_dev, 0x80, 0x01);
        else
                reg_w(gspca_dev, 0x80, 0x1c);
@@ -615,7 +615,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
 
                /* Start the new frame with the jpeg header */
                pac_start_frame(gspca_dev,
-                       gspca_dev->height, gspca_dev->width);
+                       gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
        }
        gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
 }
index 5f729b8aa2bd64bee74af9c02b1954a12c4bb304..5102cea504710c5c2aeeba1b0101159f35b0b459 100644 (file)
@@ -354,9 +354,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
 
        /* set size + mode */
        se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
-                       gspca_dev->width * mult, 0);
+                       gspca_dev->pixfmt.width * mult, 0);
        se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
-                       gspca_dev->height * mult, 0);
+                       gspca_dev->pixfmt.height * mult, 0);
        /*
         * HDG: disabled this as it does not seem to do anything
         * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
@@ -480,7 +480,7 @@ static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
 static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
 {
        struct sd *sd = (struct sd *)gspca_dev;
-       int imagesize = gspca_dev->width * gspca_dev->height;
+       int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        int i, plen, bits, pixels, info, count;
 
        if (sd->restart_stream)
index f4453d52801b94140cc1201f437ba68fec694095..2a38621cf7188d1f8cdfe03e7d1fede7ffcad566 100644 (file)
@@ -1955,7 +1955,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
                        return 0;
                }
 
-               switch (gspca_dev->width) {
+               switch (gspca_dev->pixfmt.width) {
                case 160: /* 160x120 */
                        gspca_dev->alt = 2;
                        break;
@@ -1985,8 +1985,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
 {
        struct sd *sd = (struct sd *) gspca_dev;
        int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
-       int width = gspca_dev->width;
-       int height = gspca_dev->height;
+       int width = gspca_dev->pixfmt.width;
+       int height = gspca_dev->pixfmt.height;
        u8 fmt, scale = 0;
 
        jpeg_define(sd->jpeg_hdr, height, width,
index d7ff3b9687c57cb22c504e7a3a9493e41fd924eb..7277dbd2afcdb8c88629aafc2c9013b38cd79390 100644 (file)
@@ -513,10 +513,7 @@ static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
                if (gspca_dev->usb_buf[0] & 0x04) {
                        if (gspca_dev->usb_buf[0] & 0x08) {
                                dev_err(gspca_dev->v4l2_dev.dev,
-                                       "i2c error writing %02x %02x %02x %02x"
-                                       " %02x %02x %02x %02x\n",
-                                       buf[0], buf[1], buf[2], buf[3],
-                                       buf[4], buf[5], buf[6], buf[7]);
+                                       "i2c error writing %8ph\n", buf);
                                gspca_dev->usb_err = -EIO;
                        }
                        return;
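
Note: the "%8ph" conversion above is the printk hex-dump extension; it prints a small buffer (here 8 bytes) from a single pointer argument, replacing the long run of %02x conversions. A tiny sketch:

	#include <linux/kernel.h>

	static void example_dump(void)
	{
		u8 buf[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

		/* prints "de ad be ef 00 11 22 33" */
		pr_info("i2c error writing %8ph\n", buf);
	}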
@@ -753,7 +750,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
                /* In 640x480, if the reg11 has less than 4, the image is
                   unstable (the bridge goes into a higher compression mode
                   which we have not reverse engineered yet). */
-               if (gspca_dev->width == 640 && reg11 < 4)
+               if (gspca_dev->pixfmt.width == 640 && reg11 < 4)
                        reg11 = 4;
 
                /* frame exposure time in ms = 1000 * reg11 / 30    ->
index 3b5ccb1c4cdf11e0597995f1acdd3e74d182cabe..c69b45d7cfbf3995281ce1ede849a6b761474d77 100644 (file)
@@ -2204,7 +2204,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
                                { 0x14, 0xe7, 0x1e, 0xdd };
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
 
        /* initialize the bridge */
index 688592b289eafad0055e4f10d747492a6b65d0b5..f38fd8949609fd917cee8568a4cba9bdb5e83cb4 100644 (file)
@@ -255,7 +255,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        /* initialize the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
 
        /* the JPEG quality shall be 85% */
index 9f8bf51fd64b64cb1e4544a722f2b0d9da308c1e..f011a309dd65f4d0776e82cb55f4e6c3fb4ccf58 100644 (file)
@@ -608,7 +608,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        __u8 xmult, ymult;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
index acb19fb9a3df3a7d8914eb4f5efa927aa6a18fad..aa21edc9502d67466f7ed13959d85709b9e36889 100644 (file)
@@ -272,7 +272,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
 
        dev->cap_mode = gspca_dev->cam.cam_mode;
        /* "Open the shutter" and set size, to start capture */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 640:
                PDEBUG(D_STREAM, "Start streaming at high resolution");
                dev->cap_mode++;
index b10d0821111cba7815be078d338c8e1f0917e5ef..e274cf19a3ea22a1448f340f7a779c5a1274d528 100644 (file)
@@ -906,7 +906,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
 
        gspca_dev->cam.bulk_nurbs = 1;  /* there must be one URB only */
        sd->do_ctrl = 0;
-       gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8;
+       gspca_dev->cam.bulk_size = gspca_dev->pixfmt.width *
+                       gspca_dev->pixfmt.height + 8;
        return 0;
 }
 
index 8c0982607f25c0ca8b14518be8ebee8b40b50278..b0c70fea760ba68a8235372e9c0ddcc2970af941 100644 (file)
@@ -250,7 +250,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        int ret, value;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
@@ -261,7 +262,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
        set_par(gspca_dev, 0x00000000);
        set_par(gspca_dev, 0x8002e001);
        set_par(gspca_dev, 0x14000000);
-       if (gspca_dev->width > 320)
+       if (gspca_dev->pixfmt.width > 320)
                value = 0x8002e001;             /* 640x480 */
        else
                value = 0x4001f000;             /* 320x240 */
index 585868835aceab032418a13448cf2d00007947f8..8add2f74dedaae59d339ba65b8ebbe115f9d5dc6 100644 (file)
@@ -48,42 +48,11 @@ struct sd {
 };
 
 static const struct v4l2_pix_format stk1135_modes[] = {
-       {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 160,
-               .sizeimage = 160 * 120,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 176,
-               .sizeimage = 176 * 144,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 320,
-               .sizeimage = 320 * 240,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 352,
-               .sizeimage = 352 * 288,
-               .colorspace = V4L2_COLORSPACE_SRGB},
+       /* default mode (this driver supports variable resolution) */
        {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
                .bytesperline = 640,
                .sizeimage = 640 * 480,
                .colorspace = V4L2_COLORSPACE_SRGB},
-       {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 720,
-               .sizeimage = 720 * 576,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 800,
-               .sizeimage = 800 * 600,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 1024,
-               .sizeimage = 1024 * 768,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 1280,
-               .sizeimage = 1280 * 1024,
-               .colorspace = V4L2_COLORSPACE_SRGB},
 };
 
 /* -- read a register -- */
@@ -347,16 +316,16 @@ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)
                sensor_write(gspca_dev, cfg[i].reg, cfg[i].val);
 
        /* set output size */
-       width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
-       height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
-       if (width <= 640) { /* use context A (half readout speed by default) */
+       width = gspca_dev->pixfmt.width;
+       height = gspca_dev->pixfmt.height;
+       if (width <= 640 && height <= 512) { /* context A (half readout speed)*/
                sensor_write(gspca_dev, 0x1a7, width);
                sensor_write(gspca_dev, 0x1aa, height);
                /* set read mode context A */
                sensor_write(gspca_dev, 0x0c8, 0x0000);
                /* set resize, read mode, vblank, hblank context A */
                sensor_write(gspca_dev, 0x2c8, 0x0000);
-       } else { /* use context B (full readout speed by default) */
+       } else { /* context B (full readout speed) */
                sensor_write(gspca_dev, 0x1a1, width);
                sensor_write(gspca_dev, 0x1a4, height);
                /* set read mode context B */
@@ -484,8 +453,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00);
 
        /* set capture end position */
-       width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
-       height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
+       width = gspca_dev->pixfmt.width;
+       height = gspca_dev->pixfmt.height;
        reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff);
        reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8);
        reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff);
@@ -643,6 +612,35 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
        return 0;
 }
 
+void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt)
+{
+       fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U);
+       fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U);
+       /* round up to even numbers */
+       fmt->fmt.pix.width += (fmt->fmt.pix.width & 1);
+       fmt->fmt.pix.height += (fmt->fmt.pix.height & 1);
+
+       fmt->fmt.pix.bytesperline = fmt->fmt.pix.width;
+       fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height;
+}
+
+int stk1135_enum_framesizes(struct gspca_dev *gspca_dev,
+                       struct v4l2_frmsizeenum *fsize)
+{
+       if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8)
+               return -EINVAL;
+
+       fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+       fsize->stepwise.min_width = 32;
+       fsize->stepwise.min_height = 32;
+       fsize->stepwise.max_width = 1280;
+       fsize->stepwise.max_height = 1024;
+       fsize->stepwise.step_width = 2;
+       fsize->stepwise.step_height = 2;
+
+       return 0;
+}
+
 /* sub-driver description */
 static const struct sd_desc sd_desc = {
        .name = MODULE_NAME,
@@ -653,6 +651,8 @@ static const struct sd_desc sd_desc = {
        .stopN = sd_stopN,
        .pkt_scan = sd_pkt_scan,
        .dq_callback = stk1135_dq_callback,
+       .try_fmt = stk1135_try_fmt,
+       .enum_framesizes = stk1135_enum_framesizes,
 };
 
 /* -- module initialisation -- */
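
Note: with the stepwise enum_framesizes callback wired up above, userspace sees a continuous resolution range instead of the old fixed mode list. A userspace sketch (error handling omitted; the file descriptor is assumed to be an open stk1135 video node):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static void print_framesize_range(int fd)
	{
		struct v4l2_frmsizeenum fsize = {
			.index = 0,
			.pixel_format = V4L2_PIX_FMT_SBGGR8,
		};

		if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0 &&
		    fsize.type == V4L2_FRMSIZE_TYPE_STEPWISE)
			printf("%ux%u .. %ux%u, step %ux%u\n",
			       fsize.stepwise.min_width, fsize.stepwise.min_height,
			       fsize.stepwise.max_width, fsize.stepwise.max_height,
			       fsize.stepwise.step_width, fsize.stepwise.step_height);
	}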
index 55ee7a61c67fb635fe8dbfb2527f178c554c0f6c..49d209bbf9ee08f54e2caa86283205b914a72afd 100644 (file)
@@ -452,7 +452,7 @@ frame_data:
                                        NULL, 0);
 
                        if (sd->bridge == BRIDGE_ST6422)
-                               sd->to_skip = gspca_dev->width * 4;
+                               sd->to_skip = gspca_dev->pixfmt.width * 4;
 
                        if (chunk_len)
                                PERR("Chunk length is "
index 8206b77433006690f3926767290a2802de816611..8d785edcccf2ef040906840f55ea3173e555ecbf 100644 (file)
@@ -421,7 +421,7 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
 
        /* Number of pixels counted by the sensor when subsampling the pixels.
         * Slightly larger than the real value to avoid oscillation */
-       totalpixels = gspca_dev->width * gspca_dev->height;
+       totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        totalpixels = totalpixels/(8*8) + totalpixels/(64*64);
 
        brightpixels = (totalpixels * val) >> 8;
index af8767a9bd4ccc1a9af62d7f23c69e68a6348f2a..a517d185febed4590bbb5f58306aa4e4a2c4f0f1 100644 (file)
@@ -715,7 +715,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        int enable;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
index 4cb511ccc5f6ecbefadee9ef64de47f9fd75f3e2..640c2fe760b3c4e13c687eefbde34e156025d648 100644 (file)
@@ -3856,7 +3856,7 @@ static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
 
        if (sd->bridge == BRIDGE_TP6800) {
                val |= 0x08;            /* grid compensation enable */
-               if (gspca_dev->width == 640)
+               if (gspca_dev->pixfmt.width == 640)
                        reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
                else
                        val |= 0x04;            /* scaling down enable */
@@ -3880,7 +3880,7 @@ static void set_resolution(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
-       if (gspca_dev->width == 320) {
+       if (gspca_dev->pixfmt.width == 320) {
                reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06);
                msleep(100);
                i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
@@ -3924,7 +3924,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
 
                /* 640x480 * 30 fps does not work */
                if (i == 6                      /* if 30 fps */
-                && gspca_dev->width == 640)
+                && gspca_dev->pixfmt.width == 640)
                        i = 0x05;               /* 15 fps */
        } else {
                for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) {
@@ -3935,7 +3935,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
 
                /* 640x480 * 30 fps does not work */
                if (i == 7                      /* if 30 fps */
-                && gspca_dev->width == 640)
+                && gspca_dev->pixfmt.width == 640)
                        i = 6;                  /* 15 fps */
                i |= 0x80;                      /* clock * 1 */
        }
@@ -4554,7 +4554,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
 {
        struct sd *sd = (struct sd *) gspca_dev;
 
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width);
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width);
        set_dqt(gspca_dev, sd->quality);
        if (sd->bridge == BRIDGE_TP6800) {
                if (sd->sensor == SENSOR_CX0342)
@@ -4737,7 +4738,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
                        (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] +
                        (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28])
                                / 8;
-               if (gspca_dev->width == 640)
+               if (gspca_dev->pixfmt.width == 640)
                        luma /= 4;
                reg_w(gspca_dev, 0x7d, 0x00);
 
index 8591324a53e15edc5582d73d9c90715969ffe0f4..d497ba38af0da06149ebb95ec91526c21e6a4604 100644 (file)
@@ -268,7 +268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
        packet_type0 = packet_type1 = INTER_PACKET;
        if (gspca_dev->empty_packet) {
                gspca_dev->empty_packet = 0;
-               sd->packet = gspca_dev->height / 2;
+               sd->packet = gspca_dev->pixfmt.height / 2;
                packet_type0 = FIRST_PACKET;
        } else if (sd->packet == 0)
                return;                 /* 2 more lines in 352x288 ! */
@@ -284,9 +284,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
         * - 4 bytes
         */
        gspca_frame_add(gspca_dev, packet_type0,
-                       data + 2, gspca_dev->width);
+                       data + 2, gspca_dev->pixfmt.width);
        gspca_frame_add(gspca_dev, packet_type1,
-                       data + gspca_dev->width + 5, gspca_dev->width);
+                       data + gspca_dev->pixfmt.width + 5,
+                       gspca_dev->pixfmt.width);
 }
 
 static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
index a2275cfe0b814625b1bfc9e14144084aed74ca6b..103f6c4236b0789d9eb621eb17efd77a5d46c63e 100644 (file)
@@ -121,13 +121,13 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
 
        memset(req_data, 0, 16);
        req_data[0] = gain;
-       if (gspca_dev->width == 256)
+       if (gspca_dev->pixfmt.width == 256)
                req_data[1] |= 0x01; /* low nibble x-scale */
-       if (gspca_dev->height <= 122) {
+       if (gspca_dev->pixfmt.height <= 122) {
                req_data[1] |= 0x10; /* high nibble y-scale */
-               unscaled_height = gspca_dev->height * 2;
+               unscaled_height = gspca_dev->pixfmt.height * 2;
        } else
-               unscaled_height = gspca_dev->height;
+               unscaled_height = gspca_dev->pixfmt.height;
        req_data[2] = 0x90; /* unknown, does not seem to do anything */
        if (unscaled_height <= 200)
                req_data[3] = 0x06; /* vend? */
index 2165da0c7ce1570828be04c572a0bcc816353677..fb9fe2ef3a6f60059b7f3fee4e89254b4869d52f 100644 (file)
@@ -430,11 +430,11 @@ static void w9968cf_set_crop_window(struct sd *sd)
        #define SC(x) ((x) << 10)
 
        /* Scaling factors */
-       fw = SC(sd->gspca_dev.width) / max_width;
-       fh = SC(sd->gspca_dev.height) / max_height;
+       fw = SC(sd->gspca_dev.pixfmt.width) / max_width;
+       fh = SC(sd->gspca_dev.pixfmt.height) / max_height;
 
-       cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh;
-       ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height;
+       cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.pixfmt.width) / fh;
+       ch = (fw >= fh) ? SC(sd->gspca_dev.pixfmt.height) / fw : max_height;
 
        sd->sensor_width = max_width;
        sd->sensor_height = max_height;
@@ -454,34 +454,34 @@ static void w9968cf_mode_init_regs(struct sd *sd)
 
        w9968cf_set_crop_window(sd);
 
-       reg_w(sd, 0x14, sd->gspca_dev.width);
-       reg_w(sd, 0x15, sd->gspca_dev.height);
+       reg_w(sd, 0x14, sd->gspca_dev.pixfmt.width);
+       reg_w(sd, 0x15, sd->gspca_dev.pixfmt.height);
 
        /* JPEG width & height */
-       reg_w(sd, 0x30, sd->gspca_dev.width);
-       reg_w(sd, 0x31, sd->gspca_dev.height);
+       reg_w(sd, 0x30, sd->gspca_dev.pixfmt.width);
+       reg_w(sd, 0x31, sd->gspca_dev.pixfmt.height);
 
        /* Y & UV frame buffer strides (in WORD) */
        if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
            V4L2_PIX_FMT_JPEG) {
-               reg_w(sd, 0x2c, sd->gspca_dev.width / 2);
-               reg_w(sd, 0x2d, sd->gspca_dev.width / 4);
+               reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width / 2);
+               reg_w(sd, 0x2d, sd->gspca_dev.pixfmt.width / 4);
        } else
-               reg_w(sd, 0x2c, sd->gspca_dev.width);
+               reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width);
 
        reg_w(sd, 0x00, 0xbf17); /* reset everything */
        reg_w(sd, 0x00, 0xbf10); /* normal operation */
 
        /* Transfer size in WORDS (for UYVY format only) */
-       val = sd->gspca_dev.width * sd->gspca_dev.height;
+       val = sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height;
        reg_w(sd, 0x3d, val & 0xffff); /* low bits */
        reg_w(sd, 0x3e, val >> 16);    /* high bits */
 
        if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
            V4L2_PIX_FMT_JPEG) {
                /* We may get called multiple times (usb isoc bw negotiat.) */
-               jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height,
-                           sd->gspca_dev.width, 0x22); /* JPEG 420 */
+               jpeg_define(sd->jpeg_hdr, sd->gspca_dev.pixfmt.height,
+                           sd->gspca_dev.pixfmt.width, 0x22); /* JPEG 420 */
                jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
                w9968cf_upload_quantizationtables(sd);
                v4l2_ctrl_grab(sd->jpegqual, true);
index 7eaf64eb867cf5ebdef1e55c265cd6f7ca4a6685..a41aa7817c54349e196a4aeee80c60a8adb4a921 100644 (file)
@@ -1471,14 +1471,14 @@ static int cit_get_clock_div(struct gspca_dev *gspca_dev)
 
        while (clock_div > 3 &&
                        1000 * packet_size >
-                       gspca_dev->width * gspca_dev->height *
+                       gspca_dev->pixfmt.width * gspca_dev->pixfmt.height *
                        fps[clock_div - 1] * 3 / 2)
                clock_div--;
 
        PDEBUG(D_PROBE,
               "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
-              packet_size, gspca_dev->width, gspca_dev->height, clock_div,
-              fps[clock_div]);
+              packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height,
+              clock_div, fps[clock_div]);
 
        return clock_div;
 }
@@ -1502,7 +1502,7 @@ static int cit_start_model0(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x0002, 0x0426);
        cit_write_reg(gspca_dev, 0x0014, 0x0427);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160: /* 160x120 */
                cit_write_reg(gspca_dev, 0x0004, 0x010b);
                cit_write_reg(gspca_dev, 0x0001, 0x010a);
@@ -1643,7 +1643,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x00, 0x0101);
        cit_write_reg(gspca_dev, 0x00, 0x010a);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 128: /* 128x96 */
                cit_write_reg(gspca_dev, 0x80, 0x0103);
                cit_write_reg(gspca_dev, 0x60, 0x0105);
@@ -1700,7 +1700,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
        }
 
        /* Assorted init */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 128: /* 128x96 */
                cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
                cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
@@ -1753,7 +1753,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x0000, 0x0108);
        cit_write_reg(gspca_dev, 0x0001, 0x0133);
        cit_write_reg(gspca_dev, 0x0001, 0x0102);
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_write_reg(gspca_dev, 0x002c, 0x0103);       /* All except 320x240 */
                cit_write_reg(gspca_dev, 0x0000, 0x0104);       /* Same */
@@ -1792,7 +1792,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
 
        cit_write_reg(gspca_dev, 0x0000, 0x0100);       /* LED on */
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_write_reg(gspca_dev, 0x0050, 0x0111);
                cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -1840,7 +1840,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
         * Magic control of CMOS sensor. Only lower values like
         * 0-3 work, and picture shifts left or right. Don't change.
         */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
                cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
@@ -1899,7 +1899,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
         * does not allow arbitrary values and apparently is a bit mask, to
         * be activated only at appropriate time. Don't change it randomly!
         */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
                break;
@@ -2023,7 +2023,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
        cit_model3_Packet1(gspca_dev, 0x009e, 0x0096);
        cit_model3_Packet1(gspca_dev, 0x009f, 0x000a);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
                cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
@@ -2134,7 +2134,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
           like with the IBM netcam pro). */
        cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
                cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
@@ -2211,7 +2211,7 @@ static int cit_start_model4(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0xfffa, 0x0124);
        cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 128: /* 128x96 */
                cit_write_reg(gspca_dev, 0x0070, 0x0119);
                cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -2531,7 +2531,7 @@ static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
        cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160: /* 160x120 */
                cit_write_reg(gspca_dev, 0x0024, 0x010b);
                cit_write_reg(gspca_dev, 0x0089, 0x0119);
@@ -2635,7 +2635,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
        struct usb_host_interface *alt;
        int max_packet_size;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                max_packet_size = 450;
                break;
@@ -2659,7 +2659,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
        int ret, packet_size, min_packet_size;
        struct usb_host_interface *alt;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                min_packet_size = 200;
                break;
@@ -2780,7 +2780,7 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
        case CIT_MODEL1:
        case CIT_MODEL3:
        case CIT_IBM_NETCAM_PRO:
-               switch (gspca_dev->width) {
+               switch (gspca_dev->pixfmt.width) {
                case 160: /* 160x120 */
                        byte3 = 0x02;
                        byte4 = 0x0a;
@@ -2864,20 +2864,16 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
                                if (data[i] == 0xff) {
                                        if (i >= 4)
                                                PDEBUG(D_FRAM,
-                                                      "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n",
+                                                      "header found at offset: %d: %02x %02x 00 %3ph\n",
                                                       i - 1,
                                                       data[i - 4],
                                                       data[i - 3],
-                                                      data[i],
-                                                      data[i + 1],
-                                                      data[i + 2]);
+                                                      &data[i]);
                                        else
                                                PDEBUG(D_FRAM,
-                                                      "header found at offset: %d: 00 %02x %02x %02x\n",
+                                                      "header found at offset: %d: 00 %3ph\n",
                                                       i - 1,
-                                                      data[i],
-                                                      data[i + 1],
-                                                      data[i + 2]);
+                                                      &data[i]);
                                        return data + i + (sd->sof_len - 1);
                                }
                                break;
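
The two PDEBUG calls above now use the kernel's %ph printk extension, which prints a small buffer as space-separated hex bytes, so "%3ph" with &data[i] replaces the three explicit "%02x" arguments. A minimal sketch of the effect, with made-up byte values:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void hexdump_sketch(void)
    {
            u8 hdr[3] = { 0x00, 0xc6, 0x01 };       /* hypothetical bytes */

            /* "%3ph" consumes one pointer argument and emits "header: 00 c6 01" */
            pr_debug("header: %3ph\n", hdr);
    }
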
index cbfc2f921427cd250556210bdbcf37539c6d5e88..7b95d8e88a20240305a8817b72c3459d80a5adc6 100644 (file)
@@ -6700,7 +6700,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        };
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
 
        mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
index c43c8d32be40c30e99be96d6fedd5a8fdd3832d9..be77482c30704a5b81615199f563770c0147ec0f 100644 (file)
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
                }
        },
+       {
+               .ident = "T12Rg-H",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
+               }
+       },
        {}
 };
 
index 81695d48c13ebc5ceeb5560a0787aae94a516766..c3bb2502225bc6c8932d4aca36de62f1fd1daee2 100644 (file)
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_MINMAX },
+       /* Microsoft Lifecam NX-3000 */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x045e,
+         .idProduct            = 0x0721,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Microsoft Lifecam VX-7000 */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_DEF },
+       /* Dell SP2008WFP Monitor */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x05a9,
+         .idProduct            = 0x2641,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Dell Alienware X51 */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
index ddc9379eb2769c308198cb17bc9afde4e4067fb3..4133af01774a3226413c73fbaa02f41aa789e1d6 100644 (file)
@@ -43,7 +43,7 @@
 
 #define UNSET (-1U)
 
-#define PREFIX (t->i2c->driver->driver.name)
+#define PREFIX (t->i2c->dev.driver->name)
 
 /*
  * Driver modprobe parameters
@@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
        }
 
        tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
-                 c->adapter->name, c->driver->driver.name, c->addr << 1, type,
+                 c->adapter->name, c->dev.driver->name, c->addr << 1, type,
                  t->mode_mask);
        return;
 
@@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap,
                int mode_mask;
 
                if (pos->i2c->adapter != adap ||
-                   strcmp(pos->i2c->driver->driver.name, "tuner"))
+                   strcmp(pos->i2c->dev.driver->name, "tuner"))
                        continue;
 
                mode_mask = pos->mode_mask;
index 037d7a55aa8c6c0595987d9142630a6ea2c7f5b7..433d6d77942eeeab78fa7282fdb77ccb3648a0e8 100644 (file)
@@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
        v4l2_subdev_init(sd, ops);
        sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
        /* the owner is the same as the i2c_client's driver owner */
-       sd->owner = client->driver->driver.owner;
+       sd->owner = client->dev.driver->owner;
        sd->dev = &client->dev;
        /* i2c_client and v4l2_subdev point to one another */
        v4l2_set_subdevdata(sd, client);
        i2c_set_clientdata(client, sd);
        /* initialize name */
        snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
-               client->driver->driver.name, i2c_adapter_id(client->adapter),
+               client->dev.driver->name, i2c_adapter_id(client->adapter),
                client->addr);
 }
 EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
@@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
           loaded. This delay-load mechanism doesn't work if other drivers
           want to use the i2c device, so explicitly loading the module
           is the best alternative. */
-       if (client == NULL || client->driver == NULL)
+       if (client == NULL || client->dev.driver == NULL)
                goto error;
 
        /* Lock the module so we can safely get the v4l2_subdev pointer */
-       if (!try_module_get(client->driver->driver.owner))
+       if (!try_module_get(client->dev.driver->owner))
                goto error;
        sd = i2c_get_clientdata(client);
 
@@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
        if (v4l2_device_register_subdev(v4l2_dev, sd))
                sd = NULL;
        /* Decrease the module use count to match the first try_module_get. */
-       module_put(client->driver->driver.owner);
+       module_put(client->dev.driver->owner);
 
 error:
        /* If we have a client but no subdev, then something went wrong and
index 594c75eab5a5d84b42d20ce6e5ba014fe91c0972..812165884f4200cce6265c7ffe76d4fb4c29fe9c 100644 (file)
@@ -353,7 +353,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 
                        if (b->m.planes[plane].bytesused > length)
                                return -EINVAL;
-                       if (b->m.planes[plane].data_offset >=
+
+                       if (b->m.planes[plane].data_offset > 0 &&
+                           b->m.planes[plane].data_offset >=
                            b->m.planes[plane].bytesused)
                                return -EINVAL;
                }
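
A worked note on the tightened check above, with hypothetical plane values (p stands for one entry of b->m.planes[]): a plane with bytesused == 0 and data_offset == 0 used to fail the old "data_offset >= bytesused" comparison because 0 >= 0, even though no offset was actually set. The added guard only validates the offset when it is non-zero:

    /* bytesused = 0,   data_offset = 0   -> accepted (was -EINVAL before)
     * bytesused = 100, data_offset = 100 -> still rejected: the offset is not
     *                                       inside the used part of the plane */
    if (p->data_offset > 0 && p->data_offset >= p->bytesused)
            return -EINVAL;
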
@@ -1013,6 +1015,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 
                /* Check if the provided plane buffer is large enough */
                if (planes[plane].length < q->plane_sizes[plane]) {
+                       dprintk(1, "qbuf: provided buffer size %u is less than "
+                                               "setup size %u for plane %d\n",
+                                               planes[plane].length,
+                                               q->plane_sizes[plane], plane);
                        ret = -EINVAL;
                        goto err;
                }
@@ -1203,8 +1209,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
        int ret;
 
        ret = __verify_length(vb, b);
-       if (ret < 0)
+       if (ret < 0) {
+               dprintk(1, "%s(): plane parameters verification failed: %d\n",
+                       __func__, ret);
                return ret;
+       }
 
        switch (q->memory) {
        case V4L2_MEMORY_MMAP:
@@ -2467,10 +2476,11 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
 }
 EXPORT_SYMBOL_GPL(vb2_read);
 
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
                loff_t *ppos, int nonblocking)
 {
-       return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+       return __vb2_perform_fileio(q, (char __user *) data, count,
+                                                       ppos, nonblocking, 0);
 }
 EXPORT_SYMBOL_GPL(vb2_write);
 
@@ -2631,7 +2641,7 @@ int vb2_fop_release(struct file *file)
 }
 EXPORT_SYMBOL_GPL(vb2_fop_release);
 
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
 {
        struct video_device *vdev = video_devdata(file);
index fd56f25632018fde92f1acefc4d284e6a47d6dba..646f08f4f504c05ae37dd95cbc1fe8333705e658 100644 (file)
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+       struct vm_area_struct *vma, unsigned long *res)
+{
+       unsigned long pfn, start_pfn, prev_pfn;
+       unsigned int i;
+       int ret;
+
+       if (!vma_is_io(vma))
+               return -EFAULT;
+
+       ret = follow_pfn(vma, start, &pfn);
+       if (ret)
+               return ret;
+
+       start_pfn = pfn;
+       start += PAGE_SIZE;
+
+       for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+               prev_pfn = pfn;
+               ret = follow_pfn(vma, start, &pfn);
+
+               if (ret) {
+                       pr_err("no page for address %lu\n", start);
+                       return ret;
+               }
+               if (pfn != prev_pfn + 1)
+                       return -EINVAL;
+       }
+
+       *res = start_pfn;
+       return 0;
+}
+
 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
 {
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
 
+                       if (!pfn_valid(pfn))
+                               return -EINVAL;
+
                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
 
-       dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
-       if (!vma_is_io(buf->vma))
-               vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+       if (sgt) {
+               dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+               if (!vma_is_io(buf->vma))
+                       vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
-       sg_free_table(sgt);
-       kfree(sgt);
+               sg_free_table(sgt);
+               kfree(sgt);
+       }
        vb2_put_vma(buf->vma);
        kfree(buf);
 }
 
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address or, as a last resort, just assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       /* really, we cannot do anything better at this point */
+       return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
 {
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
+               unsigned long pfn;
+               if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+                       buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+                       buf->size = size;
+                       kfree(pages);
+                       return buf;
+               }
+
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }
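
The fallback added above applies only to VM_IO/VM_PFNMAP mappings whose pfns turn out to be physically contiguous, and when no architecture helper is available the dma address is simply the first pfn shifted back into a byte address. A worked example under those assumptions (hypothetical pfn, 4 KiB pages, identity-mapped bus):

    unsigned long pfn = 0x80000;                     /* first pfn of the mapping */
    dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;  /* 0x80000 << 12            */
                                                     /* -> dma == 0x80000000     */
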
index 16ae3dcc7e294cdb4722d0991b31b9e6ac7cacf0..2f860543912cd1c3f5dd4f286705b2c8b208dc37 100644 (file)
@@ -35,17 +35,61 @@ struct vb2_dma_sg_buf {
        struct page                     **pages;
        int                             write;
        int                             offset;
-       struct vb2_dma_sg_desc          sg_desc;
+       struct sg_table                 sg_table;
+       size_t                          size;
+       unsigned int                    num_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
 
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+               gfp_t gfp_flags)
+{
+       unsigned int last_page = 0;
+       int size = buf->size;
+
+       while (size > 0) {
+               struct page *pages;
+               int order;
+               int i;
+
+               order = get_order(size);
+               /* Don't over-allocate */
+               if ((PAGE_SIZE << order) > size)
+                       order--;
+
+               pages = NULL;
+               while (!pages) {
+                       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+                                       __GFP_NOWARN | gfp_flags, order);
+                       if (pages)
+                               break;
+
+                       if (order == 0) {
+                               while (last_page--)
+                                       __free_page(buf->pages[last_page]);
+                               return -ENOMEM;
+                       }
+                       order--;
+               }
+
+               split_page(pages, order);
+               for (i = 0; i < (1 << order); i++)
+                       buf->pages[last_page++] = &pages[i];
+
+               size -= PAGE_SIZE << order;
+       }
+
+       return 0;
+}
+
 static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
 {
        struct vb2_dma_sg_buf *buf;
-       int i;
+       int ret;
+       int num_pages;
 
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
@@ -54,29 +98,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
        buf->vaddr = NULL;
        buf->write = 0;
        buf->offset = 0;
-       buf->sg_desc.size = size;
+       buf->size = size;
        /* size is already page aligned */
-       buf->sg_desc.num_pages = size >> PAGE_SHIFT;
+       buf->num_pages = size >> PAGE_SHIFT;
 
-       buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
-                                     sizeof(*buf->sg_desc.sglist));
-       if (!buf->sg_desc.sglist)
-               goto fail_sglist_alloc;
-       sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
-       buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+       buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;
 
-       for (i = 0; i < buf->sg_desc.num_pages; ++i) {
-               buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
-                                          __GFP_NOWARN | gfp_flags);
-               if (NULL == buf->pages[i])
-                       goto fail_pages_alloc;
-               sg_set_page(&buf->sg_desc.sglist[i],
-                           buf->pages[i], PAGE_SIZE, 0);
-       }
+       ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+       if (ret)
+               goto fail_pages_alloc;
+
+       ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+                       buf->num_pages, 0, size, gfp_flags);
+       if (ret)
+               goto fail_table_alloc;
 
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
@@ -85,18 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
        atomic_inc(&buf->refcount);
 
        dprintk(1, "%s: Allocated buffer of %d pages\n",
-               __func__, buf->sg_desc.num_pages);
+               __func__, buf->num_pages);
        return buf;
 
+fail_table_alloc:
+       num_pages = buf->num_pages;
+       while (num_pages--)
+               __free_page(buf->pages[num_pages]);
 fail_pages_alloc:
-       while (--i >= 0)
-               __free_page(buf->pages[i]);
        kfree(buf->pages);
-
 fail_pages_array_alloc:
-       vfree(buf->sg_desc.sglist);
-
-fail_sglist_alloc:
        kfree(buf);
        return NULL;
 }
@@ -104,14 +140,14 @@ fail_sglist_alloc:
 static void vb2_dma_sg_put(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
-       int i = buf->sg_desc.num_pages;
+       int i = buf->num_pages;
 
        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
-                       buf->sg_desc.num_pages);
+                       buf->num_pages);
                if (buf->vaddr)
-                       vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
-               vfree(buf->sg_desc.sglist);
+                       vm_unmap_ram(buf->vaddr, buf->num_pages);
+               sg_free_table(&buf->sg_table);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
@@ -124,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 {
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
-       int num_pages_from_user, i;
+       int num_pages_from_user;
 
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
@@ -133,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
-       buf->sg_desc.size = size;
+       buf->size = size;
 
        first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-       buf->sg_desc.num_pages = last - first + 1;
-
-       buf->sg_desc.sglist = vzalloc(
-               buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
-       if (!buf->sg_desc.sglist)
-               goto userptr_fail_sglist_alloc;
-
-       sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+       buf->num_pages = last - first + 1;
 
-       buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+       buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
-               goto userptr_fail_pages_array_alloc;
+               return NULL;
 
        num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
-                                            buf->sg_desc.num_pages,
+                                            buf->num_pages,
                                             write,
                                             1, /* force */
                                             buf->pages,
                                             NULL);
 
-       if (num_pages_from_user != buf->sg_desc.num_pages)
+       if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;
 
-       sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
-                   PAGE_SIZE - buf->offset, buf->offset);
-       size -= PAGE_SIZE - buf->offset;
-       for (i = 1; i < buf->sg_desc.num_pages; ++i) {
-               sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
-                           min_t(size_t, PAGE_SIZE, size), 0);
-               size -= min_t(size_t, PAGE_SIZE, size);
-       }
+       if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+                       buf->num_pages, buf->offset, size, 0))
+               goto userptr_fail_alloc_table_from_pages;
+
        return buf;
 
+userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-              num_pages_from_user, buf->sg_desc.num_pages);
+              num_pages_from_user, buf->num_pages);
        while (--num_pages_from_user >= 0)
                put_page(buf->pages[num_pages_from_user]);
        kfree(buf->pages);
-
-userptr_fail_pages_array_alloc:
-       vfree(buf->sg_desc.sglist);
-
-userptr_fail_sglist_alloc:
        kfree(buf);
        return NULL;
 }
@@ -194,18 +215,18 @@ userptr_fail_sglist_alloc:
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
-       int i = buf->sg_desc.num_pages;
+       int i = buf->num_pages;
 
        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
-              __func__, buf->sg_desc.num_pages);
+              __func__, buf->num_pages);
        if (buf->vaddr)
-               vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+               vm_unmap_ram(buf->vaddr, buf->num_pages);
+       sg_free_table(&buf->sg_table);
        while (--i >= 0) {
                if (buf->write)
                        set_page_dirty_lock(buf->pages[i]);
                put_page(buf->pages[i]);
        }
-       vfree(buf->sg_desc.sglist);
        kfree(buf->pages);
        kfree(buf);
 }
@@ -218,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
 
        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
-                                       buf->sg_desc.num_pages,
+                                       buf->num_pages,
                                        -1,
                                        PAGE_KERNEL);
 
@@ -274,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
 
-       return &buf->sg_desc;
+       return &buf->sg_table;
 }
 
 const struct vb2_mem_ops vb2_dma_sg_memops = {
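
For reference, a worked pass through the order-selection step of vb2_dma_sg_alloc_compacted() above, assuming a hypothetical 600 KiB request and 4 KiB pages:

    size_t size = 600 * 1024;
    int order = get_order(size);        /* 8: next power of two is 1 MiB (256 pages) */

    if ((PAGE_SIZE << order) > size)
            order--;                    /* 7: don't over-allocate, try 512 KiB       */

    /* alloc_pages() is attempted at order 7 and, if memory is fragmented, at
     * successively smaller orders; each chunk that succeeds is split_page()d
     * into order-0 pages and the loop repeats for the remaining 88 KiB.        */
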
index 5d4fd69d04ca05f1b189e458fdbacd4e54e3a0b7..4ef01ab67853289ba7c1db81e5d7ec5464333401 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/jiffies.h>
 #include <linux/of.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 
 /*
  * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
index d0fdc134068a05ed9967fe98ca7d9a6d6429e710..f6ff711aa5bbbaa4a177dbd416ccdab53017bb23 100644 (file)
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
        dev->iamthif_ioctl = false;
        dev->iamthif_state = MEI_IAMTHIF_IDLE;
        dev->iamthif_timer = 0;
+       dev->iamthif_stall_timer = 0;
 }
 
 /**
index 6d0282c08a06cdbe6233a379ee575882b1d7a525..cd2033cd7120d7631fa8d345409ed9ea86259799 100644 (file)
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 
        if (cl->reading_state != MEI_READ_COMPLETE &&
            !waitqueue_active(&cl->rx_wait)) {
+
                mutex_unlock(&dev->device_lock);
 
                if (wait_event_interruptible(cl->rx_wait,
-                               (MEI_READ_COMPLETE == cl->reading_state))) {
+                               cl->reading_state == MEI_READ_COMPLETE  ||
+                               mei_cl_is_transitioning(cl))) {
+
                        if (signal_pending(current))
                                return -EINTR;
                        return -ERESTARTSYS;
index 9eb031e920701e8ad8feadf3bec2900b182e4a5c..892cc4207fa202629e27698c0ae536f0b81880b0 100644 (file)
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
                cl->dev->dev_state == MEI_DEV_ENABLED &&
                cl->state == MEI_FILE_CONNECTED);
 }
+static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
+{
+       return (MEI_FILE_INITIALIZING == cl->state ||
+               MEI_FILE_DISCONNECTED == cl->state ||
+               MEI_FILE_DISCONNECTING == cl->state);
+}
 
 bool mei_cl_is_other_connecting(struct mei_cl *cl);
 int mei_cl_disconnect(struct mei_cl *cl);
index 6127ab64bb399323e57e418e9742c2f5b8a9d86d..0a0448326e9d583f951932b220d90186c59f92b7 100644 (file)
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
        struct mei_me_client *clients;
        int b;
 
+       dev->me_clients_num = 0;
+       dev->me_client_presentation_num = 0;
+       dev->me_client_index = 0;
+
        /* count how many ME clients we have */
        for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
                dev->me_clients_num++;
 
-       if (dev->me_clients_num <= 0)
+       if (dev->me_clients_num == 0)
                return;
 
        kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
        struct hbm_props_request *prop_req;
        const size_t len = sizeof(struct hbm_props_request);
        unsigned long next_client_index;
-       u8 client_num;
+       unsigned long client_num;
 
 
        client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
                if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
                    dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
                                dev->init_clients_timer = 0;
-                               dev->me_client_presentation_num = 0;
-                               dev->me_client_index = 0;
                                mei_hbm_me_cl_allocate(dev);
                                dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
 
index 92c73118b13c450149e21ec48b990e6329f4e5b1..6197018e2f16a24296619442fe6c8744c7c55b88 100644 (file)
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
                memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
        }
 
+       /* we're already in reset, cancel the init timer */
+       dev->init_clients_timer = 0;
+
        dev->me_clients_num = 0;
        dev->rd_msg_hdr = 0;
        dev->wd_pending = false;
index 173ff095be0dd6747145c9653726b3e00f85cd81..cabeddd66c1f406f73f7e5bed2a56b7e52cd7621 100644 (file)
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
                mutex_unlock(&dev->device_lock);
 
                if (wait_event_interruptible(cl->rx_wait,
-                       (MEI_READ_COMPLETE == cl->reading_state ||
-                        MEI_FILE_INITIALIZING == cl->state ||
-                        MEI_FILE_DISCONNECTED == cl->state ||
-                        MEI_FILE_DISCONNECTING == cl->state))) {
+                               MEI_READ_COMPLETE == cl->reading_state ||
+                               mei_cl_is_transitioning(cl))) {
+
                        if (signal_pending(current))
                                return -EINTR;
                        return -ERESTARTSYS;
                }
 
                mutex_lock(&dev->device_lock);
-               if (MEI_FILE_INITIALIZING == cl->state ||
-                   MEI_FILE_DISCONNECTED == cl->state ||
-                   MEI_FILE_DISCONNECTING == cl->state) {
+               if (mei_cl_is_transitioning(cl)) {
                        rets = -EBUSY;
                        goto out;
                }
index 7b918b2fb89468ad6c653a3ff9e0f616aefa5022..456b322013e269fc61f911d6f5761768c2863d4f 100644 (file)
@@ -396,9 +396,9 @@ struct mei_device {
        struct mei_me_client *me_clients; /* Note: memory has to be allocated */
        DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
        DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
-       u8 me_clients_num;
-       u8 me_client_presentation_num;
-       u8 me_client_index;
+       unsigned long me_clients_num;
+       unsigned long me_client_presentation_num;
+       unsigned long me_client_index;
 
        struct mei_cl wd_cl;
        enum mei_wd_states wd_state;
index fa9632eb63f14cc9af971f290989f13263c3cbfd..357bbc54fe4b6f423aa2dcc86ca3a23624748a6b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-               limit = *mmc_dev(host)->dma_mask;
+               limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
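
The bounce limit above is now derived from dma_max_pfn() instead of the raw DMA mask, so architectures that provide their own dma_max_pfn() can be honoured; the limit stays a page-frame count converted back to bytes. A sketch of the arithmetic for a hypothetical device with a plain 32-bit mask and 4 KiB pages:

    /* dma_max_pfn(dev) == 0xffffffff >> 12 == 0xfffff, so: */
    u64 limit = (u64)0xfffff << PAGE_SHIFT;   /* == 0xfffff000 bytes */
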
index c3785edc0e92c851d3c36a0481a1b2936f92fc63..d135c76c4855b825175370e215979bd528e4b2d4 100644 (file)
@@ -62,6 +62,7 @@ static unsigned int fmax = 515633;
  * @signal_direction: input/out direction of bus signals can be indicated
  * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  * @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
  */
 struct variant_data {
        unsigned int            clkreg;
@@ -76,6 +77,7 @@ struct variant_data {
        bool                    signal_direction;
        bool                    pwrreg_clkgate;
        bool                    busy_detect;
+       bool                    pwrreg_nopower;
 };
 
 static struct variant_data variant_arm = {
@@ -109,6 +111,7 @@ static struct variant_data variant_u300 = {
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
+       .pwrreg_nopower         = true,
 };
 
 static struct variant_data variant_nomadik = {
@@ -121,6 +124,7 @@ static struct variant_data variant_nomadik = {
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
+       .pwrreg_nopower         = true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -135,6 +139,7 @@ static struct variant_data variant_ux500 = {
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
+       .pwrreg_nopower         = true,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -150,6 +155,7 @@ static struct variant_data variant_ux500v2 = {
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
+       .pwrreg_nopower         = true,
 };
 
 static int mmci_card_busy(struct mmc_host *mmc)
@@ -189,6 +195,21 @@ static int mmci_validate_data(struct mmci_host *host,
        return 0;
 }
 
+static void mmci_reg_delay(struct mmci_host *host)
+{
+       /*
+        * According to the spec, at least three feedback clock cycles
+        * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
+        * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
+        * Worst delay time during card init is at 100 kHz => 30 us.
+        * Worst delay time when up and running is at 25 MHz => 120 ns.
+        */
+       if (host->cclk < 25000000)
+               udelay(30);
+       else
+               ndelay(120);
+}
+
 /*
  * This must be called with host->lock held
  */
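
As a worked check of the figures quoted in the mmci_reg_delay() comment above:

    3 cycles at 100 kHz = 3 / 100 000 Hz   = 30 us  -> udelay(30)
    3 cycles at  25 MHz = 3 / 25 000 000 Hz = 120 ns -> ndelay(120)
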
@@ -1264,6 +1285,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        mmci_set_clkreg(host, ios->clock);
        mmci_write_pwrreg(host, pwr);
+       mmci_reg_delay(host);
 
        spin_unlock_irqrestore(&host->lock, flags);
 
@@ -1510,23 +1532,6 @@ static int mmci_probe(struct amba_device *dev,
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
 
-       host->pinctrl = devm_pinctrl_get(&dev->dev);
-       if (IS_ERR(host->pinctrl)) {
-               ret = PTR_ERR(host->pinctrl);
-               goto clk_disable;
-       }
-
-       host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                       PINCTRL_STATE_DEFAULT);
-
-       /* enable pins to be muxed in and configured */
-       if (!IS_ERR(host->pins_default)) {
-               ret = pinctrl_select_state(host->pinctrl, host->pins_default);
-               if (ret)
-                       dev_warn(&dev->dev, "could not set default pins\n");
-       } else
-               dev_warn(&dev->dev, "could not get default pinstate\n");
-
        /* Get regulators and the supported OCR mask */
        mmc_regulator_get_supply(mmc);
        if (!mmc->ocr_avail)
@@ -1760,6 +1765,41 @@ static int mmci_resume(struct device *dev)
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
+static void mmci_save(struct mmci_host *host)
+{
+       unsigned long flags;
+
+       if (host->variant->pwrreg_nopower) {
+               spin_lock_irqsave(&host->lock, flags);
+
+               writel(0, host->base + MMCIMASK0);
+               writel(0, host->base + MMCIDATACTRL);
+               writel(0, host->base + MMCIPOWER);
+               writel(0, host->base + MMCICLOCK);
+               mmci_reg_delay(host);
+
+               spin_unlock_irqrestore(&host->lock, flags);
+       }
+
+}
+
+static void mmci_restore(struct mmci_host *host)
+{
+       unsigned long flags;
+
+       if (host->variant->pwrreg_nopower) {
+               spin_lock_irqsave(&host->lock, flags);
+
+               writel(host->clk_reg, host->base + MMCICLOCK);
+               writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+               writel(host->pwr_reg, host->base + MMCIPOWER);
+               writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+               mmci_reg_delay(host);
+
+               spin_unlock_irqrestore(&host->lock, flags);
+       }
+}
+
 static int mmci_runtime_suspend(struct device *dev)
 {
        struct amba_device *adev = to_amba_device(dev);
@@ -1767,6 +1807,8 @@ static int mmci_runtime_suspend(struct device *dev)
 
        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
+               pinctrl_pm_select_sleep_state(dev);
+               mmci_save(host);
                clk_disable_unprepare(host->clk);
        }
 
@@ -1781,6 +1823,8 @@ static int mmci_runtime_resume(struct device *dev)
        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                clk_prepare_enable(host->clk);
+               mmci_restore(host);
+               pinctrl_pm_select_default_state(dev);
        }
 
        return 0;
index 69080fab637520af2a9ea8b4a6ba49ae52b0344f..168bc72f7a94a9b662d7c0a97775c781d4d769aa 100644 (file)
@@ -200,10 +200,6 @@ struct mmci_host {
        struct sg_mapping_iter  sg_miter;
        unsigned int            size;
 
-       /* pinctrl handles */
-       struct pinctrl          *pinctrl;
-       struct pinctrl_state    *pins_default;
-
 #ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        struct dma_chan         *dma_current;
index cdd4ce0d7c90c91526cb447996c2607603bb0195..ef19874fcd1f4dcd8ab6d9821091f542021ccc8f 100644 (file)
@@ -310,8 +310,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                        dma_mask = DMA_BIT_MASK(32);
                }
 
-               dev->dma_mask = &dev->coherent_dma_mask;
-               dev->coherent_dma_mask = dma_mask;
+               err = dma_coerce_mask_and_coherent(dev, dma_mask);
+               if (err)
+                       goto err_free;
        }
 
        if (c->slot) {
index 87ed3fb5149ace85aef3338526741e5010ee1d9d..f344659dceac2739f47e032cbaaec2c54c86b6f6 100644 (file)
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
 };
 
 static const struct of_device_id sh_mobile_sdhi_of_match[] = {
-       { .compatible = "renesas,shmobile-sdhi" },
-       { .compatible = "renesas,sh7372-sdhi" },
-       { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
-       { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
-       { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
-       { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
-       { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
-       { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+       { .compatible = "renesas,sdhi-shmobile" },
+       { .compatible = "renesas,sdhi-sh7372" },
+       { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
+       { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
+       { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
+       { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
+       { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
+       { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
        {},
 };
 MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
index 26b14f9fcac6d129c3a2bc5c7a9fa7baea234691..6bc9618af0942528d67e7846810adf7c2288b75a 100644 (file)
@@ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash)
  */
 static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
 {
+       int status;
+       bool need_wren = false;
+
        switch (JEDEC_MFR(jedec_id)) {
-       case CFI_MFR_MACRONIX:
        case CFI_MFR_ST: /* Micron, actually */
+               /* Some Micron parts need the WREN command; all will accept it */
+               need_wren = true;
+       case CFI_MFR_MACRONIX:
        case 0xEF /* winbond */:
+               if (need_wren)
+                       write_enable(flash);
+
                flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
-               return spi_write(flash->spi, flash->command, 1);
+               status = spi_write(flash->spi, flash->command, 1);
+
+               if (need_wren)
+                       write_disable(flash);
+
+               return status;
        default:
                /* Spansion style */
                flash->command[0] = OPCODE_BRWR;
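
In the reworked set_4byte() above, the Micron (CFI_MFR_ST) case sets need_wren and then deliberately falls through to the shared Macronix/Winbond branch, so only Micron parts get the extra write-enable/write-disable bracketing while the EN4B/EX4B opcode path stays common. A minimal sketch of the resulting command sequence for a part that needs it (helper and opcode names as used in this driver):

    write_enable(flash);                                 /* WREN                    */
    flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
    status = spi_write(flash->spi, flash->command, 1);   /* enter/leave 4-byte mode */
    write_disable(flash);                                /* WRDI                    */
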
index 7ed4841327f2d7668e51c75645628f2127589f82..d340b2f198c614b72fc68eed945a8c1a662d147c 100644 (file)
@@ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
 
        len = le16_to_cpu(p->ext_param_page_length) * 16;
        ep = kmalloc(len, GFP_KERNEL);
-       if (!ep) {
-               ret = -ENOMEM;
-               goto ext_out;
-       }
+       if (!ep)
+               return -ENOMEM;
 
        /* Send our own NAND_CMD_PARAM. */
        chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
@@ -2920,7 +2918,7 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
        }
 
        pr_info("ONFI extended param page detected.\n");
-       return 0;
+       ret = 0;
 
 ext_out:
        kfree(ep);
index bdc1d15369f844bfce700a9794b4f8eb558fa921..414f5225ae8b2ece87f1044e6c2a293ae481a773 100644 (file)
@@ -575,12 +575,12 @@ static int alloc_device(struct nandsim *ns)
                cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
                if (IS_ERR(cfile))
                        return PTR_ERR(cfile);
-               if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+               if (!file_readable(cfile)) {
                        NS_ERR("alloc_device: cache file not readable\n");
                        err = -EINVAL;
                        goto err_close;
                }
-               if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+               if (!file_writable(cfile)) {
                        NS_ERR("alloc_device: cache file not writeable\n");
                        err = -EINVAL;
                        goto err_close;
index 5db900d917f92434f5941a393bcb9045ee99bd6c..dd03dfdfb0d65e0ded1b0a124221663b586482b5 100644 (file)
@@ -1236,7 +1236,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
 static struct of_device_id pxa3xx_nand_dt_ids[] = {
        {
                .compatible = "marvell,pxa3xx-nand",
@@ -1284,12 +1283,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
 
        return 0;
 }
-#else
-static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
-{
-       return 0;
-}
-#endif
 
 static int pxa3xx_nand_probe(struct platform_device *pdev)
 {
index c071d410488f4c95a25ef4a4fedfc325b5ff83c9..33bb1f2b63e4f323f0c0c9494b12a1ce3e46d1e2 100644 (file)
@@ -900,10 +900,9 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                 * number.
                 */
                image_seq = be32_to_cpu(ech->image_seq);
-               if (!ubi->image_seq && image_seq)
+               if (!ubi->image_seq)
                        ubi->image_seq = image_seq;
-               if (ubi->image_seq && image_seq &&
-                   ubi->image_seq != image_seq) {
+               if (image_seq && ubi->image_seq != image_seq) {
                        ubi_err("bad image sequence number %d in PEB %d, expected %d",
                                image_seq, pnum, ubi->image_seq);
                        ubi_dump_ec_hdr(ech);
@@ -1417,9 +1416,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
                                ai = alloc_ai("ubi_aeb_slab_cache2");
                                if (!ai)
                                        return -ENOMEM;
-                       }
 
-                       err = scan_all(ubi, ai, UBI_FM_MAX_START);
+                               err = scan_all(ubi, ai, 0);
+                       } else {
+                               err = scan_all(ubi, ai, UBI_FM_MAX_START);
+                       }
                }
        }
 #else
index f5aa4b02cfa676f3270dc9b9a87bd87ef031893d..ead861307b3c57aac13bbb187a958a8910062904 100644 (file)
@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
         */
        for (i = 0; i < pool_size; i++) {
                int scrub = 0;
+               int image_seq;
 
                pnum = be32_to_cpu(pebs[i]);
 
@@ -425,10 +426,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                } else if (ret == UBI_IO_BITFLIPS)
                        scrub = 1;
 
-               if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+               /*
+                * Older UBI implementations have image_seq set to zero, so
+                * we shouldn't fail if image_seq == 0.
+                */
+               image_seq = be32_to_cpu(ech->image_seq);
+
+               if (image_seq && (image_seq != ubi->image_seq)) {
                        ubi_err("bad image seq: 0x%x, expected: 0x%x",
                                be32_to_cpu(ech->image_seq), ubi->image_seq);
-                       err = UBI_BAD_FASTMAP;
+                       ret = UBI_BAD_FASTMAP;
                        goto out;
                }
 
@@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
        list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
                list_move_tail(&tmp_aeb->u.list, &ai->free);
 
+       ubi_assert(list_empty(&used));
+       ubi_assert(list_empty(&eba_orphans));
+       ubi_assert(list_empty(&free));
+
        /*
         * If fastmap is leaking PEBs (must not happen), raise a
         * fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 fail_bad:
        ret = UBI_BAD_FASTMAP;
 fail:
+       list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+               kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+               list_del(&tmp_aeb->u.list);
+       }
+       list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+               kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+               list_del(&tmp_aeb->u.list);
+       }
+       list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+               kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+               list_del(&tmp_aeb->u.list);
+       }
+
        return ret;
 }
 
@@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
        }
 
        for (i = 0; i < used_blocks; i++) {
+               int image_seq;
+
                pnum = be32_to_cpu(fmsb->block_loc[i]);
 
                if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
                } else if (ret == UBI_IO_BITFLIPS)
                        fm->to_be_tortured[i] = 1;
 
+               image_seq = be32_to_cpu(ech->image_seq);
                if (!ubi->image_seq)
-                       ubi->image_seq = be32_to_cpu(ech->image_seq);
+                       ubi->image_seq = image_seq;
 
-               if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+               /*
+                * Older UBI implementations have image_seq set to zero, so
+                * we shouldn't fail if image_seq == 0.
+                */
+               if (image_seq && (image_seq != ubi->image_seq)) {
+                       ubi_err("wrong image seq:%d instead of %d",
+                               be32_to_cpu(ech->image_seq), ubi->image_seq);
                        ret = UBI_BAD_FASTMAP;
                        goto free_hdr;
                }
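
The new failure paths in ubi_attach_fastmap() above drain three temporary lists before returning; the general kernel idiom for that kind of destructive walk (illustrative structure and list names, not the UBI ones) is:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head node;
    };

    static void drain(struct list_head *pending)
    {
            struct item *e, *tmp;

            /* _safe form: 'tmp' caches the next entry so 'e' may be freed */
            list_for_each_entry_safe(e, tmp, pending, node) {
                    list_del(&e->node);
                    kfree(e);
            }
    }
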
index c95bfb183c62b185f2f6cc17b5d84245bdd2d13d..02317c1c02385914c94175fa8757089c677e2b94 100644 (file)
@@ -599,10 +599,6 @@ static void refill_wl_user_pool(struct ubi_device *ubi)
        return_unused_pool_pebs(ubi, pool);
 
        for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
-               if (!ubi->free.rb_node ||
-                  (ubi->free_count - ubi->beb_rsvd_pebs < 1))
-                       break;
-
                pool->pebs[pool->size] = __wl_get_peb(ubi);
                if (pool->pebs[pool->size] < 0)
                        break;
index 3a8c7532ee0d23ed1e896fe94ff017e9cbb82738..a7271e093845fabe111934ea83f7d2ab9588d44e 100644 (file)
@@ -102,8 +102,7 @@ static struct devprobe2 isa_probes[] __initdata = {
 #ifdef CONFIG_WD80x3
        {wd_probe, 0},
 #endif
-#if defined(CONFIG_NE2000) || \
-    defined(CONFIG_NE_H8300)  /* ISA (use ne2k-pci for PCI cards) */
+#if defined(CONFIG_NE2000) /* ISA (use ne2k-pci for PCI cards) */
        {ne_probe, 0},
 #endif
 #ifdef CONFIG_LANCE            /* ISA/VLB (use pcnet32 for PCI cards) */
index 4c21bf6b8b2f015abfaf77b414d3e5b7670026f2..5a5d720da9292e787b4a2cdb94e33bbe883e8920 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
 
 proc-$(CONFIG_PROC_FS) += bond_procfs.o
 bonding-objs += $(proc-y)
index 0d8f427ade938c75052d804138cd377b02c34f99..187b1b7772ef1b873303fc46998a591137bec7b7 100644 (file)
@@ -135,41 +135,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
        return bond_get_bond_by_slave(port->slave);
 }
 
-/**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
-       struct slave *first_slave = bond_first_slave(bond);
-
-       return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
-       struct bonding *bond = __get_bond_by_port(port);
-       struct slave *slave = port->slave, *slave_next;
-
-       // If there's no bond for this port, or this is the last slave
-       if (bond == NULL)
-               return NULL;
-       slave_next = bond_next_slave(bond, slave);
-       if (!slave_next || bond_is_first_slave(bond, slave_next))
-               return NULL;
-
-       return &(SLAVE_AD_INFO(slave_next).port);
-}
-
 /**
  * __get_first_agg - get the first aggregator in the bond
  * @bond: the bond we're looking at
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
        return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
-       struct slave *slave = aggregator->slave, *slave_next;
-       struct bonding *bond = bond_get_bond_by_slave(slave);
-
-       // If there's no bond for this aggregator, or this is the last slave
-       if (bond == NULL)
-               return NULL;
-       slave_next = bond_next_slave(bond, slave);
-       if (!slave_next || bond_is_first_slave(bond, slave_next))
-               return NULL;
-
-       return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
 /*
  * __agg_has_partner
  *
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
  */
 static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 {
-       struct aggregator *retval = NULL;
+       struct bonding *bond = aggregator->slave->bond;
+       struct list_head *iter;
+       struct slave *slave;
 
-       for (; aggregator; aggregator = __get_next_agg(aggregator)) {
-               if (aggregator->is_active) {
-                       retval = aggregator;
-                       break;
-               }
-       }
+       bond_for_each_slave(bond, slave, iter)
+               if (SLAVE_AD_INFO(slave).aggregator.is_active)
+                       return &(SLAVE_AD_INFO(slave).aggregator);
 
-       return retval;
+       return NULL;
 }
 
 /**
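
The helpers __get_first_port()/__get_next_port() and __get_next_agg() removed above are replaced throughout this file by walking the slave list directly; the recurring pattern in the converted call sites looks like this (a condensed sketch, not one specific hunk):

    struct list_head *iter;
    struct slave *slave;
    struct aggregator *agg;

    bond_for_each_slave(bond, slave, iter) {
            agg = &(SLAVE_AD_INFO(slave).aggregator);
            /* per-aggregator work, e.g. testing agg->is_active */
    }
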
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
 {
        struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
        struct port *last_port = NULL, *curr_port;
+       struct list_head *iter;
+       struct bonding *bond;
+       struct slave *slave;
        int found = 0;
 
        // if the port is already Selected, do nothing
        if (port->sm_vars & AD_PORT_SELECTED)
                return;
 
+       bond = __get_bond_by_port(port);
+
        // if the port is connected to other aggregator, detach it
        if (port->aggregator) {
                // detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
                }
        }
        // search on all aggregators for a suitable aggregator for this port
-       for (aggregator = __get_first_agg(port); aggregator;
-            aggregator = __get_next_agg(aggregator)) {
+       bond_for_each_slave(bond, slave, iter) {
+               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
                // keep a free aggregator for later use(if needed)
                if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
 static void ad_agg_selection_logic(struct aggregator *agg)
 {
        struct aggregator *best, *active, *origin;
+       struct bonding *bond = agg->slave->bond;
+       struct list_head *iter;
+       struct slave *slave;
        struct port *port;
 
        origin = agg;
        active = __get_active_agg(agg);
        best = (active && agg_device_up(active)) ? active : NULL;
 
-       do {
+       bond_for_each_slave(bond, slave, iter) {
+               agg = &(SLAVE_AD_INFO(slave).aggregator);
+
                agg->is_active = 0;
 
                if (agg->num_of_ports && agg_device_up(agg))
                        best = ad_agg_selection_test(best, agg);
-
-       } while ((agg = __get_next_agg(agg)));
+       }
 
        if (best &&
            __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                         best->lag_ports, best->slave,
                         best->slave ? best->slave->dev->name : "NULL");
 
-               for (agg = __get_first_agg(best->lag_ports); agg;
-                    agg = __get_next_agg(agg)) {
+               bond_for_each_slave(bond, slave, iter) {
+                       agg = &(SLAVE_AD_INFO(slave).aggregator);
 
                        pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
                                 agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                }
        }
 
-       if (origin->slave) {
-               struct bonding *bond;
-
-               bond = bond_get_bond_by_slave(origin->slave);
-               if (bond)
-                       bond_3ad_set_carrier(bond);
-       }
+       bond_3ad_set_carrier(bond);
 }
 
 /**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
        struct port *port, *prev_port, *temp_port;
        struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
        int select_new_active_agg = 0;
+       struct bonding *bond = slave->bond;
+       struct slave *slave_iter;
+       struct list_head *iter;
 
        // find the aggregator related to this slave
        aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
                // reason to search for new aggregator, and that we will find one
                if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
                        // find new aggregator for the related port(s)
-                       new_aggregator = __get_first_agg(port);
-                       for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+                       bond_for_each_slave(bond, slave_iter, iter) {
+                               new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
                                // if the new aggregator is empty, or it is connected to our port only
                                if (!new_aggregator->lag_ports
                                    || ((new_aggregator->lag_ports == port)
                                        && !new_aggregator->lag_ports->next_port_in_aggregator))
                                        break;
                        }
+                       if (!slave_iter)
+                               new_aggregator = NULL;
                        // if new aggregator found, copy the aggregator's parameters
                        // and connect the related lag_ports to the new aggregator
                        if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                pr_info("%s: Removing an active aggregator\n",
                                        slave->bond->dev->name);
                                // select new active aggregator
-                               ad_agg_selection_logic(__get_first_agg(port));
+                               temp_aggregator = __get_first_agg(port);
+                               if (temp_aggregator)
+                                       ad_agg_selection_logic(temp_aggregator);
                        }
                }
        }
 
        pr_debug("Unbinding port %d\n", port->actor_port_number);
        // find the aggregator that this port is connected to
-       temp_aggregator = __get_first_agg(port);
-       for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+       bond_for_each_slave(bond, slave_iter, iter) {
+               temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
                prev_port = NULL;
                // search the port in the aggregator's related ports
                for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 {
        struct bonding *bond = container_of(work, struct bonding,
                                            ad_work.work);
-       struct port *port;
        struct aggregator *aggregator;
+       struct list_head *iter;
+       struct slave *slave;
+       struct port *port;
 
        read_lock(&bond->lock);
 
        //check if there are any slaves
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        // check if agg_select_timer timer after initialize is timed out
        if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+               slave = bond_first_slave(bond);
+               port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
                // select the active aggregator for the bond
-               if ((port = __get_first_port(bond))) {
+               if (port) {
                        if (!port->slave) {
                                pr_warning("%s: Warning: bond's first port is uninitialized\n",
                                           bond->dev->name);
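
This hunk, and many that follow, replaces open-coded list_empty(&bond->slave_list) checks with bond_has_slaves(). The helper itself is added in bonding.h, which is not part of this excerpt; presumably it is a thin wrapper along these lines:

    /* Assumed shape of the helper (bonding.h is not shown in this diff): */
    static inline bool bond_has_slaves(struct bonding *bond)
    {
            return !list_empty(&bond->slave_list);
    }
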
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        }
 
        // for each port run the state machines
-       for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+       bond_for_each_slave(bond, slave, iter) {
+               port = &(SLAVE_AD_INFO(slave).port);
                if (!port->slave) {
                        pr_warning("%s: Warning: Found an uninitialized port\n",
                                   bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
                                   struct ad_info *ad_info)
 {
        struct aggregator *aggregator = NULL;
+       struct list_head *iter;
+       struct slave *slave;
        struct port *port;
 
-       for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               port = &(SLAVE_AD_INFO(slave).port);
                if (port->aggregator && port->aggregator->is_active) {
                        aggregator = port->aggregator;
                        break;
@@ -2408,25 +2369,25 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
 {
        int ret;
 
-       read_lock(&bond->lock);
+       rcu_read_lock();
        ret = __bond_3ad_get_active_agg_info(bond, ad_info);
-       read_unlock(&bond->lock);
+       rcu_read_unlock();
 
        return ret;
 }
 
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 {
-       struct slave *slave, *start_at;
        struct bonding *bond = netdev_priv(dev);
-       int slave_agg_no;
-       int slaves_in_agg;
-       int agg_id;
-       int i;
+       struct slave *slave, *first_ok_slave;
+       struct aggregator *agg;
        struct ad_info ad_info;
+       struct list_head *iter;
+       int slaves_in_agg;
+       int slave_agg_no;
        int res = 1;
+       int agg_id;
 
-       read_lock(&bond->lock);
        if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
                pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
                         dev->name);
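
bond_3ad_get_active_agg_info() now takes rcu_read_lock() around the slave walk instead of read-locking bond->lock, so a caller that only needs a snapshot of the active aggregator no longer contends with writers. A hypothetical caller, using only fields that struct ad_info already provides (the helper name is ours, not the driver's):

    /* Hypothetical illustration: count the ports of the active aggregator
     * without touching bond->lock, relying on the RCU variant above. */
    static u16 bond_ad_active_ports(struct bonding *bond)
    {
            struct ad_info info;

            if (bond_3ad_get_active_agg_info(bond, &info))
                    return 0;       /* no active aggregator */
            return info.ports;
    }
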
@@ -2437,20 +2398,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        agg_id = ad_info.aggregator_id;
 
        if (slaves_in_agg == 0) {
-               /*the aggregator is empty*/
                pr_debug("%s: Error: active aggregator is empty\n", dev->name);
                goto out;
        }
 
-       slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+       slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+       first_ok_slave = NULL;
 
-       bond_for_each_slave(bond, slave) {
-               struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               if (!agg || agg->aggregator_identifier != agg_id)
+                       continue;
 
-               if (agg && (agg->aggregator_identifier == agg_id)) {
+               if (slave_agg_no >= 0) {
+                       if (!first_ok_slave && SLAVE_IS_OK(slave))
+                               first_ok_slave = slave;
                        slave_agg_no--;
-                       if (slave_agg_no < 0)
-                               break;
+                       continue;
+               }
+
+               if (SLAVE_IS_OK(slave)) {
+                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
+                       goto out;
                }
        }
 
@@ -2460,23 +2429,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
-       start_at = slave;
-
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               int slave_agg_id = 0;
-               struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
-               if (agg)
-                       slave_agg_id = agg->aggregator_identifier;
-
-               if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
-                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
-                       break;
-               }
-       }
+       /* we couldn't find any suitable slave after the agg_no, so use the
+        * first suitable one found, if any. */
+       if (first_ok_slave)
+               res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
 
 out:
-       read_unlock(&bond->lock);
        if (res) {
                /* no suitable interface, frame not sent */
                kfree_skb(skb);
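
The transmit path above becomes a single RCU-protected pass: bond_xmit_hash() yields an offset among the active aggregator's slaves, the loop skips that many matching slaves while remembering the first usable one, and transmission falls back to that remembered slave when the slave at the hashed offset is unusable. The same selection, condensed into a standalone helper for readability (illustrative only; the real code transmits inline and reports errors through res):

    /* Sketch of the slave selection in bond_3ad_xmit_xor() above.
     * @agg_id and @n correspond to ad_info.aggregator_id and the hash offset. */
    static struct slave *ad_pick_tx_slave(struct bonding *bond, int agg_id, int n)
    {
            struct slave *slave, *first_ok = NULL;
            struct aggregator *agg;
            struct list_head *iter;

            bond_for_each_slave_rcu(bond, slave, iter) {
                    agg = SLAVE_AD_INFO(slave).port.aggregator;
                    if (!agg || agg->aggregator_identifier != agg_id)
                            continue;
                    if (n >= 0) {
                            if (!first_ok && SLAVE_IS_OK(slave))
                                    first_ok = slave;
                            n--;
                            continue;
                    }
                    if (SLAVE_IS_OK(slave))
                            return slave;
            }
            return first_ok;        /* may be NULL: caller drops the frame */
    }
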
@@ -2515,11 +2473,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
        struct port *port = NULL;
+       struct list_head *iter;
        struct slave *slave;
        int lacp_fast;
 
        lacp_fast = bond->params.lacp_fast;
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                port = &(SLAVE_AD_INFO(slave).port);
                __get_state_machine_lock(port);
                if (lacp_fast)
index f428ef57437279ec4bbf15e1c7e8e9b6a9da7a2c..02872405d35dc4a53d13473b31d1b517d22610d0 100644 (file)
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
        struct slave *slave, *least_loaded;
+       struct list_head *iter;
        long long max_gap;
 
        least_loaded = NULL;
        max_gap = LLONG_MIN;
 
        /* Find the slave with the largest gap */
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (SLAVE_IS_OK(slave)) {
                        long long gap = compute_gap(slave);
 
@@ -382,30 +383,64 @@ out:
 static struct slave *rlb_next_rx_slave(struct bonding *bond)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct slave *rx_slave, *slave, *start_at;
-       int i = 0;
+       struct slave *before = NULL, *rx_slave = NULL, *slave;
+       struct list_head *iter;
+       bool found = false;
 
-       if (bond_info->next_rx_slave)
-               start_at = bond_info->next_rx_slave;
-       else
-               start_at = bond_first_slave(bond);
+       bond_for_each_slave(bond, slave, iter) {
+               if (!SLAVE_IS_OK(slave))
+                       continue;
+               if (!found) {
+                       if (!before || before->speed < slave->speed)
+                               before = slave;
+               } else {
+                       if (!rx_slave || rx_slave->speed < slave->speed)
+                               rx_slave = slave;
+               }
+               if (slave == bond_info->rx_slave)
+                       found = true;
+       }
+       /* we didn't find anything after the current slave, or we have something
+        * better before and up to the current slave
+        */
+       if (!rx_slave || (before && rx_slave->speed < before->speed))
+               rx_slave = before;
 
-       rx_slave = NULL;
+       if (rx_slave)
+               bond_info->rx_slave = rx_slave;
 
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               if (SLAVE_IS_OK(slave)) {
-                       if (!rx_slave) {
-                               rx_slave = slave;
-                       } else if (slave->speed > rx_slave->speed) {
+       return rx_slave;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static struct slave *__rlb_next_rx_slave(struct bonding *bond)
+{
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct slave *before = NULL, *rx_slave = NULL, *slave;
+       struct list_head *iter;
+       bool found = false;
+
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               if (!SLAVE_IS_OK(slave))
+                       continue;
+               if (!found) {
+                       if (!before || before->speed < slave->speed)
+                               before = slave;
+               } else {
+                       if (!rx_slave || rx_slave->speed < slave->speed)
                                rx_slave = slave;
-                       }
                }
+               if (slave == bond_info->rx_slave)
+                       found = true;
        }
+       /* we didn't find anything after the current slave, or we have something
+        * better before and up to the current slave
+        */
+       if (!rx_slave || (before && rx_slave->speed < before->speed))
+               rx_slave = before;
 
-       if (rx_slave) {
-               slave = bond_next_slave(bond, rx_slave);
-               bond_info->next_rx_slave = slave;
-       }
+       if (rx_slave)
+               bond_info->rx_slave = rx_slave;
 
        return rx_slave;
 }
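
rlb_next_rx_slave() and its new RCU-safe twin __rlb_next_rx_slave() implement the same wrap-around scan: remember the fastest usable slave seen up to and including the currently cached rx_slave, prefer the fastest one seen after it, and fall back to the earlier candidate when nothing better follows; both also cache the result in bond_info->rx_slave. A compact sketch of that selection (the helper name is ours):

    /* Illustration only: circular "next fastest" pick relative to @cur,
     * mirroring the found/before/rx_slave logic in the two functions above. */
    static struct slave *next_fastest_after(struct bonding *bond,
                                            struct slave *cur)
    {
            struct slave *before = NULL, *after = NULL, *slave;
            struct list_head *iter;
            bool found = false;

            bond_for_each_slave(bond, slave, iter) {
                    if (!SLAVE_IS_OK(slave))
                            continue;
                    if (!found) {
                            if (!before || before->speed < slave->speed)
                                    before = slave;
                    } else {
                            if (!after || after->speed < slave->speed)
                                    after = slave;
                    }
                    if (slave == cur)
                            found = true;
            }
            /* nothing usable after @cur, or something faster before it */
            if (!after || (before && after->speed < before->speed))
                    after = before;
            return after;
    }
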
@@ -626,12 +661,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
        struct arp_pkt *arp = arp_pkt(skb);
-       struct slave *assigned_slave;
+       struct slave *assigned_slave, *curr_active_slave;
        struct rlb_client_info *client_info;
        u32 hash_index = 0;
 
        _lock_rx_hashtbl(bond);
 
+       curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
        hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
        client_info = &(bond_info->rx_hashtbl[hash_index]);
 
@@ -656,14 +693,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                         * that the new client can be assigned to this entry.
                         */
                        if (bond->curr_active_slave &&
-                           client_info->slave != bond->curr_active_slave) {
-                               client_info->slave = bond->curr_active_slave;
+                           client_info->slave != curr_active_slave) {
+                               client_info->slave = curr_active_slave;
                                rlb_update_client(client_info);
                        }
                }
        }
        /* assign a new slave */
-       assigned_slave = rlb_next_rx_slave(bond);
+       assigned_slave = __rlb_next_rx_slave(bond);
 
        if (assigned_slave) {
                if (!(client_info->assigned &&
@@ -726,7 +763,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
        /* Don't modify or load balance ARPs that do not originate locally
         * (e.g., arrive via a bridge).
         */
-       if (!bond_slave_has_mac(bond, arp->mac_src))
+       if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
                return NULL;
 
        if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
-       netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+       netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                if (upper->priv_flags & IFF_802_1Q_VLAN)
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-       struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct slave *has_bond_addr = bond->curr_active_slave;
+       struct slave *tmp_slave1, *free_mac_slave = NULL;
+       struct list_head *iter;
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                /* this is the first slave */
                return 0;
        }
@@ -1196,7 +1234,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        /* The slave's address is equal to the address of the bond.
         * Search for a spare address in the bond for this slave.
         */
-       bond_for_each_slave(bond, tmp_slave1) {
+       bond_for_each_slave(bond, tmp_slave1, iter) {
                if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
                        /* no slave has tmp_slave1's perm addr
                         * as its curr addr
@@ -1246,15 +1284,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
  */
 static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
-       char tmp_addr[ETH_ALEN];
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        struct sockaddr sa;
+       char tmp_addr[ETH_ALEN];
        int res;
 
        if (bond->alb_info.rlb_enabled)
                return 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                /* save net_device's current hw address */
                memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
 
@@ -1274,10 +1313,12 @@ unwind:
        sa.sa_family = bond->dev->type;
 
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
-               memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
-               dev_set_mac_address(slave->dev, &sa);
-               memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               if (rollback_slave == slave)
+                       break;
+               memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+               dev_set_mac_address(rollback_slave->dev, &sa);
+               memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
        }
 
        return res;
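
The unwind path above no longer walks backwards from the failing slave; it restarts from the head of the list and stops when it reaches the slave whose operation failed. The same rollback shape appears again in bond_vlan_rx_add_vid() further down. A sketch of the idiom applied to a made-up operation (setting an MTU; bond_change_mtu() in this driver uses the same pattern):

    /* Illustrative only: apply an operation to every slave and roll back
     * from the head of the list if one of them fails. */
    static int example_set_slaves_mtu(struct bonding *bond, int new_mtu, int old_mtu)
    {
            struct slave *slave, *rollback_slave;
            struct list_head *iter;
            int res;

            bond_for_each_slave(bond, slave, iter) {
                    res = dev_set_mtu(slave->dev, new_mtu);
                    if (res)
                            goto unwind;
            }
            return 0;

    unwind:
            /* walk again from the head and stop at the slave that failed */
            bond_for_each_slave(bond, rollback_slave, iter) {
                    if (rollback_slave == slave)
                            break;
                    dev_set_mtu(rollback_slave->dev, old_mtu);
            }
            return res;
    }
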
@@ -1337,11 +1378,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        skb_reset_mac_header(skb);
        eth_data = eth_hdr(skb);
 
-       /* make sure that the curr_active_slave do not change during tx
-        */
-       read_lock(&bond->lock);
-       read_lock(&bond->curr_slave_lock);
-
        switch (ntohs(skb->protocol)) {
        case ETH_P_IP: {
                const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 
        if (!tx_slave) {
                /* unbalanced or unassigned, send through primary */
-               tx_slave = bond->curr_active_slave;
+               tx_slave = rcu_dereference(bond->curr_active_slave);
                bond_info->unbalanced_load += skb->len;
        }
 
        if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-               if (tx_slave != bond->curr_active_slave) {
+               if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
                        memcpy(eth_data->h_source,
                               tx_slave->dev->dev_addr,
                               ETH_ALEN);
@@ -1443,8 +1479,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                }
        }
 
-       read_unlock(&bond->curr_slave_lock);
-       read_unlock(&bond->lock);
        if (res) {
                /* no suitable interface, frame not sent */
                kfree_skb(skb);
@@ -1458,11 +1492,12 @@ void bond_alb_monitor(struct work_struct *work)
        struct bonding *bond = container_of(work, struct bonding,
                                            alb_work.work);
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct list_head *iter;
        struct slave *slave;
 
        read_lock(&bond->lock);
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                bond_info->tx_rebalance_counter = 0;
                bond_info->lp_counter = 0;
                goto re_arm;
@@ -1480,7 +1515,7 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave)
+               bond_for_each_slave(bond, slave, iter)
                        alb_send_learning_packets(slave, slave->dev->dev_addr);
 
                read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@ void bond_alb_monitor(struct work_struct *work)
 
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == bond->curr_active_slave) {
                                SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-       if (!list_empty(&bond->slave_list))
+       if (bond_has_slaves(bond))
                alb_change_hw_addr_on_detach(bond, slave);
 
        tlb_clear_slave(bond, slave, 0);
 
        if (bond->alb_info.rlb_enabled) {
-               bond->alb_info.next_rx_slave = NULL;
+               bond->alb_info.rx_slave = NULL;
                rlb_clear_slave(bond, slave);
        }
 }
@@ -1669,7 +1704,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        swap_slave = bond->curr_active_slave;
        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
-       if (!new_slave || list_empty(&bond->slave_list))
+       if (!new_slave || !bond_has_slaves(bond))
                return;
 
        /* set the new curr_active_slave to the bond's mac address
@@ -1692,6 +1727,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 
        ASSERT_RTNL();
 
+       /* in TLB mode, the slave might flip down/up with the old dev_addr,
+        * and thus filter bond->dev_addr's packets, so force bond's mac
+        */
+       if (bond->params.mode == BOND_MODE_TLB) {
+               struct sockaddr sa;
+               u8 tmp_addr[ETH_ALEN];
+
+               memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+
+               memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+               sa.sa_family = bond->dev->type;
+               /* we don't care if it can't change its mac, best effort */
+               dev_set_mac_address(new_slave->dev, &sa);
+
+               memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+       }
+
        /* curr_active_slave must be set before calling alb_swap_mac_addr */
        if (swap_slave) {
                /* swap mac address */
index c5eff5dafdfeab12ee4849fe75d9117d70c4859f..4226044efd083645db9229c0f88e507eae6d410b 100644 (file)
@@ -154,9 +154,7 @@ struct alb_bond_info {
        u8                      rx_ntt; /* flag - need to transmit
                                         * to all rx clients
                                         */
-       struct slave            *next_rx_slave;/* next slave to be assigned
-                                               * to a new rx client for
-                                               */
+       struct slave            *rx_slave;/* last slave to xmit from */
        u8                      primary_is_promisc;        /* boolean */
        u32                     rlb_promisc_timeout_counter;/* counts primary
                                                             * promiscuity time
index 55bbb8b8200c5bbd6949ab255015b3a23a174fc3..2daa066c6cdd2dfc9688f35611fc46ba0e25ff3e 100644 (file)
@@ -78,6 +78,7 @@
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
+#include <net/flow_keys.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -159,7 +160,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
 module_param(xmit_hash_policy, charp, 0);
 MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
                                   "0 for layer 2 (default), 1 for layer 3+4, "
-                                  "2 for layer 2+3");
+                                  "2 for layer 2+3, 3 for encap layer 2+3, "
+                                  "4 for encap layer 3+4");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
@@ -217,6 +219,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
 {      "layer2",               BOND_XMIT_POLICY_LAYER2},
 {      "layer3+4",             BOND_XMIT_POLICY_LAYER34},
 {      "layer2+3",             BOND_XMIT_POLICY_LAYER23},
+{      "encap2+3",             BOND_XMIT_POLICY_ENCAP23},
+{      "encap3+4",             BOND_XMIT_POLICY_ENCAP34},
 {      NULL,                   -1},
 };
 
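
xmit_hashtype_tbl above gains entries for the two new encapsulation-aware policies: the string is what the xmit_hash_policy option accepts and the value is the matching BOND_XMIT_POLICY_* constant. A minimal sketch of how a bond_parm_tbl entry is resolved to a mode (the lookup helper is illustrative; the driver has its own option parser):

    /* Illustration only: map a policy name from a bond_parm_tbl (such as
     * xmit_hashtype_tbl above) to its mode value. */
    static int lookup_policy(const struct bond_parm_tbl *tbl, const char *name)
    {
            int i;

            for (i = 0; tbl[i].modename; i++)
                    if (!strcmp(tbl[i].modename, name))
                            return tbl[i].mode;     /* e.g. BOND_XMIT_POLICY_ENCAP23 */
            return -1;
    }
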
@@ -332,10 +336,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
                                __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        int res;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                res = vlan_vid_add(slave->dev, proto, vid);
                if (res)
                        goto unwind;
@@ -344,9 +349,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
        return 0;
 
 unwind:
-       /* unwind from the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave)
-               vlan_vid_del(slave->dev, proto, vid);
+       /* unwind to the slave that failed */
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               if (rollback_slave == slave)
+                       break;
+
+               vlan_vid_del(rollback_slave->dev, proto, vid);
+       }
 
        return res;
 }
@@ -360,9 +369,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
                                 __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                vlan_vid_del(slave->dev, proto, vid);
 
        if (bond_is_lb(bond))
@@ -382,15 +392,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
  */
 static int bond_set_carrier(struct bonding *bond)
 {
+       struct list_head *iter;
        struct slave *slave;
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto down;
 
        if (bond->params.mode == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP) {
                        if (!netif_carrier_ok(bond->dev)) {
                                netif_carrier_on(bond->dev);
@@ -522,7 +533,9 @@ static int bond_check_dev_link(struct bonding *bond,
  */
 static int bond_set_promiscuity(struct bonding *bond, int inc)
 {
+       struct list_head *iter;
        int err = 0;
+
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
@@ -532,7 +545,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        } else {
                struct slave *slave;
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        err = dev_set_promiscuity(slave->dev, inc);
                        if (err)
                                return err;
@@ -546,7 +559,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
  */
 static int bond_set_allmulti(struct bonding *bond, int inc)
 {
+       struct list_head *iter;
        int err = 0;
+
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
@@ -556,7 +571,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        } else {
                struct slave *slave;
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        err = dev_set_allmulti(slave->dev, inc);
                        if (err)
                                return err;
@@ -774,43 +789,24 @@ static bool bond_should_change_active(struct bonding *bond)
 /**
  * bond_find_best_slave - select the best available slave to be the active one
  * @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
  */
 static struct slave *bond_find_best_slave(struct bonding *bond)
 {
-       struct slave *new_active, *old_active;
-       struct slave *bestslave = NULL;
+       struct slave *slave, *bestslave = NULL;
+       struct list_head *iter;
        int mintime = bond->params.updelay;
-       int i;
-
-       new_active = bond->curr_active_slave;
 
-       if (!new_active) { /* there were no active slaves left */
-               new_active = bond_first_slave(bond);
-               if (!new_active)
-                       return NULL; /* still no slave, return NULL */
-       }
-
-       if ((bond->primary_slave) &&
-           bond->primary_slave->link == BOND_LINK_UP &&
-           bond_should_change_active(bond)) {
-               new_active = bond->primary_slave;
-       }
-
-       /* remember where to stop iterating over the slaves */
-       old_active = new_active;
-
-       bond_for_each_slave_from(bond, new_active, i, old_active) {
-               if (new_active->link == BOND_LINK_UP) {
-                       return new_active;
-               } else if (new_active->link == BOND_LINK_BACK &&
-                          IS_UP(new_active->dev)) {
-                       /* link up, but waiting for stabilization */
-                       if (new_active->delay < mintime) {
-                               mintime = new_active->delay;
-                               bestslave = new_active;
-                       }
+       if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+           bond_should_change_active(bond))
+               return bond->primary_slave;
+
+       bond_for_each_slave(bond, slave, iter) {
+               if (slave->link == BOND_LINK_UP)
+                       return slave;
+               if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+                   slave->delay < mintime) {
+                       mintime = slave->delay;
+                       bestslave = slave;
                }
        }
 
@@ -971,35 +967,6 @@ void bond_select_active_slave(struct bonding *bond)
        }
 }
 
-/*--------------------------- slave list handling ---------------------------*/
-
-/*
- * This function attaches the slave to the end of list.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
-{
-       list_add_tail_rcu(&new_slave->list, &bond->slave_list);
-       bond->slave_cnt++;
-}
-
-/*
- * This function detaches the slave from the list.
- * WARNING: no check is made to verify if the slave effectively
- * belongs to <bond>.
- * Nothing is freed on return, structures are just unchained.
- * If any slave pointer in bond was pointing to <slave>,
- * it should be changed by the calling function.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_detach_slave(struct bonding *bond, struct slave *slave)
-{
-       list_del_rcu(&slave->list);
-       bond->slave_cnt--;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline int slave_enable_netpoll(struct slave *slave)
 {
@@ -1046,9 +1013,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                if (IS_UP(slave->dev))
                        slave_disable_netpoll(slave);
 }
@@ -1056,10 +1024,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
        struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
        struct slave *slave;
        int err = 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                err = slave_enable_netpoll(slave);
                if (err) {
                        bond_netpoll_cleanup(dev);
@@ -1087,10 +1056,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                           netdev_features_t features)
 {
        struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
        netdev_features_t mask;
        struct slave *slave;
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                /* Disable adding VLANs to empty bond. But why? --mq */
                features |= NETIF_F_VLAN_CHALLENGED;
                return features;
@@ -1100,7 +1070,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                features = netdev_increment_features(features,
                                                     slave->dev->features,
                                                     mask);
@@ -1118,16 +1088,17 @@ static void bond_compute_features(struct bonding *bond)
 {
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+       struct net_device *bond_dev = bond->dev;
+       struct list_head *iter;
+       struct slave *slave;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int gso_max_size = GSO_MAX_SIZE;
-       struct net_device *bond_dev = bond->dev;
        u16 gso_max_segs = GSO_MAX_SEGS;
-       struct slave *slave;
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto done;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                vlan_features = netdev_increment_features(vlan_features,
                        slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
@@ -1233,11 +1204,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 }
 
 static int bond_master_upper_dev_link(struct net_device *bond_dev,
-                                     struct net_device *slave_dev)
+                                     struct net_device *slave_dev,
+                                     struct slave *slave)
 {
        int err;
 
-       err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+       err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
        if (err)
                return err;
        slave_dev->flags |= IFF_SLAVE;
@@ -1258,7 +1230,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-       struct slave *new_slave = NULL;
+       struct slave *new_slave = NULL, *prev_slave;
        struct sockaddr addr;
        int link_reporting;
        int res = 0, i;
@@ -1313,7 +1285,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * bond ether type mutual exclusion - don't allow slaves of dissimilar
         * ether type (e.g. ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
         */
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                if (bond_dev->type != slave_dev->type) {
                        pr_debug("%s: change device type from %d to %d\n",
                                 bond_dev->name,
@@ -1352,7 +1324,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
-               if (list_empty(&bond->slave_list)) {
+               if (!bond_has_slaves(bond)) {
                        pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
                                   bond_dev->name);
                        bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1340,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (list_empty(&bond->slave_list) &&
+       if (!bond_has_slaves(bond) &&
            bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
 
@@ -1377,7 +1349,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                res = -ENOMEM;
                goto err_undo_flags;
        }
-       INIT_LIST_HEAD(&new_slave->list);
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
@@ -1413,17 +1384,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       res = bond_master_upper_dev_link(bond_dev, slave_dev);
-       if (res) {
-               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
-               goto err_restore_mac;
-       }
-
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
                pr_debug("Opening slave %s failed\n", slave_dev->name);
-               goto err_unset_master;
+               goto err_restore_mac;
        }
 
        new_slave->bond = bond;
@@ -1479,21 +1444,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                goto err_close;
        }
 
-       write_lock_bh(&bond->lock);
-
-       bond_attach_slave(bond, new_slave);
+       prev_slave = bond_last_slave(bond);
 
        new_slave->delay = 0;
        new_slave->link_failure_count = 0;
 
-       write_unlock_bh(&bond->lock);
-
-       bond_compute_features(bond);
-
        bond_update_speed_duplex(new_slave);
 
-       read_lock(&bond->lock);
-
        new_slave->last_arp_rx = jiffies -
                (msecs_to_jiffies(bond->params.arp_interval) + 1);
        for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1511,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       write_lock_bh(&bond->curr_slave_lock);
-
        switch (bond->params.mode) {
        case BOND_MODE_ACTIVEBACKUP:
                bond_set_slave_inactive_flags(new_slave);
-               bond_select_active_slave(bond);
                break;
        case BOND_MODE_8023AD:
                /* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1522,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 */
                bond_set_slave_inactive_flags(new_slave);
                /* if this is the first slave */
-               if (bond_first_slave(bond) == new_slave) {
+               if (!prev_slave) {
                        SLAVE_AD_INFO(new_slave).id = 1;
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
-                       struct slave *prev_slave;
-
-                       prev_slave = bond_prev_slave(bond, new_slave);
                        SLAVE_AD_INFO(new_slave).id =
                                SLAVE_AD_INFO(prev_slave).id + 1;
                }
@@ -1588,7 +1539,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        case BOND_MODE_ALB:
                bond_set_active_slave(new_slave);
                bond_set_slave_inactive_flags(new_slave);
-               bond_select_active_slave(bond);
                break;
        default:
                pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1556,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                break;
        } /* switch(bond_mode) */
 
-       write_unlock_bh(&bond->curr_slave_lock);
-
-       bond_set_carrier(bond);
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
        slave_dev->npinfo = bond->dev->npinfo;
        if (slave_dev->npinfo) {
@@ -1624,17 +1570,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 #endif
 
-       read_unlock(&bond->lock);
-
-       res = bond_create_slave_symlinks(bond_dev, slave_dev);
-       if (res)
-               goto err_detach;
-
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
        if (res) {
                pr_debug("Error %d calling netdev_rx_handler_register\n", res);
-               goto err_dest_symlinks;
+               goto err_detach;
+       }
+
+       res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+       if (res) {
+               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+               goto err_unregister;
+       }
+
+       bond->slave_cnt++;
+       bond_compute_features(bond);
+       bond_set_carrier(bond);
+
+       if (USES_PRIMARY(bond->params.mode)) {
+               read_lock(&bond->lock);
+               write_lock_bh(&bond->curr_slave_lock);
+               bond_select_active_slave(bond);
+               write_unlock_bh(&bond->curr_slave_lock);
+               read_unlock(&bond->lock);
        }
 
        pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1604,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        return 0;
 
 /* Undo stages on error */
-err_dest_symlinks:
-       bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+       netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
        if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1613,6 @@ err_detach:
 
        vlan_vids_del_by_dev(slave_dev, bond_dev);
        write_lock_bh(&bond->lock);
-       bond_detach_slave(bond, new_slave);
        if (bond->primary_slave == new_slave)
                bond->primary_slave = NULL;
        if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1632,6 @@ err_close:
        slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
-err_unset_master:
-       bond_upper_dev_unlink(bond_dev, slave_dev);
-
 err_restore_mac:
        if (!bond->params.fail_over_mac) {
                /* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1650,8 @@ err_free:
        kfree(new_slave);
 
 err_undo_flags:
-       bond_compute_features(bond);
        /* Enslaving the first slave failed and we need to fix the master's mac */
-       if (list_empty(&bond->slave_list) &&
+       if (!bond_has_slaves(bond) &&
            ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
                eth_hw_addr_random(bond_dev);
 
@@ -1724,6 +1677,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *oldcurrent;
        struct sockaddr addr;
+       int old_flags = bond_dev->flags;
        netdev_features_t old_features = bond_dev->features;
 
        /* slave is not a slave or master is not master of this slave */
@@ -1748,6 +1702,11 @@ static int __bond_release_one(struct net_device *bond_dev,
        }
 
        write_unlock_bh(&bond->lock);
+
+       /* release the slave from its bond */
+       bond->slave_cnt--;
+
+       bond_upper_dev_unlink(bond_dev, slave_dev);
        /* unregister rx_handler early so bond_handle_frame wouldn't be called
         * for this slave anymore.
         */
@@ -1771,12 +1730,9 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        bond->current_arp_slave = NULL;
 
-       /* release the slave from its bond */
-       bond_detach_slave(bond, slave);
-
        if (!all && !bond->params.fail_over_mac) {
                if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
-                   !list_empty(&bond->slave_list))
+                   bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
                                   bond_dev->name, slave_dev->name,
                                   slave->perm_hwaddr,
@@ -1819,7 +1775,7 @@ static int __bond_release_one(struct net_device *bond_dev,
                write_lock_bh(&bond->lock);
        }
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
 
@@ -1835,7 +1791,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        unblock_netpoll_tx();
        synchronize_rcu();
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
                call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
        }
@@ -1847,27 +1803,29 @@ static int __bond_release_one(struct net_device *bond_dev,
                        bond_dev->name, slave_dev->name, bond_dev->name);
 
        /* must do this from outside any spinlocks */
-       bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
        vlan_vids_del_by_dev(slave_dev, bond_dev);
 
        /* If the mode USES_PRIMARY, then this case was handled above by
         * bond_change_active_slave(..., NULL)
         */
        if (!USES_PRIMARY(bond->params.mode)) {
-               /* unset promiscuity level from slave */
-               if (bond_dev->flags & IFF_PROMISC)
+               /* unset promiscuity level from slave
+                * NOTE: The NETDEV_CHANGEADDR call above may change the value
+                * of the IFF_PROMISC flag in the bond_dev, but we need the
+                * value of that flag before that change, as that was the value
+                * when this slave was attached, so we cache at the start of the
+                * function and use it here. Same goes for ALLMULTI below
+                */
+               if (old_flags & IFF_PROMISC)
                        dev_set_promiscuity(slave_dev, -1);
 
                /* unset allmulti level from slave */
-               if (bond_dev->flags & IFF_ALLMULTI)
+               if (old_flags & IFF_ALLMULTI)
                        dev_set_allmulti(slave_dev, -1);
 
                bond_hw_addr_flush(bond_dev, slave_dev);
        }
 
-       bond_upper_dev_unlink(bond_dev, slave_dev);
-
        slave_disable_netpoll(slave);
 
        /* close slave before restoring its mac address */
@@ -1906,7 +1864,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        int ret;
 
        ret = bond_release(bond_dev, slave_dev);
-       if (ret == 0 && list_empty(&bond->slave_list)) {
+       if (ret == 0 && !bond_has_slaves(bond)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                pr_info("%s: destroying bond %s.\n",
                        bond_dev->name, bond_dev->name);
@@ -1915,61 +1873,6 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        return ret;
 }
 
-/*
- * This function changes the active slave to slave <slave_dev>.
- * It returns -EINVAL in the following cases.
- *  - <slave_dev> is not found in the list.
- *  - There is not active slave now.
- *  - <slave_dev> is already active.
- *  - The link state of <slave_dev> is not BOND_LINK_UP.
- *  - <slave_dev> is not running.
- * In these cases, this function does nothing.
- * In the other cases, current_slave pointer is changed and 0 is returned.
- */
-static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *old_active = NULL;
-       struct slave *new_active = NULL;
-       int res = 0;
-
-       if (!USES_PRIMARY(bond->params.mode))
-               return -EINVAL;
-
-       /* Verify that bond_dev is indeed the master of slave_dev */
-       if (!(slave_dev->flags & IFF_SLAVE) ||
-           !netdev_has_upper_dev(slave_dev, bond_dev))
-               return -EINVAL;
-
-       read_lock(&bond->lock);
-
-       old_active = bond->curr_active_slave;
-       new_active = bond_get_slave_by_dev(bond, slave_dev);
-       /*
-        * Changing to the current active: do nothing; return success.
-        */
-       if (new_active && new_active == old_active) {
-               read_unlock(&bond->lock);
-               return 0;
-       }
-
-       if (new_active &&
-           old_active &&
-           new_active->link == BOND_LINK_UP &&
-           IS_UP(new_active->dev)) {
-               block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
-               bond_change_active_slave(bond, new_active);
-               write_unlock_bh(&bond->curr_slave_lock);
-               unblock_netpoll_tx();
-       } else
-               res = -EINVAL;
-
-       read_unlock(&bond->lock);
-
-       return res;
-}
-
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
@@ -1987,11 +1890,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        int i = 0, res = -ENODEV;
        struct slave *slave;
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (i++ == (int)info->slave_id) {
                        res = 0;
                        strcpy(info->slave_name, slave->dev->name);
@@ -2012,12 +1916,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 static int bond_miimon_inspect(struct bonding *bond)
 {
        int link_state, commit = 0;
+       struct list_head *iter;
        struct slave *slave;
        bool ignore_updelay;
 
        ignore_updelay = !bond->curr_active_slave ? true : false;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2111,9 +2016,10 @@ static int bond_miimon_inspect(struct bonding *bond)
 
 static void bond_miimon_commit(struct bonding *bond)
 {
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2218,7 +2124,7 @@ void bond_mii_monitor(struct work_struct *work)
 
        delay = msecs_to_jiffies(bond->params.miimon);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -2267,7 +2173,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
                return true;
 
        rcu_read_lock();
-       netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+       netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                if (ip == bond_confirm_addr(upper, 0, ip)) {
                        ret = true;
                        break;
@@ -2342,10 +2248,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 *
                 * TODO: QinQ?
                 */
-               netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+                                                 vlan_iter) {
                        if (!is_vlan_dev(vlan_upper))
                                continue;
-                       netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+                                                         iter) {
                                if (upper == rt->dst.dev) {
                                        vlan_id = vlan_dev_vlan_id(vlan_upper);
                                        rcu_read_unlock();
@@ -2358,7 +2266,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 * our upper vlans, then just search for any dev that
                 * matches, and in case it's a vlan - save the id
                 */
-               netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                        if (upper == rt->dst.dev) {
                                /* if it's a vlan - get its VID */
                                if (is_vlan_dev(upper))
@@ -2505,11 +2413,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
+       struct list_head *iter;
        int do_failover = 0;
 
        read_lock(&bond->lock);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        oldcurrent = bond->curr_active_slave;
@@ -2521,7 +2430,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         * TODO: what about up/down delay in arp mode? it wasn't here before
         *       so it can wait
         */
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
                if (slave->link != BOND_LINK_UP) {
@@ -2612,10 +2521,11 @@ re_arm:
 static int bond_ab_arp_inspect(struct bonding *bond)
 {
        unsigned long trans_start, last_rx;
+       struct list_head *iter;
        struct slave *slave;
        int commit = 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
                last_rx = slave_last_rx(bond, slave);
 
@@ -2682,9 +2592,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 static void bond_ab_arp_commit(struct bonding *bond)
 {
        unsigned long trans_start;
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2755,8 +2666,9 @@ do_failover:
  */
 static void bond_ab_arp_probe(struct bonding *bond)
 {
-       struct slave *slave, *next_slave;
-       int i;
+       struct slave *slave, *before = NULL, *new_slave = NULL;
+       struct list_head *iter;
+       bool found = false;
 
        read_lock(&bond->curr_slave_lock);
 
@@ -2786,18 +2698,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
 
        bond_set_slave_inactive_flags(bond->current_arp_slave);
 
-       /* search for next candidate */
-       next_slave = bond_next_slave(bond, bond->current_arp_slave);
-       bond_for_each_slave_from(bond, slave, i, next_slave) {
-               if (IS_UP(slave->dev)) {
-                       slave->link = BOND_LINK_BACK;
-                       bond_set_slave_active_flags(slave);
-                       bond_arp_send_all(bond, slave);
-                       slave->jiffies = jiffies;
-                       bond->current_arp_slave = slave;
-                       break;
-               }
+       bond_for_each_slave(bond, slave, iter) {
+               if (!found && !before && IS_UP(slave->dev))
+                       before = slave;
 
+               if (found && !new_slave && IS_UP(slave->dev))
+                       new_slave = slave;
                /* if the link state is up at this point, we
                 * mark it down - this can happen if we have
                 * simultaneous link failures and
@@ -2805,7 +2711,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
                 * one the current slave so it is still marked
                 * up when it is actually down
                 */
-               if (slave->link == BOND_LINK_UP) {
+               if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
                        slave->link = BOND_LINK_DOWN;
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
@@ -2815,7 +2721,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
                        pr_info("%s: backup interface %s is now down.\n",
                                bond->dev->name, slave->dev->name);
                }
+               if (slave == bond->current_arp_slave)
+                       found = true;
        }
+
+       if (!new_slave && before)
+               new_slave = before;
+
+       if (!new_slave)
+               return;
+
+       new_slave->link = BOND_LINK_BACK;
+       bond_set_slave_active_flags(new_slave);
+       bond_arp_send_all(bond, new_slave);
+       new_slave->jiffies = jiffies;
+       bond->current_arp_slave = new_slave;
+
 }
 
 void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2829,7 +2750,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -3026,99 +2947,85 @@ static struct notifier_block bond_netdev_notifier = {
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
 
        if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
-               return (data->h_dest[5] ^ data->h_source[5]) % count;
+               return data->h_dest[5] ^ data->h_source[5];
 
        return 0;
 }
 
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+                             struct flow_keys *fk)
 {
-       const struct ethhdr *data;
+       const struct ipv6hdr *iph6;
        const struct iphdr *iph;
-       const struct ipv6hdr *ipv6h;
-       u32 v6hash;
-       const __be32 *s, *d;
+       int noff, proto = -1;
 
-       if (skb->protocol == htons(ETH_P_IP) &&
-           pskb_network_may_pull(skb, sizeof(*iph))) {
+       if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+               return skb_flow_dissect(skb, fk);
+
+       fk->ports = 0;
+       noff = skb_network_offset(skb);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+                       return false;
                iph = ip_hdr(skb);
-               data = (struct ethhdr *)skb->data;
-               return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-                       (data->h_dest[5] ^ data->h_source[5])) % count;
-       } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
-               ipv6h = ipv6_hdr(skb);
-               data = (struct ethhdr *)skb->data;
-               s = &ipv6h->saddr.s6_addr32[0];
-               d = &ipv6h->daddr.s6_addr32[0];
-               v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-               v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
-               return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
-       }
-
-       return bond_xmit_hash_policy_l2(skb, count);
+               fk->src = iph->saddr;
+               fk->dst = iph->daddr;
+               noff += iph->ihl << 2;
+               if (!ip_is_fragment(iph))
+                       proto = iph->protocol;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+                       return false;
+               iph6 = ipv6_hdr(skb);
+               fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+               fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+               noff += sizeof(*iph6);
+               proto = iph6->nexthdr;
+       } else {
+               return false;
+       }
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+               fk->ports = skb_flow_get_ports(skb, noff, proto);
+
+       return true;
 }
 
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device
+ * which will be reduced modulo count before returning.
  */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
 {
-       u32 layer4_xor = 0;
-       const struct iphdr *iph;
-       const struct ipv6hdr *ipv6h;
-       const __be32 *s, *d;
-       const __be16 *l4 = NULL;
-       __be16 _l4[2];
-       int noff = skb_network_offset(skb);
-       int poff;
-
-       if (skb->protocol == htons(ETH_P_IP) &&
-           pskb_may_pull(skb, noff + sizeof(*iph))) {
-               iph = ip_hdr(skb);
-               poff = proto_ports_offset(iph->protocol);
+       struct flow_keys flow;
+       u32 hash;
 
-               if (!ip_is_fragment(iph) && poff >= 0) {
-                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
-                                               sizeof(_l4), &_l4);
-                       if (l4)
-                               layer4_xor = ntohs(l4[0] ^ l4[1]);
-               }
-               return (layer4_xor ^
-                       ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-       } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
-               ipv6h = ipv6_hdr(skb);
-               poff = proto_ports_offset(ipv6h->nexthdr);
-               if (poff >= 0) {
-                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
-                                               sizeof(_l4), &_l4);
-                       if (l4)
-                               layer4_xor = ntohs(l4[0] ^ l4[1]);
-               }
-               s = &ipv6h->saddr.s6_addr32[0];
-               d = &ipv6h->daddr.s6_addr32[0];
-               layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-               layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
-                              (layer4_xor >> 8);
-               return layer4_xor % count;
-       }
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+           !bond_flow_dissect(bond, skb, &flow))
+               return bond_eth_hash(skb) % count;
+
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+           bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+               hash = bond_eth_hash(skb);
+       else
+               hash = (__force u32)flow.ports;
+       hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+       hash ^= (hash >> 16);
+       hash ^= (hash >> 8);
 
-       return bond_xmit_hash_policy_l2(skb, count);
+       return hash % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
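
The hunk above folds the three per-policy hash callbacks into a single bond_xmit_hash(): bond_flow_dissect() pulls the L3 addresses (and, for the layer3+4 policies, the L4 ports) into a struct flow_keys, and the final value mixes Ethernet, network and transport data according to xmit_policy before the modulo. A rough userspace sketch of the same fold-and-modulo mixing over already-extracted fields (the struct and field names below are stand-ins, not the kernel's flow_keys):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the fields a flow dissector would fill in. */
    struct flow_fields {
            uint32_t src;   /* IPv4 address or folded IPv6 address */
            uint32_t dst;
            uint32_t ports; /* source/destination L4 ports packed together */
    };

    /* XOR the pieces together, then fold the upper bits down so small
     * modulo counts still see entropy from every byte of the hash. */
    static uint32_t xmit_hash(const struct flow_fields *fk, uint32_t eth_hash,
                              int use_l4, unsigned int count)
    {
            uint32_t hash = use_l4 ? fk->ports : eth_hash;

            hash ^= fk->src ^ fk->dst;
            hash ^= hash >> 16;
            hash ^= hash >> 8;
            return hash % count;
    }

    int main(void)
    {
            struct flow_fields fk = { 0xc0a80001u, 0xc0a80002u,
                                      (5000u << 16) | 80u };

            printf("slave index: %u\n", xmit_hash(&fk, 0x2au, 1, 4));
            return 0;
    }
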
@@ -3148,13 +3055,14 @@ static void bond_work_cancel_all(struct bonding *bond)
 static int bond_open(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
        /* reset slave->backup and slave->inactive */
        read_lock(&bond->lock);
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave);
@@ -3214,12 +3122,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct rtnl_link_stats64 temp;
+       struct list_head *iter;
        struct slave *slave;
 
        memset(stats, 0, sizeof(*stats));
 
        read_lock_bh(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                const struct rtnl_link_stats64 *sstats =
                        dev_get_stats(slave->dev, &temp);
 
@@ -3256,6 +3165,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 
 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
 {
+       struct bonding *bond = netdev_priv(bond_dev);
        struct net_device *slave_dev = NULL;
        struct ifbond k_binfo;
        struct ifbond __user *u_binfo = NULL;
@@ -3286,7 +3196,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 
 
                if (mii->reg_num == 1) {
-                       struct bonding *bond = netdev_priv(bond_dev);
                        mii->val_out = 0;
                        read_lock(&bond->lock);
                        read_lock(&bond->curr_slave_lock);
@@ -3358,7 +3267,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
                        break;
                case BOND_CHANGE_ACTIVE_OLD:
                case SIOCBONDCHANGEACTIVE:
-                       res = bond_ioctl_change_active(bond_dev, slave_dev);
+                       res = bond_option_active_slave_set(bond, slave_dev);
                        break;
                default:
                        res = -EOPNOTSUPP;
@@ -3386,22 +3295,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
 static void bond_set_rx_mode(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       ASSERT_RTNL();
 
+       rcu_read_lock();
        if (USES_PRIMARY(bond->params.mode)) {
-               slave = rtnl_dereference(bond->curr_active_slave);
+               slave = rcu_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
                        dev_mc_sync(slave->dev, bond_dev);
                }
        } else {
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave_rcu(bond, slave, iter) {
                        dev_uc_sync_multiple(slave->dev, bond_dev);
                        dev_mc_sync_multiple(slave->dev, bond_dev);
                }
        }
+       rcu_read_unlock();
 }
 
 static int bond_neigh_init(struct neighbour *n)
@@ -3464,7 +3375,8 @@ static int bond_neigh_setup(struct net_device *dev,
 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        int res = 0;
 
        pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3485,10 +3397,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave) {
-               pr_debug("s %p s->p %p c_m %p\n",
+       bond_for_each_slave(bond, slave, iter) {
+               pr_debug("s %p c_m %p\n",
                         slave,
-                        bond_prev_slave(bond, slave),
                         slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
@@ -3513,13 +3424,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 
 unwind:
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
+       bond_for_each_slave(bond, rollback_slave, iter) {
                int tmp_res;
 
-               tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+               if (rollback_slave == slave)
+                       break;
+
+               tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
                if (tmp_res) {
                        pr_debug("unwind err %d dev %s\n",
-                                tmp_res, slave->dev->name);
+                                tmp_res, rollback_slave->dev->name);
                }
        }
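
With bond_prev_slave() and bond_for_each_slave_continue_reverse() gone, the unwind path above simply re-walks the list from the head and stops at the slave whose dev_set_mtu() failed; bond_set_mac_address() below uses the same rollback shape. A small sketch of that "replay from the head up to the failing element" pattern, with illustrative apply()/revert() helpers standing in for the per-slave calls:

    #include <stdio.h>

    static int apply(int idx)   { return idx == 2 ? -1 : 0; } /* item 2 fails */
    static void revert(int idx) { printf("reverted %d\n", idx); }

    /* Apply a change to every item; on failure, roll back what was already
     * applied by walking from the head again and stopping at the failure. */
    static int apply_all(int n)
    {
            int i, failed = -1;

            for (i = 0; i < n; i++) {
                    if (apply(i) < 0) {
                            failed = i;
                            break;
                    }
            }
            if (failed < 0)
                    return 0;

            for (i = 0; i < n; i++) {
                    if (i == failed)        /* stop where the forward pass stopped */
                            break;
                    revert(i);
            }
            return -1;
    }

    int main(void)
    {
            apply_all(4);   /* reverts items 0 and 1 */
            return 0;
    }
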
 
@@ -3536,8 +3450,9 @@ unwind:
 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave, *rollback_slave;
        struct sockaddr *sa = addr, tmp_sa;
-       struct slave *slave;
+       struct list_head *iter;
        int res = 0;
 
        if (bond->params.mode == BOND_MODE_ALB)
@@ -3571,7 +3486,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
                pr_debug("slave %p %s\n", slave, slave->dev->name);
 
@@ -3603,13 +3518,16 @@ unwind:
        tmp_sa.sa_family = bond_dev->type;
 
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
+       bond_for_each_slave(bond, rollback_slave, iter) {
                int tmp_res;
 
-               tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+               if (rollback_slave == slave)
+                       break;
+
+               tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
                if (tmp_res) {
                        pr_debug("unwind err %d dev %s\n",
-                                tmp_res, slave->dev->name);
+                                tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3628,11 +3546,12 @@ unwind:
  */
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
+       struct list_head *iter;
        struct slave *slave;
        int i = slave_id;
 
        /* Here we start from the slave with slave_id */
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0) {
                        if (slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3643,7 +3562,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 
        /* Here we start from the first slave up to slave_id */
        i = slave_id;
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0)
                        break;
                if (slave_can_tx(slave)) {
@@ -3700,8 +3619,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
        return NETDEV_TX_OK;
 }
 
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor(), we determine the output device by using a pre-
  * determined xmit_hash_policy(), If the selected device is not enabled,
  * find the next active slave.
  */
@@ -3709,8 +3627,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       bond_xmit_slave_id(bond, skb,
-                          bond->xmit_hash_policy(skb, bond->slave_cnt));
+       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
 
        return NETDEV_TX_OK;
 }
@@ -3720,8 +3637,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave = NULL;
+       struct list_head *iter;
 
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_is_last_slave(bond, slave))
                        break;
                if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3746,22 +3664,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 
 /*------------------------- Device initialization ---------------------------*/
 
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
-       switch (bond->params.xmit_policy) {
-       case BOND_XMIT_POLICY_LAYER23:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
-               break;
-       case BOND_XMIT_POLICY_LAYER34:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
-               break;
-       case BOND_XMIT_POLICY_LAYER2:
-       default:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
-               break;
-       }
-}
-
 /*
  * Lookup the slave that corresponds to a qid
  */
@@ -3770,13 +3672,14 @@ static inline int bond_slave_override(struct bonding *bond,
 {
        struct slave *slave = NULL;
        struct slave *check_slave;
+       struct list_head *iter;
        int res = 1;
 
        if (!skb->queue_mapping)
                return 1;
 
        /* Find out if any slaves have the same mapping as this skb. */
-       bond_for_each_slave_rcu(bond, check_slave) {
+       bond_for_each_slave_rcu(bond, check_slave, iter) {
                if (check_slave->queue_id == skb->queue_mapping) {
                        slave = check_slave;
                        break;
@@ -3862,7 +3765,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
 
        rcu_read_lock();
-       if (!list_empty(&bond->slave_list))
+       if (bond_has_slaves(bond))
                ret = __bond_start_xmit(skb, dev);
        else
                kfree_skb(skb);
@@ -3871,43 +3774,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
-       struct net_device *bond_dev = bond->dev;
-
-       switch (mode) {
-       case BOND_MODE_ROUNDROBIN:
-               break;
-       case BOND_MODE_ACTIVEBACKUP:
-               break;
-       case BOND_MODE_XOR:
-               bond_set_xmit_hash_policy(bond);
-               break;
-       case BOND_MODE_BROADCAST:
-               break;
-       case BOND_MODE_8023AD:
-               bond_set_xmit_hash_policy(bond);
-               break;
-       case BOND_MODE_ALB:
-               /* FALLTHRU */
-       case BOND_MODE_TLB:
-               break;
-       default:
-               /* Should never happen, mode already checked */
-               pr_err("%s: Error: Unknown bonding mode %d\n",
-                      bond_dev->name, mode);
-               break;
-       }
-}
-
 static int bond_ethtool_get_settings(struct net_device *bond_dev,
                                     struct ethtool_cmd *ecmd)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        unsigned long speed = 0;
+       struct list_head *iter;
        struct slave *slave;
 
        ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3919,7 +3791,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (SLAVE_IS_OK(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
@@ -3987,14 +3859,13 @@ static void bond_destructor(struct net_device *bond_dev)
        free_netdev(bond_dev);
 }
 
-static void bond_setup(struct net_device *bond_dev)
+void bond_setup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
        /* initialize rwlocks */
        rwlock_init(&bond->lock);
        rwlock_init(&bond->curr_slave_lock);
-       INIT_LIST_HEAD(&bond->slave_list);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4004,7 +3875,6 @@ static void bond_setup(struct net_device *bond_dev)
        ether_setup(bond_dev);
        bond_dev->netdev_ops = &bond_netdev_ops;
        bond_dev->ethtool_ops = &bond_ethtool_ops;
-       bond_set_mode_ops(bond, bond->params.mode);
 
        bond_dev->destructor = bond_destructor;
 
@@ -4050,12 +3920,13 @@ static void bond_setup(struct net_device *bond_dev)
 static void bond_uninit(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *tmp_slave;
+       struct list_head *iter;
+       struct slave *slave;
 
        bond_netpoll_cleanup(bond_dev);
 
        /* Release the bonded slaves */
-       list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+       bond_for_each_slave(bond, slave, iter)
                __bond_release_one(bond_dev, slave->dev, true);
        pr_info("%s: released all slaves\n", bond_dev->name);
 
@@ -4488,32 +4359,11 @@ static int bond_init(struct net_device *bond_dev)
        return 0;
 }
 
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
-{
-       if (tb[IFLA_ADDRESS]) {
-               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
-                       return -EINVAL;
-               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
-                       return -EADDRNOTAVAIL;
-       }
-       return 0;
-}
-
-static unsigned int bond_get_num_tx_queues(void)
+unsigned int bond_get_num_tx_queues(void)
 {
        return tx_queues;
 }
 
-static struct rtnl_link_ops bond_link_ops __read_mostly = {
-       .kind                   = "bond",
-       .priv_size              = sizeof(struct bonding),
-       .setup                  = bond_setup,
-       .validate               = bond_validate,
-       .get_num_tx_queues      = bond_get_num_tx_queues,
-       .get_num_rx_queues      = bond_get_num_tx_queues, /* Use the same number
-                                                            as for TX queues */
-};
-
 /* Create a new bond based on the specified name and bonding parameters.
  * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4600,7 +4450,7 @@ static int __init bonding_init(void)
        if (res)
                goto out;
 
-       res = rtnl_link_register(&bond_link_ops);
+       res = bond_netlink_init();
        if (res)
                goto err_link;
 
@@ -4616,7 +4466,7 @@ static int __init bonding_init(void)
 out:
        return res;
 err:
-       rtnl_link_unregister(&bond_link_ops);
+       bond_netlink_fini();
 err_link:
        unregister_pernet_subsys(&bond_net_ops);
        goto out;
@@ -4629,7 +4479,7 @@ static void __exit bonding_exit(void)
 
        bond_destroy_debugfs();
 
-       rtnl_link_unregister(&bond_link_ops);
+       bond_netlink_fini();
        unregister_pernet_subsys(&bond_net_ops);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4646,4 +4496,3 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644 (file)
index 0000000..7661261
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "bonding.h"
+
+static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+       [IFLA_BOND_MODE]                = { .type = NLA_U8 },
+       [IFLA_BOND_ACTIVE_SLAVE]        = { .type = NLA_U32 },
+};
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+       return 0;
+}
+
+static int bond_changelink(struct net_device *bond_dev,
+                          struct nlattr *tb[], struct nlattr *data[])
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       int err;
+
+       if (data && data[IFLA_BOND_MODE]) {
+               int mode = nla_get_u8(data[IFLA_BOND_MODE]);
+
+               err = bond_option_mode_set(bond, mode);
+               if (err)
+                       return err;
+       }
+       if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+               int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
+               struct net_device *slave_dev;
+
+               if (ifindex == 0) {
+                       slave_dev = NULL;
+               } else {
+                       slave_dev = __dev_get_by_index(dev_net(bond_dev),
+                                                      ifindex);
+                       if (!slave_dev)
+                               return -ENODEV;
+               }
+               err = bond_option_active_slave_set(bond, slave_dev);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       int err;
+
+       err = bond_changelink(bond_dev, tb, data);
+       if (err < 0)
+               return err;
+
+       return register_netdevice(bond_dev);
+}
+
+static size_t bond_get_size(const struct net_device *bond_dev)
+{
+       return nla_total_size(sizeof(u8)) +    /* IFLA_BOND_MODE */
+              nla_total_size(sizeof(u32));    /* IFLA_BOND_ACTIVE_SLAVE */
+}
+
+static int bond_fill_info(struct sk_buff *skb,
+                         const struct net_device *bond_dev)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct net_device *slave_dev = bond_option_active_slave_get(bond);
+
+       if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
+           (slave_dev &&
+            nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+struct rtnl_link_ops bond_link_ops __read_mostly = {
+       .kind                   = "bond",
+       .priv_size              = sizeof(struct bonding),
+       .setup                  = bond_setup,
+       .maxtype                = IFLA_BOND_MAX,
+       .policy                 = bond_policy,
+       .validate               = bond_validate,
+       .newlink                = bond_newlink,
+       .changelink             = bond_changelink,
+       .get_size               = bond_get_size,
+       .fill_info              = bond_fill_info,
+       .get_num_tx_queues      = bond_get_num_tx_queues,
+       .get_num_rx_queues      = bond_get_num_tx_queues, /* Use the same number
+                                                            as for TX queues */
+};
+
+int __init bond_netlink_init(void)
+{
+       return rtnl_link_register(&bond_link_ops);
+}
+
+void bond_netlink_fini(void)
+{
+       rtnl_link_unregister(&bond_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("bond");
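
This new file moves bond creation and reconfiguration onto rtnl_link_ops with a proper nla_policy, so the mode and active slave become settable over netlink (e.g. when a bond is created with "ip link add bond0 type bond") instead of only through sysfs. One detail worth keeping in mind: .get_size must budget for every attribute .fill_info may emit, which is why the two nla_total_size() terms above have to be summed. A tiny userspace model of that accounting, assuming the usual 4-byte netlink attribute header and alignment:

    #include <stdio.h>

    #define NLA_HDRLEN 4                            /* header before each attribute */
    #define NLA_ALIGN(len) (((len) + 3) & ~3)       /* attributes are 4-byte aligned */

    /* Userspace model of nla_total_size(): header plus padded payload. */
    static int total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            /* budget for a u8 mode attribute and a u32 ifindex attribute */
            int need = total_size(1) + total_size(4);

            printf("%d bytes reserved\n", need);    /* 8 + 8 = 16 */
            return 0;
    }
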
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644 (file)
index 0000000..9a5223c
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * drivers/net/bond/bond_options.c - bonding options
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/rwlock.h>
+#include <linux/rcupdate.h>
+#include "bonding.h"
+
+static bool bond_mode_is_valid(int mode)
+{
+       int i;
+
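+       /* empty-bodied loop: just count the entries in bond_mode_tbl */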
+       for (i = 0; bond_mode_tbl[i].modename; i++);
+
+       return mode >= 0 && mode < i;
+}
+
+int bond_option_mode_set(struct bonding *bond, int mode)
+{
+       if (!bond_mode_is_valid(mode)) {
+               pr_err("invalid mode value %d.\n", mode);
+               return -EINVAL;
+       }
+
+       if (bond->dev->flags & IFF_UP) {
+               pr_err("%s: unable to update mode because interface is up.\n",
+                      bond->dev->name);
+               return -EPERM;
+       }
+
+       if (bond_has_slaves(bond)) {
+               pr_err("%s: unable to update mode because bond has slaves.\n",
+                       bond->dev->name);
+               return -EPERM;
+       }
+
+       if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
+               pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+                      bond->dev->name, bond_mode_tbl[mode].modename);
+               return -EINVAL;
+       }
+
+       /* don't cache arp_validate between modes */
+       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+       bond->params.mode = mode;
+       return 0;
+}
+
+static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
+                                                        struct slave *slave)
+{
+       return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+}
+
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+       struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+       return __bond_option_active_slave_get(bond, slave);
+}
+
+struct net_device *bond_option_active_slave_get(struct bonding *bond)
+{
+       return __bond_option_active_slave_get(bond, bond->curr_active_slave);
+}
+
+int bond_option_active_slave_set(struct bonding *bond,
+                                struct net_device *slave_dev)
+{
+       int ret = 0;
+
+       if (slave_dev) {
+               if (!netif_is_bond_slave(slave_dev)) {
+                       pr_err("Device %s is not bonding slave.\n",
+                              slave_dev->name);
+                       return -EINVAL;
+               }
+
+               if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
+                       pr_err("%s: Device %s is not our slave.\n",
+                              bond->dev->name, slave_dev->name);
+                       return -EINVAL;
+               }
+       }
+
+       if (!USES_PRIMARY(bond->params.mode)) {
+               pr_err("%s: Unable to change active slave; %s is in mode %d\n",
+                      bond->dev->name, bond->dev->name, bond->params.mode);
+               return -EINVAL;
+       }
+
+       block_netpoll_tx();
+       read_lock(&bond->lock);
+       write_lock_bh(&bond->curr_slave_lock);
+
+       /* check to see if we are clearing active */
+       if (!slave_dev) {
+               pr_info("%s: Clearing current active slave.\n",
+               bond->dev->name);
+               rcu_assign_pointer(bond->curr_active_slave, NULL);
+               bond_select_active_slave(bond);
+       } else {
+               struct slave *old_active = bond->curr_active_slave;
+               struct slave *new_active = bond_slave_get_rtnl(slave_dev);
+
+               BUG_ON(!new_active);
+
+               if (new_active == old_active) {
+                       /* do nothing */
+                       pr_info("%s: %s is already the current active slave.\n",
+                               bond->dev->name, new_active->dev->name);
+               } else {
+                       if (old_active && (new_active->link == BOND_LINK_UP) &&
+                           IS_UP(new_active->dev)) {
+                               pr_info("%s: Setting %s as active slave.\n",
+                                       bond->dev->name, new_active->dev->name);
+                               bond_change_active_slave(bond, new_active);
+                       } else {
+                               pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+                                      bond->dev->name, new_active->dev->name,
+                                      new_active->dev->name);
+                               ret = -EINVAL;
+                       }
+               }
+       }
+
+       write_unlock_bh(&bond->curr_slave_lock);
+       read_unlock(&bond->lock);
+       unblock_netpoll_tx();
+       return ret;
+}
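
bond_options.c concentrates the mode and active-slave sanity checks in one place, so the sysfs store handlers and the netlink changelink path above now share identical validation instead of duplicating it. A minimal sketch of that refactor shape - one setter with the checks and a thin text front end on top - where the struct, limits and error handling are illustrative rather than the driver's real state:

    #include <errno.h>
    #include <stdio.h>

    struct bond_cfg { int mode; int has_slaves; int is_up; };

    /* The single setter every front end (sysfs, netlink, ...) calls into. */
    static int option_mode_set(struct bond_cfg *b, int mode)
    {
            if (mode < 0 || mode > 6)
                    return -EINVAL;         /* unknown mode */
            if (b->is_up || b->has_slaves)
                    return -EPERM;          /* only while down and slave-less */
            b->mode = mode;
            return 0;
    }

    /* sysfs-style front end: parse the text, then reuse the common setter. */
    static int store_mode(struct bond_cfg *b, const char *buf)
    {
            int mode = (*buf >= '0' && *buf <= '9') ? *buf - '0' : -1;

            return option_mode_set(b, mode);
    }

    int main(void)
    {
            struct bond_cfg b = { 0, 0, 0 };

            printf("%d\n", store_mode(&b, "4"));     /* 0: accepted */
            printf("%d\n", option_mode_set(&b, 99)); /* -EINVAL: rejected */
            return 0;
    }
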
index 20a6ee25bb63e42cdf89c0273d8e1afa30234f26..fb868d6c22dac5c75ecd156e61089e7f6806b6c9 100644 (file)
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(&bond->lock)
 {
        struct bonding *bond = seq->private;
-       loff_t off = 0;
+       struct list_head *iter;
        struct slave *slave;
+       loff_t off = 0;
 
        /* make sure the bond won't be taken away */
        rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        if (*pos == 0)
                return SEQ_START_TOKEN;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                if (++off == *pos)
                        return slave;
 
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct bonding *bond = seq->private;
-       struct slave *slave = v;
+       struct list_head *iter;
+       struct slave *slave;
+       bool found = false;
 
        ++*pos;
        if (v == SEQ_START_TOKEN)
                return bond_first_slave(bond);
 
-       if (bond_is_last_slave(bond, slave))
+       if (bond_is_last_slave(bond, v))
                return NULL;
-       slave = bond_next_slave(bond, slave);
 
-       return slave;
+       bond_for_each_slave(bond, slave, iter) {
+               if (found)
+                       return slave;
+               if (slave == v)
+                       found = true;
+       }
+
+       return NULL;
 }
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
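
Because the new iterators expose no direct successor operation, bond_info_seq_next() above rescans the slave list, flips a found flag once it passes the current element and returns the entry after it (or NULL at the end of the list). The same rescan idea in a self-contained sketch over a toy singly linked list:

    #include <stddef.h>
    #include <stdio.h>

    struct node { int val; struct node *next; };

    /* Return the element after cur by rescanning from the head - the only
     * option when the iteration interface has no direct "next" step. */
    static struct node *next_after(struct node *head, struct node *cur)
    {
            struct node *n;
            int found = 0;

            for (n = head; n; n = n->next) {
                    if (found)
                            return n;
                    if (n == cur)
                            found = 1;
            }
            return NULL;    /* cur was the last element */
    }

    int main(void)
    {
            struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

            printf("%d\n", next_after(&a, &b)->val);        /* prints 3 */
            return 0;
    }
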
index c29b836749b6323fe86c35e7762b25a3c8596978..47749c970a01a67c189188767c41e94e2ff1fbd1 100644 (file)
@@ -168,41 +168,6 @@ static const struct class_attribute class_attr_bonding_masters = {
        .namespace = bonding_namespace,
 };
 
-int bond_create_slave_symlinks(struct net_device *master,
-                              struct net_device *slave)
-{
-       char linkname[IFNAMSIZ+7];
-       int ret = 0;
-
-       /* first, create a link from the slave back to the master */
-       ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
-                               "master");
-       if (ret)
-               return ret;
-       /* next, create a link from the master to the slave */
-       sprintf(linkname, "slave_%s", slave->name);
-       ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
-                               linkname);
-
-       /* free the master link created earlier in case of error */
-       if (ret)
-               sysfs_remove_link(&(slave->dev.kobj), "master");
-
-       return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
-                                struct net_device *slave)
-{
-       char linkname[IFNAMSIZ+7];
-
-       sysfs_remove_link(&(slave->dev.kobj), "master");
-       sprintf(linkname, "slave_%s", slave->name);
-       sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
 /*
  * Show the slaves in the current bond.
  */
@@ -210,11 +175,14 @@ static ssize_t bonding_show_slaves(struct device *d,
                                   struct device_attribute *attr, char *buf)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        int res = 0;
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       bond_for_each_slave(bond, slave, iter) {
                if (res > (PAGE_SIZE - IFNAMSIZ)) {
                        /* not enough space for another interface name */
                        if ((PAGE_SIZE - res) > 10)
@@ -224,7 +192,9 @@ static ssize_t bonding_show_slaves(struct device *d,
                }
                res += sprintf(buf + res, "%s ", slave->dev->name);
        }
-       read_unlock(&bond->lock);
+
+       rtnl_unlock();
+
        if (res)
                buf[res-1] = '\n'; /* eat the leftover space */
 
@@ -313,50 +283,26 @@ static ssize_t bonding_store_mode(struct device *d,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
-       int new_value, ret = count;
+       int new_value, ret;
        struct bonding *bond = to_bond(d);
 
-       if (!rtnl_trylock())
-               return restart_syscall();
-
-       if (bond->dev->flags & IFF_UP) {
-               pr_err("unable to update mode of %s because interface is up.\n",
-                      bond->dev->name);
-               ret = -EPERM;
-               goto out;
-       }
-
-       if (!list_empty(&bond->slave_list)) {
-               pr_err("unable to update mode of %s because it has slaves.\n",
-                       bond->dev->name);
-               ret = -EPERM;
-               goto out;
-       }
-
        new_value = bond_parse_parm(buf, bond_mode_tbl);
        if (new_value < 0)  {
                pr_err("%s: Ignoring invalid mode value %.*s.\n",
                       bond->dev->name, (int)strlen(buf) - 1, buf);
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
-       if ((new_value == BOND_MODE_ALB ||
-            new_value == BOND_MODE_TLB) &&
-           bond->params.arp_interval) {
-               pr_err("%s: %s mode is incompatible with arp monitoring.\n",
-                      bond->dev->name, bond_mode_tbl[new_value].modename);
-               ret = -EINVAL;
-               goto out;
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       ret = bond_option_mode_set(bond, new_value);
+       if (!ret) {
+               pr_info("%s: setting mode to %s (%d).\n",
+                       bond->dev->name, bond_mode_tbl[new_value].modename,
+                       new_value);
+               ret = count;
        }
 
-       /* don't cache arp_validate between modes */
-       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
-       bond->params.mode = new_value;
-       bond_set_mode_ops(bond, bond->params.mode);
-       pr_info("%s: setting mode to %s (%d).\n",
-               bond->dev->name, bond_mode_tbl[new_value].modename,
-               new_value);
-out:
        rtnl_unlock();
        return ret;
 }
@@ -392,7 +338,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
                ret = -EINVAL;
        } else {
                bond->params.xmit_policy = new_value;
-               bond_set_mode_ops(bond, bond->params.mode);
                pr_info("%s: setting xmit hash policy to %s (%d).\n",
                        bond->dev->name,
                        xmit_hashtype_tbl[new_value].modename, new_value);
@@ -522,7 +467,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
        if (!rtnl_trylock())
                return restart_syscall();
 
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
                       bond->dev->name);
                ret = -EPERM;
@@ -656,11 +601,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                                         const char *buf, size_t count)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        __be32 newtarget, *targets;
        unsigned long *targets_rx;
        int ind, i, j, ret = -EINVAL;
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        targets = bond->params.arp_targets;
        newtarget = in_aton(buf + 1);
        /* look for adds */
@@ -688,7 +637,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                         &newtarget);
                /* not to race with bond_arp_rcv */
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave)
+               bond_for_each_slave(bond, slave, iter)
                        slave->target_last_arp_rx[ind] = jiffies;
                targets[ind] = newtarget;
                write_unlock_bh(&bond->lock);
@@ -714,7 +663,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                        &newtarget);
 
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        targets_rx = slave->target_last_arp_rx;
                        j = ind;
                        for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -734,6 +683,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
 
        ret = count;
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -1111,6 +1061,7 @@ static ssize_t bonding_store_primary(struct device *d,
                                     const char *buf, size_t count)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        char ifname[IFNAMSIZ];
        struct slave *slave;
 
@@ -1138,7 +1089,7 @@ static ssize_t bonding_store_primary(struct device *d,
                goto out;
        }
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
                        pr_info("%s: Setting %s as primary slave.\n",
                                bond->dev->name, slave->dev->name);
@@ -1268,13 +1219,13 @@ static ssize_t bonding_show_active_slave(struct device *d,
                                         char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct slave *curr;
+       struct net_device *slave_dev;
        int count = 0;
 
        rcu_read_lock();
-       curr = rcu_dereference(bond->curr_active_slave);
-       if (USES_PRIMARY(bond->params.mode) && curr)
-               count = sprintf(buf, "%s\n", curr->dev->name);
+       slave_dev = bond_option_active_slave_get_rcu(bond);
+       if (slave_dev)
+               count = sprintf(buf, "%s\n", slave_dev->name);
        rcu_read_unlock();
 
        return count;
@@ -1284,80 +1235,33 @@ static ssize_t bonding_store_active_slave(struct device *d,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
 {
-       struct slave *slave, *old_active, *new_active;
+       int ret;
        struct bonding *bond = to_bond(d);
        char ifname[IFNAMSIZ];
+       struct net_device *dev;
 
        if (!rtnl_trylock())
                return restart_syscall();
 
-       old_active = new_active = NULL;
-       block_netpoll_tx();
-       read_lock(&bond->lock);
-       write_lock_bh(&bond->curr_slave_lock);
-
-       if (!USES_PRIMARY(bond->params.mode)) {
-               pr_info("%s: Unable to change active slave; %s is in mode %d\n",
-                       bond->dev->name, bond->dev->name, bond->params.mode);
-               goto out;
-       }
-
        sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
-       /* check to see if we are clearing active */
        if (!strlen(ifname) || buf[0] == '\n') {
-               pr_info("%s: Clearing current active slave.\n",
-                       bond->dev->name);
-               rcu_assign_pointer(bond->curr_active_slave, NULL);
-               bond_select_active_slave(bond);
-               goto out;
-       }
-
-       bond_for_each_slave(bond, slave) {
-               if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
-                       old_active = bond->curr_active_slave;
-                       new_active = slave;
-                       if (new_active == old_active) {
-                               /* do nothing */
-                               pr_info("%s: %s is already the current"
-                                       " active slave.\n",
-                                       bond->dev->name,
-                                       slave->dev->name);
-                               goto out;
-                       } else {
-                               if ((new_active) &&
-                                   (old_active) &&
-                                   (new_active->link == BOND_LINK_UP) &&
-                                   IS_UP(new_active->dev)) {
-                                       pr_info("%s: Setting %s as active"
-                                               " slave.\n",
-                                               bond->dev->name,
-                                               slave->dev->name);
-                                       bond_change_active_slave(bond,
-                                                                new_active);
-                               } else {
-                                       pr_info("%s: Could not set %s as"
-                                               " active slave; either %s is"
-                                               " down or the link is down.\n",
-                                               bond->dev->name,
-                                               slave->dev->name,
-                                               slave->dev->name);
-                               }
-                               goto out;
-                       }
+               dev = NULL;
+       } else {
+               dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+               if (!dev) {
+                       ret = -ENODEV;
+                       goto out;
                }
        }
 
-       pr_info("%s: Unable to set %.*s as active slave.\n",
-               bond->dev->name, (int)strlen(buf) - 1, buf);
- out:
-       write_unlock_bh(&bond->curr_slave_lock);
-       read_unlock(&bond->lock);
-       unblock_netpoll_tx();
+       ret = bond_option_active_slave_set(bond, dev);
+       if (!ret)
+               ret = count;
 
+ out:
        rtnl_unlock();
 
-       return count;
+       return ret;
 
 }
 static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1493,14 +1397,14 @@ static ssize_t bonding_show_queue_id(struct device *d,
                                     char *buf)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        int res = 0;
 
        if (!rtnl_trylock())
                return restart_syscall();
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
                        /* not enough space for another interface_name:queue_id pair */
                        if ((PAGE_SIZE - res) > 10)
@@ -1511,9 +1415,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
                res += sprintf(buf + res, "%s:%d ",
                               slave->dev->name, slave->queue_id);
        }
-       read_unlock(&bond->lock);
        if (res)
                buf[res-1] = '\n'; /* eat the leftover space */
+
        rtnl_unlock();
 
        return res;
@@ -1529,6 +1433,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 {
        struct slave *slave, *update_slave;
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        u16 qid;
        int ret = count;
        char *delim;
@@ -1561,11 +1466,9 @@ static ssize_t bonding_store_queue_id(struct device *d,
        if (!sdev)
                goto err_no_cmd;
 
-       read_lock(&bond->lock);
-
        /* Search for the slave and check for duplicate qids */
        update_slave = NULL;
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (sdev == slave->dev)
                        /*
                         * We don't need to check the matching
@@ -1573,23 +1476,20 @@ static ssize_t bonding_store_queue_id(struct device *d,
                         */
                        update_slave = slave;
                else if (qid && qid == slave->queue_id) {
-                       goto err_no_cmd_unlock;
+                       goto err_no_cmd;
                }
        }
 
        if (!update_slave)
-               goto err_no_cmd_unlock;
+               goto err_no_cmd;
 
        /* Actually set the qids for the slave */
        update_slave->queue_id = qid;
 
-       read_unlock(&bond->lock);
 out:
        rtnl_unlock();
        return ret;
 
-err_no_cmd_unlock:
-       read_unlock(&bond->lock);
 err_no_cmd:
        pr_info("invalid input for queue_id set for %s.\n",
                bond->dev->name);
@@ -1619,8 +1519,12 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 {
        struct bonding *bond = to_bond(d);
        int new_value, ret = count;
+       struct list_head *iter;
        struct slave *slave;
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (sscanf(buf, "%d", &new_value) != 1) {
                pr_err("%s: no all_slaves_active value specified.\n",
                       bond->dev->name);
@@ -1640,8 +1544,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                goto out;
        }
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (!bond_is_active_slave(slave)) {
                        if (new_value)
                                slave->inactive = 0;
@@ -1649,8 +1552,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                                slave->inactive = 1;
                }
        }
-       read_unlock(&bond->lock);
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
index 03cf3fd14490c4e4dcf8cd2d61f8bca99c9d55db..046a60535e04e3fc7f9e6be79aa8c8cc0bb1867f 100644 (file)
 #define TX_QUEUE_OVERRIDE(mode)                                \
                        (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
                         ((mode) == BOND_MODE_ROUNDROBIN))
+
+#define BOND_MODE_IS_LB(mode)                  \
+               (((mode) == BOND_MODE_TLB) ||   \
+                ((mode) == BOND_MODE_ALB))
+
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
        res; })
 
 /* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
 
 /* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
 #define bond_first_slave(bond) \
-       list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+       (bond_has_slaves(bond) ? \
+               netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+               NULL)
 #define bond_last_slave(bond) \
-       (list_empty(&(bond)->slave_list) ? NULL : \
-                                          bond_to_slave((bond)->slave_list.prev))
+       (bond_has_slaves(bond) ? \
+               netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+               NULL)
 
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
-       (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
-                                        bond_to_slave((pos)->list.next))
-
-#define bond_prev_slave(bond, pos) \
-       (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
-                                         bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond:      the bond holding this list.
- * @pos:       current slave.
- * @cnt:       counter for max number of moves
- * @start:     starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
-       for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
-            cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
 
 /**
  * bond_for_each_slave - iterate over all slaves
  * @bond:      the bond holding this list
  * @pos:       current slave
+ * @iter:      list_head * iterator
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave(bond, pos) \
-       list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+       netdev_for_each_lower_private((bond)->dev, pos, iter)
 
 /* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
-       list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond:      the bond holding this list
- * @pos:       slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
-       list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+       netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
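
The private slave list is gone from struct bonding: slaves now hang off the net_device lower adjacency list, and every bond_for_each_slave()/bond_for_each_slave_rcu() caller has to supply its own struct list_head *iter cursor, which is the source of the iter churn throughout this patch. A toy userspace macro showing why an iteration macro of this shape needs a caller-provided cursor (this models the calling convention only, not the kernel's netdev_for_each_lower_private()):

    #include <stdio.h>

    struct item { int id; };

    /* Like the new bond_for_each_slave(), the caller supplies the cursor
     * instead of the macro hiding one inside a private list head. */
    #define for_each_item(arr, n, pos, idx) \
            for ((idx) = 0; (idx) < (n) && ((pos) = &(arr)[idx], 1); (idx)++)

    int main(void)
    {
            struct item items[] = { { 1 }, { 2 }, { 3 } };
            struct item *pos;
            int iter;       /* the external cursor every caller must declare */

            for_each_item(items, 3, pos, iter)
                    printf("item %d\n", pos->id);
            return 0;
    }
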
@@ -188,7 +167,6 @@ struct bond_parm_tbl {
 
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
-       struct list_head list;
        struct bonding *bond; /* our master */
        int    delay;
        unsigned long jiffies;
@@ -228,7 +206,6 @@ struct slave {
  */
 struct bonding {
        struct   net_device *dev; /* first - useful for panic debug */
-       struct   list_head slave_list;
        struct   slave *curr_active_slave;
        struct   slave *current_arp_slave;
        struct   slave *primary_slave;
@@ -245,7 +222,6 @@ struct bonding {
        char     proc_file_name[IFNAMSIZ];
 #endif /* CONFIG_PROC_FS */
        struct   list_head bond_list;
-       int      (*xmit_hash_policy)(struct sk_buff *, int);
        u16      rr_tx_counter;
        struct   ad_bond_info ad_info;
        struct   alb_bond_info alb_info;
@@ -276,13 +252,7 @@ struct bonding {
 static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
                                                  struct net_device *slave_dev)
 {
-       struct slave *slave = NULL;
-
-       bond_for_each_slave(bond, slave)
-               if (slave->dev == slave_dev)
-                       return slave;
-
-       return NULL;
+       return netdev_lower_dev_get_private(bond->dev, slave_dev);
 }
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +264,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 
 static inline bool bond_is_lb(const struct bonding *bond)
 {
-       return (bond->params.mode == BOND_MODE_TLB ||
-               bond->params.mode == BOND_MODE_ALB);
+       return BOND_MODE_IS_LB(bond->params.mode);
 }
 
 static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +401,18 @@ static inline bool slave_can_tx(struct slave *slave)
 struct bond_net;
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
 void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 void bond_mii_monitor(struct work_struct *);
 void bond_loadbalance_arp_mon(struct work_struct *);
 void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
 int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +422,14 @@ void bond_debug_register(struct bonding *bond);
 void bond_debug_unregister(struct bonding *bond);
 void bond_debug_reregister(struct bonding *bond);
 const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+int bond_option_mode_set(struct bonding *bond, int mode);
+int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+struct net_device *bond_option_active_slave_get(struct bonding *bond);
 
 struct bond_net {
        struct net *            net;    /* Associated network namespace */
@@ -492,9 +466,24 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
 static inline struct slave *bond_slave_has_mac(struct bonding *bond,
                                               const u8 *mac)
 {
+       struct list_head *iter;
        struct slave *tmp;
 
-       bond_for_each_slave(bond, tmp)
+       bond_for_each_slave(bond, tmp, iter)
+               if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+                       return tmp;
+
+       return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+                                              const u8 *mac)
+{
+       struct list_head *iter;
+       struct slave *tmp;
+
+       bond_for_each_slave_rcu(bond, tmp, iter)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;
 
@@ -528,4 +517,7 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
 extern const struct bond_parm_tbl pri_reselect_tbl[];
 extern struct bond_parm_tbl ad_select_tbl[];
 
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
 #endif /* _LINUX_BONDING_H */
index 3b1ff6148702beb3818fbae7da2b0206f7d1c7f4..cf0f63e14e5339d4a6ed324076a4cad7854dfc48 100644 (file)
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
        priv->reg_base = addr;
        priv->devtype_data = *devtype_data;
        priv->clk = clk;
-       priv->pdata = pdev->dev.platform_data;
+       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->mb0_id = 0x7ff;
 
        netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
 
 static const struct platform_device_id at91_can_id_table[] = {
        {
-               .name = "at91_can",
+               .name = "at91sam9x5_can",
                .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
        }, {
-               .name = "at91sam9x5_can",
+               .name = "at91_can",
                .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
        }, {
                /* sentinel */
index a2700d25ff0ed87fb800d4c157aeef911234d26e..8a0b515b33ea57c5804f1c9a82b774be8aadee7f 100644 (file)
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
        struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
        unsigned short *pdata;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -EINVAL;
index b374be7891a296bde66696e19d2137a47d7ffea3..bce0be54c2f59587a2498d2f37821f2634b886d9 100644 (file)
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
        return 0;
 
 out_free_c_can:
-       pci_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 out_iounmap:
        pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 
        unregister_c_can_dev(dev);
 
-       pci_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 
        pci_iounmap(pdev, priv->base);
index 294ced3cc227520883c6ebe50ac255634d0f0f7c..d66ac265269c68883070f7857b1cd6d928531f0e 100644 (file)
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
-               .of_match_table = of_match_ptr(c_can_of_table),
+               .of_match_table = c_can_of_table,
        },
        .probe = c_can_plat_probe,
        .remove = c_can_plat_remove,
index 034bdd816a60c74104b00b5b203c69f120f0555d..ad76734b3ecc79556ee4eb59e5c2025a10824b48 100644 (file)
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
                                   struct cc770_priv *priv)
 {
 
-       struct cc770_platform_data *pdata = pdev->dev.platform_data;
+       struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
        priv->can.clock.freq = pdata->osc_freq;
        if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
 
        if (pdev->dev.of_node)
                err = cc770_get_of_node_data(pdev, priv);
-       else if (pdev->dev.platform_data)
+       else if (dev_get_platdata(&pdev->dev))
                err = cc770_get_platform_data(pdev, priv);
        else
                err = -ENODEV;
index f9cba4123c663084b66c2dbdaac5885a579278fd..1870c4731a572d193bef5e6dc0b7364e5f1751ed 100644 (file)
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
        size_t size;
 
        size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-       size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+       size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
        size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-       size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
-       size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+       size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+       size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
        if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-               size += sizeof(struct can_berr_counter);
+               size += nla_total_size(sizeof(struct can_berr_counter));
        if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
-               size += sizeof(struct can_bittiming_const);
+               size += nla_total_size(sizeof(struct can_bittiming_const));
 
        return size;
 }
index 71c677e651d7cbead0f673f53a93e89514e74eb3..ae08cf129ebbb0bda31f4b74893574b9703253f0 100644 (file)
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC                        BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0xf)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x1f)
 #define FLEXCAN_MCR_IDAM_A             (0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (1 << 8)
 #define FLEXCAN_MCR_IDAM_C             (2 << 8)
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->base;
-       unsigned int i;
        int err;
        u32 reg_mcr, reg_ctrl;
 
@@ -736,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
         *
         */
        reg_mcr = flexcan_read(&regs->mcr);
+       reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
        reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
                FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
-               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
+               FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
        netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
        flexcan_write(reg_mcr, &regs->mcr);
 
@@ -772,16 +773,9 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
-       for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
-               flexcan_write(0, &regs->cantxfg[i].can_ctrl);
-               flexcan_write(0, &regs->cantxfg[i].can_id);
-               flexcan_write(0, &regs->cantxfg[i].data[0]);
-               flexcan_write(0, &regs->cantxfg[i].data[1]);
-
-               /* put MB into rx queue */
-               flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
-                       &regs->cantxfg[i].can_ctrl);
-       }
+       /* Abort any pending TX, mark Mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+                     &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
        flexcan_write(0x0, &regs->rxgmask);
@@ -991,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
 }
 
 static const struct of_device_id flexcan_of_match[] = {
-       { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
-       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+       { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1074,7 +1068,7 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->dev = dev;
        priv->clk_ipg = clk_ipg;
        priv->clk_per = clk_per;
-       priv->pdata = pdev->dev.platform_data;
+       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->devtype_data = devtype_data;
 
        priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
index 36bd6fa1c7f3e4760b5f69f65b420f8d55b66a5f..ab5909a7bae9efa378c0466936002460205f5cf1 100644 (file)
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
        struct device *dev;
        int ret;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata)
                return -ENXIO;
 
index fe7dd696957ea3d8c4f7e34b21baa866bb797b55..08ac401e0214e091bdf8b851990dd8b45e0bec89 100644 (file)
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
 {
        struct net_device *net;
        struct mcp251x_priv *priv;
-       struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
        int ret = -ENODEV;
 
        if (!pdata)
index 9c24d60a23b1dbf459c75d14f7e3ac340350b9d0..e98abb97a0506672ab5cfb2af24fb5484fee61fe 100644 (file)
@@ -297,8 +297,8 @@ struct mscan_priv {
        struct napi_struct napi;
 };
 
-extern struct net_device *alloc_mscandev(void);
-extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
-extern void unregister_mscandev(struct net_device *dev);
+struct net_device *alloc_mscandev(void);
+int register_mscandev(struct net_device *dev, int mscan_clksrc);
+void unregister_mscandev(struct net_device *dev);
 
 #endif /* __MSCAN_H__ */
index 5c314a961970b0041c7776da283979f00c4114f8..5f0e9b3bfa7bb9a8f80f5267058b83c5db53f4c6 100644 (file)
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
                pci_disable_msi(priv->dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        pch_can_reset(priv);
        pci_iounmap(pdev, priv->regs);
        free_candev(priv->ndev);
index 3752342a678ac5320e68bf69f747b4f69034758d..835921388e7ba6ab4b6ee98306c629691421d8a7 100644 (file)
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
        kfree(card);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void ems_pci_card_reset(struct ems_pci_card *card)
index 217585b97cd3e0851439f8b388c040058b668100..087b13bd300e845a6231d80cfbb29ab0d3c8d41f 100644 (file)
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver kvaser_pci_driver = {
index 6b6f0ad75090c4ea463ae49b8e2041dbdaca55df..065ca49eb45e72c48c9d1cc5f8fbda0256fdb195 100644 (file)
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
        pci_iounmap(pdev, cfg_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver peak_pci_driver = {
index c52c1e96bf90741455eea01add4129fcfc590bad..f9b4f81cd86a4601abc8fbcb5dd7eaf7aaef81a5 100644 (file)
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
        kfree(card);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 /*
index 8e259c541036c575fc181796ebfe21f0c8798a1d..29f9b632118742eea67904bd445922b2773a894f 100644 (file)
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
        struct resource *res_mem, *res_irq;
        struct sja1000_platform_data *pdata;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -ENODEV;
index 874188ba06f7172fed36b93db05be06ae0e2bd59..25377e547f9b01f49167caa13f510c51fb2bd1c7 100644 (file)
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
 /* maximum rx buffer len: extended CAN frame with timestamp */
 #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
 
+#define SLC_CMD_LEN 1
+#define SLC_SFF_ID_LEN 3
+#define SLC_EFF_ID_LEN 8
+
 struct slcan {
        int                     magic;
 
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
 {
        struct sk_buff *skb;
        struct can_frame cf;
-       int i, dlc_pos, tmp;
-       unsigned long ultmp;
-       char cmd = sl->rbuff[0];
-
-       if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
+       int i, tmp;
+       u32 tmpid;
+       char *cmd = sl->rbuff;
+
+       cf.can_id = 0;
+
+       switch (*cmd) {
+       case 'r':
+               cf.can_id = CAN_RTR_FLAG;
+               /* fallthrough */
+       case 't':
+               /* store dlc ASCII value and terminate SFF CAN ID string */
+               cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
+               sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
+               /* point to payload data behind the dlc */
+               cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
+               break;
+       case 'R':
+               cf.can_id = CAN_RTR_FLAG;
+               /* fallthrough */
+       case 'T':
+               cf.can_id |= CAN_EFF_FLAG;
+               /* store dlc ASCII value and terminate EFF CAN ID string */
+               cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
+               sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
+               /* point to payload data behind the dlc */
+               cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
+               break;
+       default:
                return;
+       }
 
-       if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
-               dlc_pos = 4; /* dlc position tiiid */
-       else
-               dlc_pos = 9; /* dlc position Tiiiiiiiid */
-
-       if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
+       if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
                return;
 
-       cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
+       cf.can_id |= tmpid;
 
-       sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
-
-       if (kstrtoul(sl->rbuff+1, 16, &ultmp))
+       /* get can_dlc from sanitized ASCII value */
+       if (cf.can_dlc >= '0' && cf.can_dlc < '9')
+               cf.can_dlc -= '0';
+       else
                return;
 
-       cf.can_id = ultmp;
-
-       if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
-               cf.can_id |= CAN_EFF_FLAG;
-
-       if ((cmd | 0x20) == 'r') /* RTR frame */
-               cf.can_id |= CAN_RTR_FLAG;
-
        *(u64 *) (&cf.data) = 0; /* clear payload */
 
-       for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
-               tmp = hex_to_bin(sl->rbuff[dlc_pos++]);
-               if (tmp < 0)
-                       return;
-               cf.data[i] = (tmp << 4);
-               tmp = hex_to_bin(sl->rbuff[dlc_pos++]);
-               if (tmp < 0)
-                       return;
-               cf.data[i] |= tmp;
+       /* RTR frames may have a dlc > 0 but they never have any data bytes */
+       if (!(cf.can_id & CAN_RTR_FLAG)) {
+               for (i = 0; i < cf.can_dlc; i++) {
+                       tmp = hex_to_bin(*cmd++);
+                       if (tmp < 0)
+                               return;
+                       cf.data[i] = (tmp << 4);
+                       tmp = hex_to_bin(*cmd++);
+                       if (tmp < 0)
+                               return;
+                       cf.data[i] |= tmp;
+               }
        }
 
        skb = dev_alloc_skb(sizeof(struct can_frame) +
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
 /* parse tty input stream */
 static void slcan_unesc(struct slcan *sl, unsigned char s)
 {
-
        if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
                if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
                    (sl->rcount > 4))  {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
 /* Encapsulate one can_frame and stuff into a TTY queue. */
 static void slc_encaps(struct slcan *sl, struct can_frame *cf)
 {
-       int actual, idx, i;
-       char cmd;
+       int actual, i;
+       unsigned char *pos;
+       unsigned char *endpos;
+       canid_t id = cf->can_id;
+
+       pos = sl->xbuff;
 
        if (cf->can_id & CAN_RTR_FLAG)
-               cmd = 'R'; /* becomes 'r' in standard frame format */
+               *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
        else
-               cmd = 'T'; /* becomes 't' in standard frame format */
+               *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
 
-       if (cf->can_id & CAN_EFF_FLAG)
-               sprintf(sl->xbuff, "%c%08X%d", cmd,
-                       cf->can_id & CAN_EFF_MASK, cf->can_dlc);
-       else
-               sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
-                       cf->can_id & CAN_SFF_MASK, cf->can_dlc);
+       /* determine number of chars for the CAN-identifier */
+       if (cf->can_id & CAN_EFF_FLAG) {
+               id &= CAN_EFF_MASK;
+               endpos = pos + SLC_EFF_ID_LEN;
+       } else {
+               *pos |= 0x20; /* convert R/T to lower case for SFF */
+               id &= CAN_SFF_MASK;
+               endpos = pos + SLC_SFF_ID_LEN;
+       }
 
-       idx = strlen(sl->xbuff);
+       /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
+       pos++;
+       while (endpos >= pos) {
+               *endpos-- = hex_asc_upper[id & 0xf];
+               id >>= 4;
+       }
+
+       pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
 
-       for (i = 0; i < cf->can_dlc; i++)
-               sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
+       *pos++ = cf->can_dlc + '0';
+
+       /* RTR frames may have a dlc > 0 but they never have any data bytes */
+       if (!(cf->can_id & CAN_RTR_FLAG)) {
+               for (i = 0; i < cf->can_dlc; i++)
+                       pos = hex_byte_pack_upper(pos, cf->data[i]);
+       }
 
-       strcat(sl->xbuff, "\r"); /* add terminating character */
+       *pos++ = '\r';
 
        /* Order of next two lines is *very* important.
         * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
         *       14 Oct 1994  Dmitry Gorodchanin.
         */
        set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
-       actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
-       sl->xleft = strlen(sl->xbuff) - actual;
+       actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
+       sl->xleft = (pos - sl->xbuff) - actual;
        sl->xhead = sl->xbuff + actual;
        sl->dev->stats.tx_bytes += cf->can_dlc;
 }
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
                return;
 
+       spin_lock(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               spin_unlock(&sl->lock);
                netif_wake_queue(sl->dev);
                return;
        }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
+       spin_unlock(&sl->lock);
 }
 
 /* Send a can_frame to a TTY queue. */
index afd7d85b69155a4259748b2c2648bc2739835d38..35f062282dbdc278fbc9b040395f14935b7a1ac5 100644 (file)
@@ -71,34 +71,34 @@ struct softing {
        } id;
 };
 
-extern int softing_default_output(struct net_device *netdev);
+int softing_default_output(struct net_device *netdev);
 
-extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+ktime_t softing_raw2ktime(struct softing *card, u32 raw);
 
-extern int softing_chip_poweron(struct softing *card);
+int softing_chip_poweron(struct softing *card);
 
-extern int softing_bootloader_command(struct softing *card, int16_t cmd,
-               const char *msg);
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+                              const char *msg);
 
 /* Load firmware after reset */
-extern int softing_load_fw(const char *file, struct softing *card,
-                       __iomem uint8_t *virt, unsigned int size, int offset);
+int softing_load_fw(const char *file, struct softing *card,
+                   __iomem uint8_t *virt, unsigned int size, int offset);
 
 /* Load final application firmware after bootloader */
-extern int softing_load_app_fw(const char *file, struct softing *card);
+int softing_load_app_fw(const char *file, struct softing *card);
 
 /*
  * enable or disable irq
  * only called with fw.lock locked
  */
-extern int softing_enable_irq(struct softing *card, int enable);
+int softing_enable_irq(struct softing *card, int enable);
 
 /* start/stop 1 bus on card */
-extern int softing_startstop(struct net_device *netdev, int up);
+int softing_startstop(struct net_device *netdev, int up);
 
 /* netif_rx() */
-extern int softing_netdev_rx(struct net_device *netdev,
-               const struct can_frame *msg, ktime_t ktime);
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+                     ktime_t ktime);
 
 /* SOFTING DPRAM mappings */
 #define DPRAM_RX               0x0000
index 65eef1eea2e2434ca47cfeb086095a415d2b385e..6cd5c01b624d592e6fce2c543075822e5751561b 100644 (file)
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
 
 static int softing_pdev_probe(struct platform_device *pdev)
 {
-       const struct softing_platform_data *pdat = pdev->dev.platform_data;
+       const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
        struct softing *card;
        struct net_device *netdev;
        struct softing_priv *priv;
index 3a349a22d5bc46eed31bdc32e12d27c25df3bd13..beb5ef834f0fb4de00a703cc4a5db1c79863ecc1 100644 (file)
@@ -894,7 +894,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
        void __iomem *addr;
        int err = -ENODEV;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data\n");
                goto probe_exit;
index a0f647f92bf55c7388034f9fbd0a377cc1225901..0b7a4c3b01a2976176878607bd457a9ea282e21e 100644 (file)
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
        if (i < PCAN_USB_MAX_TX_URBS) {
                if (i == 0) {
                        netdev_err(netdev, "couldn't setup any tx URB\n");
-                       return err;
+                       goto err_tx;
                }
 
                netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
        if (dev->adapter->dev_start) {
                err = dev->adapter->dev_start(dev);
                if (err)
-                       goto failed;
+                       goto err_adapter;
        }
 
        dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 1);
                if (err)
-                       goto failed;
+                       goto err_adapter;
        }
 
        dev->can.state = CAN_STATE_ERROR_ACTIVE;
 
        return 0;
 
-failed:
+err_adapter:
        if (err == -ENODEV)
                netif_device_detach(dev->netdev);
 
        netdev_warn(netdev, "couldn't submit control: %d\n", err);
 
+       for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
+               usb_free_urb(dev->tx_contexts[i].urb);
+               dev->tx_contexts[i].urb = NULL;
+       }
+err_tx:
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+
        return err;
 }
 
index f00c76377b446cb63999857138e4f0365eccc015..65b735d4a6ad8aa92dc4cea9490f3f14fe9bbab8 100644 (file)
@@ -35,7 +35,7 @@ config EL3
 
 config 3C515
        tristate "3c515 ISA \"Fast EtherLink\""
-       depends on (ISA || EISA) && ISA_DMA_API
+       depends on ISA && ISA_DMA_API
        ---help---
          If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
          network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
        select MII
        ---help---
          This option enables driver support for a large number of 10Mbps and
-         10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+         10/100Mbps EISA, PCI and Cardbus 3Com network cards:
 
          "Vortex"    (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
          "Boomerang" (EtherLink XL 3c900 or 3c905)            PCI
index 144942f6372b68edd1bedc91175b8feab07cbe6e..465cc7108d8a5e5bbb2c34df44f7bd0329d02d6b 100644 (file)
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index ef325ffa1b5ac63dd61dd0c0230c8a63a1a4a6cf..2923c51bb351c38e2cc7d728056bdcb6f5c25a97 100644 (file)
@@ -28,42 +28,42 @@ extern int ei_debug;
 #endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
 #endif
 
 
 /* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops ei_netdev_ops;
 
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
 static inline struct net_device *alloc_ei_netdev(void)
 {
        return __alloc_ei_netdev(0);
 }
 
 /* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops eip_netdev_ops;
 
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
 static inline struct net_device *alloc_eip_netdev(void)
 {
        return __alloc_eip_netdev(0);
index becef25fa194174e1a18a1a7cfbe2fd0c606f1f6..0988811f4e40e2e4effd4fa92a665b67d34291b5 100644 (file)
@@ -146,13 +146,6 @@ config PCMCIA_PCNET
          To compile this driver as a module, choose M here: the module will be
          called pcnet_cs.  If unsure, say N.
 
-config NE_H8300
-       tristate "NE2000 compatible support for H8/300"
-       depends on H8300H_AKI3068NET || H8300H_H8MAX
-       ---help---
-         Say Y here if you want to use the NE2000 compatible
-         controller on the Renesas H8/300 processor.
-
 config STNIC
        tristate "National DP83902AV  support"
        depends on SUPERH
index 588954a79b2ae657bf905b0246a0b2753f72a5ed..ff3b31894188261bb4ec865cd289a34e1583b970 100644 (file)
@@ -10,7 +10,6 @@ obj-$(CONFIG_HYDRA) += hydra.o 8390.o
 obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
 obj-$(CONFIG_NE2000) += ne.o 8390p.o
 obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
 obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
 obj-$(CONFIG_STNIC) += stnic.o 8390.o
index f92f001551dab53152bf276d189e88b1f984e3b5..36fa577970bbae8741254519c0450d497a286d7e 100644 (file)
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
                        for (i = 0; i < 16; i++)
                                SA_prom[i] = SA_prom[i+i];
 
-               memcpy(dev->dev_addr, SA_prom, 6);
+               memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
        }
 
 #ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
deleted file mode 100644 (file)
index 7fc28f2..0000000
+++ /dev/null
@@ -1,684 +0,0 @@
-/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
-/*
-    original ne.c
-    Written 1992-94 by Donald Becker.
-
-    Copyright 1993 United States Government as represented by the
-    Director, National Security Agency.
-
-    This software may be used and distributed according to the terms
-    of the GNU General Public License, incorporated herein by reference.
-
-    The author may be reached as becker@scyld.com, or C/O
-    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
-    H8/300 modified
-    Yoshinori Sato <ysato@users.sourceforge.jp>
-*/
-
-static const char version1[] =
-"ne-h8300.c:v1.00 2004/04/11 ysato\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define EI_SHIFT(x)    (ei_local->reg_offset[x])
-
-#include "8390.h"
-
-#define DRV_NAME "ne-h8300"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE   0x40 */
-
-/* A zero-terminated list of I/O addresses to be probed at boot. */
-
-/* ---- No user-serviceable parts below ---- */
-
-static const char version[] =
-    "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include "lib8390.c"
-
-#define NE_BASE         (dev->base_addr)
-#define NE_CMD         0x00
-#define NE_DATAPORT    (ei_status.word16?0x20:0x10)    /* NatSemi-defined port window offset. */
-#define NE_RESET       (ei_status.word16?0x3f:0x1f)    /* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT   (ei_status.word16?0x40:0x20)
-
-#define NESM_START_PG  0x40    /* First page of TX buffer */
-#define NESM_STOP_PG   0x80    /* Last page +1 of RX ring */
-
-static int ne_probe1(struct net_device *dev, int ioaddr);
-
-static int ne_open(struct net_device *dev);
-static int ne_close(struct net_device *dev);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
-                         int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
-                         struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
-               const unsigned char *buf, const int start_page);
-
-
-static u32 reg_offset[16];
-
-static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       int i;
-       unsigned char bus_width;
-
-       bus_width = *(volatile unsigned char *)ABWCR;
-       bus_width &= 1 << ((base_addr >> 21) & 7);
-
-       for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
-               if (bus_width == 0)
-                       reg_offset[i] = i * 2 + 1;
-               else
-                       reg_offset[i] = i;
-
-       ei_local->reg_offset = reg_offset;
-       return 0;
-}
-
-static int __initdata h8300_ne_count = 0;
-#ifdef CONFIG_H8300H_H8MAX
-static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
-static int h8300_ne_irq[] = {EXT_IRQ4};
-#endif
-#ifdef CONFIG_H8300H_AKI3068NET
-static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
-static int h8300_ne_irq[] = {EXT_IRQ5};
-#endif
-
-static inline int init_dev(struct net_device *dev)
-{
-       if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
-               dev->base_addr = h8300_ne_base[h8300_ne_count];
-               dev->irq       = h8300_ne_irq[h8300_ne_count];
-               h8300_ne_count++;
-               return 0;
-       } else
-               return -ENODEV;
-}
-
-/*  Probe for various non-shared-memory ethercards.
-
-   NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
-   buffer memory space.  NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
-   the SAPROM, while other supposed NE2000 clones must be detected by their
-   SA prefix.
-
-   Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
-   mode results in doubled values, which can be detected and compensated for.
-
-   The probe is also responsible for initializing the card and filling
-   in the 'dev' and 'ei_status' structures.
-
-   We use the minimum memory size for some ethercard product lines, iff we can't
-   distinguish models.  You can increase the packet buffer size by setting
-   PACKETBUF_MEMSIZE.  Reported Cabletron packet buffer locations are:
-       E1010   starts at 0x100 and ends at 0x2000.
-       E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
-       E2010    starts at 0x100 and ends at 0x4000.
-       E2010-x starts at 0x100 and ends at 0xffff.  */
-
-static int __init do_ne_probe(struct net_device *dev)
-{
-       unsigned int base_addr = dev->base_addr;
-
-       /* First check any supplied i/o locations. User knows best. <cough> */
-       if (base_addr > 0x1ff)  /* Check a single specified location. */
-               return ne_probe1(dev, base_addr);
-       else if (base_addr != 0)        /* Don't probe at all. */
-               return -ENXIO;
-
-       return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-       free_irq(dev->irq, dev);
-       release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init ne_probe(int unit)
-{
-       struct net_device *dev = ____alloc_ei_netdev(0);
-       int err;
-
-       if (!dev)
-               return ERR_PTR(-ENOMEM);
-
-       if (init_dev(dev))
-               return ERR_PTR(-ENODEV);
-
-       sprintf(dev->name, "eth%d", unit);
-       netdev_boot_setup_check(dev);
-
-       err = init_reg_offset(dev, dev->base_addr);
-       if (err)
-               goto out;
-
-       err = do_ne_probe(dev);
-       if (err)
-               goto out;
-       return dev;
-out:
-       free_netdev(dev);
-       return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ne_netdev_ops = {
-       .ndo_open               = ne_open,
-       .ndo_stop               = ne_close,
-
-       .ndo_start_xmit         = __ei_start_xmit,
-       .ndo_tx_timeout         = __ei_tx_timeout,
-       .ndo_get_stats          = __ei_get_stats,
-       .ndo_set_rx_mode        = __ei_set_multicast_list,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_change_mtu         = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = __ei_poll,
-#endif
-};
-
-static int __init ne_probe1(struct net_device *dev, int ioaddr)
-{
-       int i;
-       unsigned char SA_prom[16];
-       int wordlength = 2;
-       const char *name = NULL;
-       int start_page, stop_page;
-       int reg0, ret;
-       static unsigned version_printed;
-       struct ei_device *ei_local = netdev_priv(dev);
-       unsigned char bus_width;
-
-       if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
-               return -EBUSY;
-
-       reg0 = inb_p(ioaddr);
-       if (reg0 == 0xFF) {
-               ret = -ENODEV;
-               goto err_out;
-       }
-
-       /* Do a preliminary verification that we have a 8390. */
-       {
-               int regd;
-               outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
-               regd = inb_p(ioaddr + EI_SHIFT(0x0d));
-               outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
-               outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
-               inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
-               if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
-                       outb_p(reg0, ioaddr + EI_SHIFT(0));
-                       outb_p(regd, ioaddr + EI_SHIFT(0x0d));  /* Restore the old values. */
-                       ret = -ENODEV;
-                       goto err_out;
-               }
-       }
-
-       if (ei_debug  &&  version_printed++ == 0)
-               printk(KERN_INFO "%s", version1);
-
-       printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
-
-       /* Read the 16 bytes of station address PROM.
-          We must first initialize registers, similar to NS8390_init(eifdev, 0).
-          We can't reliably read the SAPROM address without this.
-          (I learned the hard way!). */
-       {
-               struct {unsigned char value, offset; } program_seq[] =
-               {
-                       {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
-                       {0x48,  EN0_DCFG},      /* Set byte-wide (0x48) access. */
-                       {0x00,  EN0_RCNTLO},    /* Clear the count regs. */
-                       {0x00,  EN0_RCNTHI},
-                       {0x00,  EN0_IMR},       /* Mask completion irq. */
-                       {0xFF,  EN0_ISR},
-                       {E8390_RXOFF, EN0_RXCR},        /* 0x20  Set to monitor */
-                       {E8390_TXOFF, EN0_TXCR},        /* 0x02  and loopback mode. */
-                       {32,    EN0_RCNTLO},
-                       {0x00,  EN0_RCNTHI},
-                       {0x00,  EN0_RSARLO},    /* DMA starting at 0x0000. */
-                       {0x00,  EN0_RSARHI},
-                       {E8390_RREAD+E8390_START, E8390_CMD},
-               };
-
-               for (i = 0; i < ARRAY_SIZE(program_seq); i++)
-                       outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
-
-       }
-       bus_width = *(volatile unsigned char *)ABWCR;
-       bus_width &= 1 << ((ioaddr >> 21) & 7);
-       ei_status.word16 = (bus_width == 0); /* temporary setting */
-       for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
-               SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
-               inb_p(ioaddr + NE_DATAPORT); /* dummy read */
-       }
-
-       start_page = NESM_START_PG;
-       stop_page = NESM_STOP_PG;
-
-       if (bus_width)
-               wordlength = 1;
-       else
-               outb_p(0x49, ioaddr + EN0_DCFG);
-
-       /* Set up the rest of the parameters. */
-       name = (wordlength == 2) ? "NE2000" : "NE1000";
-
-       if (! dev->irq) {
-               printk(" failed to detect IRQ line.\n");
-               ret = -EAGAIN;
-               goto err_out;
-       }
-
-       /* Snarf the interrupt now.  There's no point in waiting since we cannot
-          share and the board will usually be enabled. */
-       ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
-       if (ret) {
-               printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
-               goto err_out;
-       }
-
-       dev->base_addr = ioaddr;
-
-       for (i = 0; i < ETH_ALEN; i++)
-               dev->dev_addr[i] = SA_prom[i];
-       printk(" %pM\n", dev->dev_addr);
-
-       printk("%s: %s found at %#x, using IRQ %d.\n",
-               dev->name, name, ioaddr, dev->irq);
-
-       ei_status.name = name;
-       ei_status.tx_start_page = start_page;
-       ei_status.stop_page = stop_page;
-       ei_status.word16 = (wordlength == 2);
-
-       ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
-        /* Allow the packet buffer size to be overridden by know-it-alls. */
-       ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
-       ei_status.reset_8390 = &ne_reset_8390;
-       ei_status.block_input = &ne_block_input;
-       ei_status.block_output = &ne_block_output;
-       ei_status.get_8390_hdr = &ne_get_8390_hdr;
-       ei_status.priv = 0;
-
-       dev->netdev_ops = &ne_netdev_ops;
-
-       __NS8390_init(dev, 0);
-
-       ret = register_netdev(dev);
-       if (ret)
-               goto out_irq;
-       return 0;
-out_irq:
-       free_irq(dev->irq, dev);
-err_out:
-       release_region(ioaddr, NE_IO_EXTENT);
-       return ret;
-}
-
-static int ne_open(struct net_device *dev)
-{
-       __ei_open(dev);
-       return 0;
-}
-
-static int ne_close(struct net_device *dev)
-{
-       if (ei_debug > 1)
-               printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
-       __ei_close(dev);
-       return 0;
-}
-
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-
-static void ne_reset_8390(struct net_device *dev)
-{
-       unsigned long reset_start_time = jiffies;
-       struct ei_device *ei_local = netdev_priv(dev);
-
-       if (ei_debug > 1)
-               printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
-
-       /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
-       outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
-       ei_status.txing = 0;
-       ei_status.dmaing = 0;
-
-       /* This check _should_not_ be necessary, omit eventually. */
-       while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
-               if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-                       printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
-                       break;
-               }
-       outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
-   we don't need to be concerned with ring wrap as the header will be at
-   the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-
-       if (ei_status.dmaing)
-       {
-               printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
-                       "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, ei_status.dmaing, ei_status.irqlock);
-               return;
-       }
-
-       ei_status.dmaing |= 0x01;
-       outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
-       outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
-       outb_p(0, NE_BASE + EN0_RCNTHI);
-       outb_p(0, NE_BASE + EN0_RSARLO);                /* On page boundary */
-       outb_p(ring_page, NE_BASE + EN0_RSARHI);
-       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-
-       if (ei_status.word16) {
-               int len;
-               unsigned short *p = (unsigned short *)hdr;
-               for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
-                       *p++ = inw(NE_BASE + NE_DATAPORT);
-       } else
-               insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
-
-       le16_to_cpus(&hdr->count);
-}
-
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for hints.
-   The NEx000 doesn't share the on-board packet memory -- you have to put
-   the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-#ifdef NE_SANITY_CHECK
-       int xfer_count = count;
-#endif
-       char *buf = skb->data;
-
-       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing)
-       {
-               printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
-                       "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, ei_status.dmaing, ei_status.irqlock);
-               return;
-       }
-       ei_status.dmaing |= 0x01;
-       outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
-       outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
-       outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
-       outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
-       outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
-       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-       if (ei_status.word16)
-       {
-               int len;
-               unsigned short *p = (unsigned short *)buf;
-               for (len = count>>1; len > 0; len--)
-                       *p++ = inw(NE_BASE + NE_DATAPORT);
-               if (count & 0x01)
-               {
-                       buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
-                       xfer_count++;
-#endif
-               }
-       } else {
-               insb(NE_BASE + NE_DATAPORT, buf, count);
-       }
-
-#ifdef NE_SANITY_CHECK
-       /* This was for the ALPHA version only, but enough people have
-          been encountering problems so it is still here.  If you see
-          this message you either 1) have a slightly incompatible clone
-          or 2) have noise/speed problems with your bus. */
-
-       if (ei_debug > 1)
-       {
-               /* DMA termination address check... */
-               int addr, tries = 20;
-               do {
-                       /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
-                          -- it's broken for Rx on some cards! */
-                       int high = inb_p(NE_BASE + EN0_RSARHI);
-                       int low = inb_p(NE_BASE + EN0_RSARLO);
-                       addr = (high << 8) + low;
-                       if (((ring_offset + xfer_count) & 0xff) == low)
-                               break;
-               } while (--tries > 0);
-               if (tries <= 0)
-                       printk(KERN_WARNING "%s: RX transfer address mismatch,"
-                               "%#4.4x (expected) vs. %#4.4x (actual).\n",
-                               dev->name, ring_offset + xfer_count, addr);
-       }
-#endif
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
-               const unsigned char *buf, const int start_page)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
-       int retries = 0;
-#endif
-
-       /* Round the count up for word writes.  Do we need to do this?
-          What effect will an odd byte count have on the 8390?
-          I should check someday. */
-
-       if (ei_status.word16 && (count & 0x01))
-               count++;
-
-       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing)
-       {
-               printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
-                       "[DMAstat:%d][irqlock:%d]\n",
-                       dev->name, ei_status.dmaing, ei_status.irqlock);
-               return;
-       }
-       ei_status.dmaing |= 0x01;
-       /* We should already be in page 0, but to be safe... */
-       outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
-       /* Handle the read-before-write bug the same way as the
-          Crynwr packet driver -- the NatSemi method doesn't work.
-          Actually this doesn't always work either, but if you have
-          problems with your NEx000 this is better than nothing! */
-
-       outb_p(0x42, NE_BASE + EN0_RCNTLO);
-       outb_p(0x00, NE_BASE + EN0_RCNTHI);
-       outb_p(0x42, NE_BASE + EN0_RSARLO);
-       outb_p(0x00, NE_BASE + EN0_RSARHI);
-       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-       /* Make certain that the dummy read has occurred. */
-       udelay(6);
-#endif
-
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
-
-       /* Now the normal output. */
-       outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
-       outb_p(count >> 8,   NE_BASE + EN0_RCNTHI);
-       outb_p(0x00, NE_BASE + EN0_RSARLO);
-       outb_p(start_page, NE_BASE + EN0_RSARHI);
-
-       outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
-       if (ei_status.word16) {
-               int len;
-               unsigned short *p = (unsigned short *)buf;
-               for (len = count>>1; len > 0; len--)
-                       outw(*p++, NE_BASE + NE_DATAPORT);
-       } else {
-               outsb(NE_BASE + NE_DATAPORT, buf, count);
-       }
-
-       dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
-       /* This was for the ALPHA version only, but enough people have
-          been encountering problems so it is still here. */
-
-       if (ei_debug > 1)
-       {
-               /* DMA termination address check... */
-               int addr, tries = 20;
-               do {
-                       int high = inb_p(NE_BASE + EN0_RSARHI);
-                       int low = inb_p(NE_BASE + EN0_RSARLO);
-                       addr = (high << 8) + low;
-                       if ((start_page << 8) + count == addr)
-                               break;
-               } while (--tries > 0);
-
-               if (tries <= 0)
-               {
-                       printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
-                               "%#4.4x (expected) vs. %#4.4x (actual).\n",
-                               dev->name, (start_page << 8) + count, addr);
-                       if (retries++ == 0)
-                               goto retry;
-               }
-       }
-#endif
-
-       while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
-               if (time_after(jiffies, dma_start + 2*HZ/100)) {                /* 20ms */
-                       printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
-                       ne_reset_8390(dev);
-                       __NS8390_init(dev,1);
-                       break;
-               }
-
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
-}
-
-
-#ifdef MODULE
-#define MAX_NE_CARDS   1       /* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS];  /* 0xbad = bad sig or no reset ack */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that no ISA autoprobe takes place. We can't guarantee
-that the ne2k probe is the last 8390 based probe to take place (as it
-is at boot) and so the probe will get confused by any other 8390 cards.
-ISA device autoprobes on a running machine are not recommended anyway. */
-
-int init_module(void)
-{
-       int this_dev, found = 0;
-       int err;
-
-       for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-               struct net_device *dev = ____alloc_ei_netdev(0);
-               if (!dev)
-                       break;
-               if (io[this_dev]) {
-                       dev->irq = irq[this_dev];
-                       dev->mem_end = bad[this_dev];
-                       dev->base_addr = io[this_dev];
-               } else {
-                       dev->base_addr = h8300_ne_base[this_dev];
-                       dev->irq = h8300_ne_irq[this_dev];
-               }
-               err = init_reg_offset(dev, dev->base_addr);
-               if (!err) {
-                       if (do_ne_probe(dev) == 0) {
-                               dev_ne[found++] = dev;
-                               continue;
-                       }
-               }
-               free_netdev(dev);
-               if (found)
-                       break;
-               if (io[this_dev] != 0)
-                       printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr);
-               else
-                       printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
-               return -ENXIO;
-       }
-       if (found)
-               return 0;
-       return -ENODEV;
-}
-
-void cleanup_module(void)
-{
-       int this_dev;
-
-       for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-               struct net_device *dev = dev_ne[this_dev];
-               if (dev) {
-                       unregister_netdev(dev);
-                       cleanup_card(dev);
-                       free_netdev(dev);
-               }
-       }
-}
-#endif /* MODULE */
index 92201080e07a104b4e25d2f308a7b62d8f0099ba..fc14a85e4d5ffdde31d42c40dac5f6af7e80c946 100644 (file)
@@ -389,9 +389,7 @@ err_out_free_netdev:
        free_netdev (dev);
 err_out_free_res:
        release_region (ioaddr, NE_IO_EXTENT);
-       pci_set_drvdata (pdev, NULL);
        return -ENODEV;
-
 }
 
 /*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
        release_region(dev->base_addr, NE_IO_EXTENT);
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index 8b04bfc20cfba3b9c17984471dfccb6163799bdb..171d73c1d3c22de3209ca6c48b7978c35b2a38b0 100644 (file)
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        iounmap(base);
 err_out_free_res:
        pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
        iounmap(np->base);
        pci_release_regions(pdev);
 
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);                       /* Will also free np!! */
 }
 
index 7a07ee07906b280f2d849ae0959c604ee58d411b..6dec86ac97cdbfb26977c382d73cf6a849582895 100644 (file)
@@ -104,6 +104,6 @@ struct bfin_mac_local {
 #endif
 };
 
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
 
 #endif
index 0a5837b9642194799bedaf26abf3721bafb94579..ae33a99bf47626695b239cdb78941ea09b723b02 100644 (file)
@@ -242,13 +242,13 @@ struct lance_private
 #define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
 
 /* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
 #endif
 
 #endif /* ndef _7990_H */
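
These header hunks (like the atl1c/atl1e and atlx ones further down) simply drop the redundant "extern" storage class from function prototypes. For reference, the resulting declaration style is just the following; demo_* names are illustrative.

#include <linux/netdevice.h>

int demo_open(struct net_device *dev);		/* was: extern int demo_open(...); */
void demo_tx_timeout(struct net_device *dev);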
index 1b1429d5d5c287f761a3066bc6bbebe6b74995ee..d042511bdc1365882a3bea08626bfce6bcc714da 100644 (file)
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:
 
 err_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 
 }
index 10ceca523fc0dc9556733d3c1eeded045ab70303..e07ce5ff2d48bf93e2a3daeb102fb4b78d471532 100644 (file)
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
        switch( lp->cardtype ) {
          case OLD_RIEBL:
                /* No ethernet address! (Set some default address) */
-               memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+               memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
                break;
          case NEW_RIEBL:
-               lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+               lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
                break;
          case PAM_CARD:
                i = IO->eeprom;
index 91d52b495848a17d4b9bae0de04b7d1f7c798ba9..427c148bb643538c1d2240036a9dd7cad1be9596 100644 (file)
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
                aup->phy1_search_mac0 = 1;
        } else {
                if (is_valid_ether_addr(pd->mac)) {
-                       memcpy(dev->dev_addr, pd->mac, 6);
+                       memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
                } else {
                        /* Set a random MAC since no valid provided by platform_data. */
                        eth_hw_addr_random(dev);
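
Several hunks in this series replace the literal 6 with ETH_ALEN when copying MAC addresses. A small sketch of the helpers involved; demo_set_mac() is a made-up name, and the drivers above open-code the same logic inline.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void demo_set_mac(struct net_device *dev, const u8 *mac)
{
	if (mac && is_valid_ether_addr(mac))
		memcpy(dev->dev_addr, mac, ETH_ALEN);	/* ETH_ALEN == 6 */
	else
		eth_hw_addr_random(dev);	/* no valid address supplied */
}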
index 94edc9c6fbbf317765d41d8d4ba9cfc6b817ec20..57397295887c964b23f2bbb7cf7d25d908ab2b29 100644 (file)
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
                }
 
                clen = len & 1;
-               rtp = tp;
-               rfp = fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
                while (clen--) {
                        *rtp++ = *rfp++;
                }
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
                 * do the rest, if any.
                 */
                clen = len & 15;
-               rtp = (unsigned char *) tp;
-               rfp = (unsigned char *) fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
                while (clen--) {
                        *rtp++ = *rfp++;
                }
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
 
                clen = len & 1;
 
-               rtp = tp;
-               rfp = fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
 
                while (clen--) {
                        *rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
                 * do the rest, if any.
                 */
                clen = len & 15;
-               rtp = (unsigned char *) tp;
-               rfp = (unsigned char *) fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
                while (clen--) {
                        *rtp++ = *rfp++;
                }
@@ -725,7 +725,6 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
 
-       clear_ioasic_dma_irq(irq);
        printk(KERN_ERR "%s: DMA error\n", dev->name);
        return IRQ_HANDLED;
 }
@@ -812,7 +811,7 @@ static int lance_open(struct net_device *dev)
        if (lp->dma_irq >= 0) {
                unsigned long flags;
 
-               if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
+               if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
                                "lance error", dev)) {
                        free_irq(dev->irq, dev);
                        printk("%s: Can't get DMA IRQ %d\n", dev->name,
index 5c728436b85e7df0cf9421fa6185e45c2b52bd3c..256f590f6bb1a6db167f4375b3ed2719d2ad5073 100644 (file)
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
        int i;
 
        if (dev->irq == 0 ||
-               request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+               request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
                return -EAGAIN;
        }
 
index 2d8e28819779eed0388109791fe0126f5af763f8..38492e0b704e3aab926ec5fd300b61028223d504 100644 (file)
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                                pr_cont(" warning: CSR address invalid,\n");
                                pr_info("    using instead PROM address of");
                        }
-                       memcpy(dev->dev_addr, promaddr, 6);
+                       memcpy(dev->dev_addr, promaddr, ETH_ALEN);
                }
        }
 
@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
                                    lp->init_block, lp->init_dma_addr);
                free_netdev(dev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index a597b766f0809d3b1e1893e9ec008fcdc813405f..daae0e01625360598194c76fcc4239a3283bc85c 100644 (file)
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
        if (skb != NULL) {
                data = skb_put(skb, ETHERMINPACKET);
                memset(data, 0, ETHERMINPACKET);
-               memcpy(data, dev->dev_addr, 6);
-               memcpy(data+6, dev->dev_addr, 6);
+               memcpy(data, dev->dev_addr, ETH_ALEN);
+               memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
index fc95b235e210d058545efd889537c2a8dea08a45..5aa5e8146496ba807ece60fe5b287d70a79a8ad4 100644 (file)
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)
 
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        free_netdev(alx->dev);
 }
index 0f0556526ba90bff7f764c4005d6f6e850220dd7..7f9369a3b378320762c89cc9369964d265fda6c9 100644 (file)
@@ -600,7 +600,7 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1C_H_ */
index 3ef7092e3f1c5ff0fd31cb53a0253a0c7532d711..1cda49a28f7f0e9a672ae775a832eba31ad76fb3 100644 (file)
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
 {
        int i;
-       int ret = false;
+       bool ret = false;
        u32 otp_ctrl_data;
        u32 control;
        u32 data;
index b5fd934585e9f9f14595f60a2ec2e069cd2bed35..1b0fe2d04a0e88fcb8ac5e250e33384d66c0c68f 100644 (file)
@@ -499,10 +499,10 @@ struct atl1e_adapter {
 extern char atl1e_driver_name[];
 extern char atl1e_driver_version[];
 
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1_E_H_ */
index 1966444590f6192e862a713216182ab09052ebef..7a73f3a9fcb5e2a5b0ffbac1ed1d0bc0163769cb 100644 (file)
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
        }
 }
 
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+       if (features & NETIF_F_RXALL) {
+               /* enable RX of ALL frames */
+               *mac_ctrl_data |= MAC_CTRL_DBG;
+       } else {
+               /* disable RX of ALL frames */
+               *mac_ctrl_data &= ~MAC_CTRL_DBG;
+       }
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+       netdev_features_t features)
+{
+       struct atl1e_adapter *adapter = netdev_priv(netdev);
+       u32 mac_ctrl_data = 0;
+
+       netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+       atl1e_irq_disable(adapter);
+       mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+       __atl1e_rx_mode(features, &mac_ctrl_data);
+       AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+       atl1e_irq_enable(adapter);
+}
+
+
 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                atl1e_vlan_mode(netdev, features);
 
+       if (changed & NETIF_F_RXALL)
+               atl1e_rx_mode(netdev, features);
+
+
        return 0;
 }
 
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
                value |= MAC_CTRL_PROMIS_EN;
        if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
-
+       if (netdev->features & NETIF_F_RXALL)
+               value |= MAC_CTRL_DBG;
        AT_WRITE_REG(hw, REG_MAC_CTRL, value);
 }
 
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                        rx_page_desc[que].rx_nxseq++;
 
                        /* error packet */
-                       if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+                       if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+                           !(netdev->features & NETIF_F_RXALL)) {
                                if (prrs->err_flag & (RRS_ERR_BAD_CRC |
                                        RRS_ERR_DRIBBLE | RRS_ERR_CODE |
                                        RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                        }
 
                        packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
-                                       RRS_PKT_SIZE_MASK) - 4; /* CRC */
+                                       RRS_PKT_SIZE_MASK);
+                       if (likely(!(netdev->features & NETIF_F_RXFCS)))
+                               packet_size -= 4; /* CRC */
+
                        skb = netdev_alloc_skb_ip_align(netdev, packet_size);
                        if (skb == NULL)
                                goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
                              NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features = netdev->hw_features | NETIF_F_LLTX |
                           NETIF_F_HW_VLAN_CTAG_TX;
-
+       /* not enabled by default */
+       netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
        return 0;
 }
 
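The atl1e changes above wire NETIF_F_RXALL (and NETIF_F_RXFCS) into hw_features and react to the bit from the ->ndo_set_features() hook. A generic sketch of that hook pattern, assuming a single demo_apply_rx_all() helper that programs the MAC; all demo_* names are illustrative.

#include <linux/netdevice.h>

static void demo_apply_rx_all(struct net_device *dev, bool enable)
{
	/* program the MAC's "accept all frames" / debug bit here */
}

static int demo_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXALL)
		demo_apply_rx_all(dev, !!(features & NETIF_F_RXALL));

	return 0;
}

Because the bits are added to hw_features only, they stay off by default; userspace opts in per device, typically via "ethtool -K <dev> rx-all on" or "rx-fcs on".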
index 3ebe19f7242b6da9cd5d3290674f85cb9e164013..2f27d4c4c3ad4ae5b026983dba7d9478cd1c5430 100644 (file)
@@ -42,7 +42,7 @@
 #include "atlx.h"
 
 #ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
 #endif
 
 #define PCI_COMMAND_REGISTER   PCI_COMMAND
index 9b017d9c58e94e5ab671aeda2cfcc969cc561c6e..90e54d5488dcbda9b13a1820177630d2dd70b613 100644 (file)
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
 static void b44_tx(struct b44 *bp)
 {
        u32 cur, cons;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
+
+               bytes_compl += skb->len;
+               pkts_compl++;
+
                dev_kfree_skb_irq(skb);
        }
 
+       netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);
 
+       netdev_sent_queue(dev, skb->len);
+
        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);
 
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
 
        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+       netdev_reset_queue(bp->dev);
 }
 
 static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
         * valid PHY address. */
        bp->phy_addr &= 0x1F;
 
-       memcpy(bp->dev->dev_addr, addr, 6);
+       memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
 
        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
                pr_err("Invalid MAC address found in EEPROM\n");
@@ -2183,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
                goto err_out_free_dev;
        }
 
-       if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
-           dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+       if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
                dev_err(sdev->dev,
                        "Required 30BIT DMA mask unsupported by the system\n");
                goto err_out_powerdown;
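
The b44 hunks above (and the bgmac ones below) add byte queue limits: queued bytes are reported at transmit time, completions are reported from the TX clean path, and the accounting is reset whenever the ring is reinitialised; the last hunk also folds the two DMA mask calls into dma_set_mask_and_coherent(). A minimal single-queue sketch of the BQL calls, with demo_* names standing in for the driver's own functions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...map the skb and post it to the hardware TX ring... */
	netdev_sent_queue(dev, skb->len);	/* account queued bytes */
	return NETDEV_TX_OK;
}

static void demo_tx_clean(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ...walk completed descriptors, summing pkts and bytes... */
	netdev_completed_queue(dev, pkts, bytes);
}

static void demo_ring_init(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* restart BQL accounting from zero */
}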
index 249468f953651480a5e0b897d582dd09743c6c3b..7eca5a1747337db6f2eed66803e81b081201e38f 100644 (file)
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        dma_desc->ctl0 = cpu_to_le32(ctl0);
        dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+       netdev_sent_queue(net_dev, skb->len);
+
        wmb();
 
        /* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
        struct device *dma_dev = bgmac->core->dma_dev;
        int empty_slot;
        bool freed = false;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        /* The last slot that hardware didn't consume yet */
        empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
                                         slot->skb->len, DMA_TO_DEVICE);
                        slot->dma_addr = 0;
 
+                       bytes_compl += slot->skb->len;
+                       pkts_compl++;
+
                        /* Free memory! :) */
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
                freed = true;
        }
 
+       netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
        if (freed && netif_queue_stopped(bgmac->net_dev))
                netif_wake_queue(bgmac->net_dev);
 }
@@ -988,6 +996,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
        bgmac_miiconfig(bgmac);
        bgmac_phy_init(bgmac);
 
+       netdev_reset_queue(bgmac->net_dev);
+
        bgmac->int_status = 0;
 }
 
index e838a3f74b696dd208680498f3350a3349a65392..d9980ad00b4b8d65bc23061fd71d28e936d48cb1 100644 (file)
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
-       memcpy(packet, bp->dev->dev_addr, 6);
-       memset(packet + 6, 0x0, 8);
+       memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+       memset(packet + ETH_ALEN, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
 
@@ -8413,7 +8413,6 @@ err_out_release:
 
 err_out_disable:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
 err_out:
        return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
-       memcpy(dev->dev_addr, bp->mac_addr, 6);
+       memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
        pci_iounmap(pdev, bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 err_free:
        free_netdev(dev);
        return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int
index 97b3d32a98bd010ab1a1327ef7382e5bd64139b8..4e01c57d8c8de349da61e80cb0e4cb0f4aece3fd 100644 (file)
@@ -1197,8 +1197,9 @@ union cdu_context {
 /* TM (timers) host DB constants */
 #define TM_ILT_PAGE_SZ_HW      0
 #define TM_ILT_PAGE_SZ         (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM         (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM            1024
+#define TM_CONN_NUM            (BNX2X_FIRST_VF_CID + \
+                                BNX2X_VF_CIDS + \
+                                CNIC_ISCSI_CID_MAX)
 #define TM_ILT_SZ              (8 * TM_CONN_NUM)
 #define TM_ILT_LINES           DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 
@@ -1527,7 +1528,6 @@ struct bnx2x {
 #define PCI_32BIT_FLAG                 (1 << 1)
 #define ONE_PORT_FLAG                  (1 << 2)
 #define NO_WOL_FLAG                    (1 << 3)
-#define USING_DAC_FLAG                 (1 << 4)
 #define USING_MSIX_FLAG                        (1 << 5)
 #define USING_MSI_FLAG                 (1 << 6)
 #define DISABLE_MSI_FLAG               (1 << 7)
@@ -1546,6 +1546,7 @@ struct bnx2x {
 #define IS_VF_FLAG                     (1 << 22)
 #define INTERRUPTS_ENABLED_FLAG                (1 << 23)
 #define BC_SUPPORTS_RMMOD_CMD          (1 << 24)
+#define HAS_PHYS_PORT_ID               (1 << 25)
 
 #define BP_NOMCP(bp)                   ((bp)->flags & NO_MCP_FLAG)
 
@@ -1621,7 +1622,7 @@ struct bnx2x {
        u16                     rx_ticks_int;
        u16                     rx_ticks;
 /* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT                (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT                (0xff*BNX2X_BTR)
 
        u32                     lin_cnt;
 
@@ -1876,6 +1877,8 @@ struct bnx2x {
        u32 dump_preset_idx;
        bool                                    stats_started;
        struct semaphore                        stats_sema;
+
+       u8                                      phys_port_id[ETH_ALEN];
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2072,7 +2075,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 
 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+                              u32 *comp);
 
 /* FLR related routines */
 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2231,7 +2235,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define BNX2X_NUM_TESTS_SF             7
 #define BNX2X_NUM_TESTS_MF             3
 #define BNX2X_NUM_TESTS(bp)            (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
-                                                    BNX2X_NUM_TESTS_SF)
+                                            IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
 
 #define BNX2X_PHY_LOOPBACK             0
 #define BNX2X_MAC_LOOPBACK             1
@@ -2491,11 +2495,9 @@ enum {
 
 #define NUM_MACS       8
 
-enum bnx2x_pci_bus_speed {
-       BNX2X_PCI_LINK_SPEED_2500 = 2500,
-       BNX2X_PCI_LINK_SPEED_5000 = 5000,
-       BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
 void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+       (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
 #endif /* bnx2x.h */
index 61726af1de6ede3c111d07040e5dec8f4440884a..6e46cff5236da1b835bc7698085fb34b657e3185 100644 (file)
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
        }
 #endif
+       skb_record_rx_queue(skb, fp->rx_queue);
        napi_gro_receive(&fp->napi, skb);
 }
 
@@ -2481,8 +2482,7 @@ load_error_cnic2:
 load_error_cnic1:
        bnx2x_napi_disable_cnic(bp);
        /* Update the number of queues without the cnic queues */
-       rc = bnx2x_set_real_num_queues(bp, 0);
-       if (rc)
+       if (bnx2x_set_real_num_queues(bp, 0))
                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
 load_error_cnic0:
        BNX2X_ERR("CNIC-related load failed\n");
@@ -3256,14 +3256,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
        if (prot == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;
 
-       if (skb_is_gso_v6(skb)) {
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
-               if (rc & XMIT_CSUM_ENC)
-                       rc |= XMIT_GSO_ENC_V6;
-       } else if (skb_is_gso(skb)) {
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
-               if (rc & XMIT_CSUM_ENC)
-                       rc |= XMIT_GSO_ENC_V4;
+       if (skb_is_gso(skb)) {
+               if (skb_is_gso_v6(skb)) {
+                       rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+                       if (rc & XMIT_CSUM_ENC)
+                               rc |= XMIT_GSO_ENC_V6;
+               } else {
+                       rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+                       if (rc & XMIT_CSUM_ENC)
+                               rc |= XMIT_GSO_ENC_V4;
+               }
        }
 
        return rc;
index 324de5f05332e78aa6c108d23891105880ee5bd8..32d0f1435fb410b54c3105236c14a211f35bc0b2 100644 (file)
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        int regdump_len = 0;
 
+       if (IS_VF(bp))
+               return 0;
+
        regdump_len = __bnx2x_get_regs_len(bp);
        regdump_len *= 4;
        regdump_len += sizeof(struct dump_header);
@@ -891,17 +894,8 @@ static void bnx2x_get_regs(struct net_device *dev,
         * will re-enable parity attentions right after the dump.
         */
 
-       /* Disable parity on path 0 */
-       bnx2x_pretend_func(bp, 0);
        bnx2x_disable_blocks_parity(bp);
 
-       /* Disable parity on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_disable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
        dump_hdr.preset = DUMP_ALL_PRESETS;
        dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +922,9 @@ static void bnx2x_get_regs(struct net_device *dev,
        /* Actually read the registers */
        __bnx2x_get_regs(bp, p);
 
-       /* Re-enable parity attentions on path 0 */
-       bnx2x_pretend_func(bp, 0);
+       /* Re-enable parity attentions */
        bnx2x_clear_blocks_parity(bp);
        bnx2x_enable_blocks_parity(bp);
-
-       /* Re-enable parity attentions on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_clear_blocks_parity(bp);
-       bnx2x_enable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 }
 
 static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +978,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
         * will re-enable parity attentions right after the dump.
         */
 
-       /* Disable parity on path 0 */
-       bnx2x_pretend_func(bp, 0);
        bnx2x_disable_blocks_parity(bp);
 
-       /* Disable parity on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_disable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
        dump_hdr.preset = bp->dump_preset_idx;
        dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1008,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
        /* Actually read the registers */
        __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
 
-       /* Re-enable parity attentions on path 0 */
-       bnx2x_pretend_func(bp, 0);
-       bnx2x_clear_blocks_parity(bp);
-       bnx2x_enable_blocks_parity(bp);
-
-       /* Re-enable parity attentions on path 1 */
-       bnx2x_pretend_func(bp, 1);
+       /* Re-enable parity attentions */
        bnx2x_clear_blocks_parity(bp);
        bnx2x_enable_blocks_parity(bp);
 
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        return 0;
 }
 
@@ -2900,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev,
 
        memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
 
+       if (bnx2x_test_nvram(bp) != 0) {
+               if (!IS_MF(bp))
+                       buf[4] = 1;
+               else
+                       buf[0] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
        if (!netif_running(dev)) {
-               DP(BNX2X_MSG_ETHTOOL,
-                  "Can't perform self-test when interface is down\n");
+               DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
                return;
        }
 
@@ -2964,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev,
                /* wait until link state is restored */
                bnx2x_wait_for_link(bp, link_up, is_serdes);
        }
-       if (bnx2x_test_nvram(bp) != 0) {
-               if (!IS_MF(bp))
-                       buf[4] = 1;
-               else
-                       buf[0] = 1;
-               etest->flags |= ETH_TEST_FL_FAILED;
-       }
+
        if (bnx2x_test_intr(bp) != 0) {
                if (!IS_MF(bp))
                        buf[5] = 1;
index 32767f6aa33f473a126259e4877fa608ed51b3bf..cf1df8b62e2c2785c0560b77ac5ed4b5fc8ae8b3 100644 (file)
@@ -172,6 +172,7 @@ struct shared_hw_cfg {                       /* NVRAM Offset */
                #define SHARED_HW_CFG_LED_MAC4                       0x000c0000
                #define SHARED_HW_CFG_LED_PHY8                       0x000d0000
                #define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
+               #define SHARED_HW_CFG_LED_EXTPHY2                    0x000f0000
 
 
        #define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
index 76df015f486ad9d1653efccde3538dbb47263849..c2dfea7968f452defdca6fd4b1ea68758381038e 100644 (file)
@@ -640,23 +640,35 @@ static const struct {
  * [30] MCP Latched ump_tx_parity
  * [31] MCP Latched scpad_parity
  */
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS  \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS      \
        (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
-        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS  \
+       (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 
 /* Below registers control the MCP parity attention output. When
  * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
  * enabled, when cleared - disabled.
  */
-static const u32 mcp_attn_ctl_regs[] = {
-       MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
-       MISC_REG_AEU_ENABLE4_NIG_0,
-       MISC_REG_AEU_ENABLE4_PXP_0,
-       MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
-       MISC_REG_AEU_ENABLE4_NIG_1,
-       MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+       u32 addr;
+       u32 bits;
+} mcp_attn_ctl_regs[] = {
+       { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+               MISC_AEU_ENABLE_MCP_PRTY_BITS },
+       { MISC_REG_AEU_ENABLE4_NIG_0,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_PXP_0,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+               MISC_AEU_ENABLE_MCP_PRTY_BITS },
+       { MISC_REG_AEU_ENABLE4_NIG_1,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_PXP_1,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
 };
 
 static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
        u32 reg_val;
 
        for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
-               reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+               reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
 
                if (enable)
-                       reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+                       reg_val |= mcp_attn_ctl_regs[i].bits;
                else
-                       reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+                       reg_val &= ~mcp_attn_ctl_regs[i].bits;
 
-               REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+               REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
        }
 }
 
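The hunk above turns a bare list of AEU register addresses into a table pairing each address with the parity bits owned at that register, so enabling or disabling touches only those bits. A generic sketch of that table-driven read-modify-write pattern; the addresses, masks and demo_* names are invented, and reg_read/reg_write stand in for REG_RD/REG_WR.

#include <linux/kernel.h>

struct reg_bits {
	u32 addr;
	u32 bits;
};

static const struct reg_bits demo_regs[] = {
	{ 0x1000, 0x0000000f },	/* hypothetical addresses and masks */
	{ 0x2000, 0x00000003 },
};

static void demo_set_bits(bool enable,
			  u32 (*reg_read)(u32 addr),
			  void (*reg_write)(u32 addr, u32 val))
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_regs); i++) {
		u32 val = reg_read(demo_regs[i].addr);

		if (enable)
			val |= demo_regs[i].bits;	/* set only the owned bits */
		else
			val &= ~demo_regs[i].bits;

		reg_write(demo_regs[i].addr, val);
	}
}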
index d60a2ea3da192a203a18cda07251c3e2ee60ab42..20dcc02431cac441a8bf3076a7fe6db1abfb04e9 100644 (file)
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
 #define EDC_MODE_LINEAR                                0x0022
 #define EDC_MODE_LIMITING                              0x0044
 #define EDC_MODE_PASSIVE_DAC                   0x0055
+#define EDC_MODE_ACTIVE_DAC                    0x0066
 
 /* ETS defines*/
 #define DCBX_INVALID_COS                                       (0xFF)
@@ -3121,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
 }
 
 static int bnx2x_bsc_read(struct link_params *params,
-                         struct bnx2x_phy *phy,
+                         struct bnx2x *bp,
                          u8 sl_devid,
                          u16 sl_addr,
                          u8 lc_addr,
@@ -3130,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
 {
        u32 val, i;
        int rc = 0;
-       struct bnx2x *bp = params->bp;
 
        if (xfer_cnt > 16) {
                DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -3684,6 +3684,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
        bnx2x_update_link_attr(params, vars->link_attr_sync);
 }
 
+static void bnx2x_disable_kr2(struct link_params *params,
+                             struct link_vars *vars,
+                             struct bnx2x_phy *phy)
+{
+       struct bnx2x *bp = params->bp;
+       int i;
+       static struct bnx2x_reg_set reg_set[] = {
+               /* Step 1 - Program the TX/RX alignment markers */
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+       };
+       DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+       for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+                                reg_set[i].val);
+       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+       vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
 static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
                                               struct link_params *params)
 {
@@ -3715,7 +3750,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                                        struct link_params *params,
                                        struct link_vars *vars) {
        u16 lane, i, cl72_ctrl, an_adv = 0;
-       u16 ucode_ver;
        struct bnx2x *bp = params->bp;
        static struct bnx2x_reg_set reg_set[] = {
                {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3840,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
        /* Advertise pause */
        bnx2x_ext_phy_set_pause(params, phy, vars);
-       /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
-        */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
-       if (ucode_ver < 0xd108) {
-               DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
-                              ucode_ver);
-               vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
-       }
+       vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
        bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
 
@@ -3838,6 +3864,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                bnx2x_set_aer_mmd(params, phy);
 
                bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+       } else {
+               bnx2x_disable_kr2(params, vars, phy);
        }
 
        /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4375,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u32 serdes_net_if;
        u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
-       u16 lane = bnx2x_get_warpcore_lane(phy, params);
 
        vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
 
        if (!vars->turn_to_run_wc_rt)
                return;
 
-       /* Return if there is no link partner */
-       if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
-               DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
-               return;
-       }
-
        if (vars->rx_tx_asic_rst) {
+               u16 lane = bnx2x_get_warpcore_lane(phy, params);
                serdes_net_if = (REG_RD(bp, params->shmem_base +
                                offsetof(struct shmem_region, dev_info.
                                port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4397,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
                                /*10G KR*/
                        lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
 
-                       DP(NETIF_MSG_LINK,
-                               "gp_status1 0x%x\n", gp_status1);
-
                        if (lnkup_kr || lnkup) {
-                                       vars->rx_tx_asic_rst = 0;
-                                       DP(NETIF_MSG_LINK,
-                                       "link up, rx_tx_asic_rst 0x%x\n",
-                                       vars->rx_tx_asic_rst);
+                               vars->rx_tx_asic_rst = 0;
                        } else {
                                /* Reset the lane to see if link comes up.*/
                                bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4523,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                         * enabled transmitter to avoid current leakage in case
                         * no module is connected
                         */
-                       if (bnx2x_is_sfp_module_plugged(phy, params))
-                               bnx2x_sfp_module_detection(phy, params);
-                       else
-                               bnx2x_sfp_e3_set_transmitter(params, phy, 1);
+                       if ((params->loopback_mode == LOOPBACK_NONE) ||
+                           (params->loopback_mode == LOOPBACK_EXT)) {
+                               if (bnx2x_is_sfp_module_plugged(phy, params))
+                                       bnx2x_sfp_module_detection(phy, params);
+                               else
+                                       bnx2x_sfp_e3_set_transmitter(params,
+                                                                    phy, 1);
+                       }
 
                        bnx2x_warpcore_config_sfi(phy, params);
                        break;
@@ -5757,6 +5777,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
        rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
                                         duplex);
 
+       /* In case of KR link down, start up the recovering procedure */
+       if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+           (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+               vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
        DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
                   vars->duplex, vars->flow_ctrl, vars->link_status);
        return rc;
@@ -6345,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
                         * intended override.
                         */
                        break;
-               } else
+               } else {
+                       u32 nig_led_mode = ((params->hw_led_mode <<
+                                            SHARED_HW_CFG_LED_MODE_SHIFT) ==
+                                           SHARED_HW_CFG_LED_EXTPHY2) ?
+                               (SHARED_HW_CFG_LED_PHY1 >>
+                                SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                              hw_led_mode);
+                              nig_led_mode);
+               }
 
                REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
                /* Set blinking rate to ~15.9Hz */
@@ -6507,6 +6538,11 @@ static int bnx2x_link_initialize(struct link_params *params,
                        params->phy[INT_PHY].config_init(phy, params, vars);
        }
 
+       /* Re-read this value in case it was changed inside config_init due to
+        * limitations of optic module
+        */
+       vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
        /* Init external phy*/
        if (non_ext_phy) {
                if (params->phy[INT_PHY].supported &
@@ -7886,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                        usleep_range(1000, 2000);
                        bnx2x_warpcore_power_module(params, 1);
                }
-               rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+               rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
                                    data_array);
        } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
 
@@ -8080,7 +8116,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                if (copper_module_type &
                    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
                        DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
-                       check_limiting_mode = 1;
+                       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+                               *edc_mode = EDC_MODE_ACTIVE_DAC;
+                       else
+                               check_limiting_mode = 1;
                } else if (copper_module_type &
                        SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
                                DP(NETIF_MSG_LINK,
@@ -8555,6 +8594,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
                mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
                break;
        case EDC_MODE_PASSIVE_DAC:
+       case EDC_MODE_ACTIVE_DAC:
                mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
                break;
        default:
@@ -9730,32 +9770,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                         MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
                         an_1000_val);
 
-       /* set 100 speed advertisement */
-       if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
-            (phy->speed_cap_mask &
-             (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
-               an_10_100_val |= (1<<7);
-               /* Enable autoneg and restart autoneg for legacy speeds */
-               autoneg_val |= (1<<9 | 1<<12);
-
-               if (phy->req_duplex == DUPLEX_FULL)
+       /* Set 10/100 speed advertisement */
+       if (phy->req_line_speed == SPEED_AUTO_NEG) {
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+                       /* Enable autoneg and restart autoneg for legacy speeds
+                        */
+                       autoneg_val |= (1<<9 | 1<<12);
                        an_10_100_val |= (1<<8);
-               DP(NETIF_MSG_LINK, "Advertising 100M\n");
-       }
-       /* set 10 speed advertisement */
-       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-            (phy->speed_cap_mask &
-             (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
-            (phy->supported &
-             (SUPPORTED_10baseT_Half |
-              SUPPORTED_10baseT_Full)))) {
-               an_10_100_val |= (1<<5);
-               autoneg_val |= (1<<9 | 1<<12);
-               if (phy->req_duplex == DUPLEX_FULL)
+                       DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+               }
+
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+                       /* Enable autoneg and restart autoneg for legacy speeds
+                        */
+                       autoneg_val |= (1<<9 | 1<<12);
+                       an_10_100_val |= (1<<7);
+                       DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+               }
+
+               if ((phy->speed_cap_mask &
+                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+                   (phy->supported & SUPPORTED_10baseT_Full)) {
                        an_10_100_val |= (1<<6);
-               DP(NETIF_MSG_LINK, "Advertising 10M\n");
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+               }
+
+               if ((phy->speed_cap_mask &
+                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+                   (phy->supported & SUPPORTED_10baseT_Half)) {
+                       an_10_100_val |= (1<<5);
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+               }
        }
 
        /* Only 10/100 are allowed to work in FORCE mode */
@@ -10609,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         0x40);
 
                } else {
+                       /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED
+                        * sources are all wired through LED1, rather than only
+                        * 10G in other modes.
+                        */
+                       val = ((params->hw_led_mode <<
+                               SHARED_HW_CFG_LED_MODE_SHIFT) ==
+                              SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
-                                        0x80);
+                                        val);
 
                        /* Tell LED3 to blink on source */
                        bnx2x_cl45_read(bp, phy,
@@ -13432,43 +13489,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
                }
        }
 }
-static void bnx2x_disable_kr2(struct link_params *params,
-                             struct link_vars *vars,
-                             struct bnx2x_phy *phy)
-{
-       struct bnx2x *bp = params->bp;
-       int i;
-       static struct bnx2x_reg_set reg_set[] = {
-               /* Step 1 - Program the TX/RX alignment markers */
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
-       };
-       DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
-
-       for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
-                                reg_set[i].val);
-       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
-       bnx2x_update_link_attr(params, vars->link_attr_sync);
-
-       vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
-       /* Restart AN on leading lane */
-       bnx2x_warpcore_restart_AN_KR(phy, params);
-}
-
 static void bnx2x_kr2_recovery(struct link_params *params,
                               struct link_vars *vars,
                               struct bnx2x_phy *phy)
@@ -13546,6 +13566,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
                /* Disable KR2 on both lanes */
                DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
                bnx2x_disable_kr2(params, vars, phy);
+               /* Restart AN on leading lane */
+               bnx2x_warpcore_restart_AN_KR(phy, params);
                return;
        }
 }
index a6704b555042dfd26eab956f64082578bcf51a09..e622cc1f96ffe58336ee8c24f12c54e1679e11b7 100644 (file)
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 }
 
 /* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+                              u32 *comp)
 {
-       u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;
 
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
        spin_lock_bh(&bp->dmae_lock);
 
        /* reset completion */
-       *wb_comp = 0;
+       *comp = 0;
 
        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 
        /* wait for completion */
        udelay(5);
-       while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+       while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
 
                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
                cnt--;
                udelay(50);
        }
-       if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+       if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
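
The bnx2x_issue_dmae_with_comp() change above lets each caller pass the u32 completion word the DMAE engine should write, instead of the helper always using the wb_comp scratch slot. A rough sketch of the underlying poll-a-completion-word shape; demo_* names, DEMO_DONE_VAL and the delay/poll constants are all illustrative.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_DONE_VAL	0xd0d0d0d0u

static int demo_wait_done(volatile u32 *comp, unsigned int max_polls)
{
	*comp = 0;			/* reset before posting the command */

	/* ...post the command to the hardware here... */

	while (*comp != DEMO_DONE_VAL) {
		if (!max_polls--)
			return -ETIMEDOUT;
		udelay(50);		/* hardware writes *comp when done */
	}
	return 0;
}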
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
        return rc;
 }
 
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp)        \
+       (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 {
        u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
-       addr = trace_shmem_base - 0x800;
+
+       /* sanity */
+       if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+           trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+                               SCRATCH_BUFFER_SIZE(bp)) {
+               BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+                         trace_shmem_base);
+               return;
+       }
+
+       addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
 
        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
-       mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
-                       + ((mark + 0x3) & ~0x3) - 0x08000000;
+       mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+       if (mark >= trace_shmem_base || mark < addr + 4) {
+               BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+               return;
+       }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
 
        printk("%s", lvl);
 
        /* dump buffer after the mark */
-       for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+       for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
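
The added sanity checks above refuse to dump when the firmware-reported trace buffer address falls outside the MCP scratch window, and likewise for the cyclic mark. The underlying check is just a range test on a device-supplied offset; a tiny sketch with illustrative names.

#include <linux/types.h>

static bool demo_in_window(u32 off, u32 win_base, u32 win_size, u32 reserved)
{
	/* accept only offsets inside [win_base + reserved, win_base + win_size) */
	return off >= win_base + reserved && off < win_base + win_size;
}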
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
        pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "BRB");
+                       res |= true; /* Each bit is real error! */
+
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "BRB");
                                        _print_parity(bp,
                                                      BRB1_REG_BRB1_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PARSER");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PARSER");
                                        _print_parity(bp, PRS_REG_PRS_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TSDM");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "TSDM");
                                        _print_parity(bp,
                                                      TSDM_REG_TSDM_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++,
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
                                                          "SEARCHER");
                                        _print_parity(bp, SRC_REG_SRC_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TCM");
-                                       _print_parity(bp,
-                                                     TCM_REG_TCM_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TSEMI");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "TCM");
+                                       _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "TSEMI");
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "XPB");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "XPB");
                                        _print_parity(bp, GRCBASE_XPB +
                                                          PB_REG_PB_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
 
                        /* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool *global,
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool *global,
                                            bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
+                       res |= true; /* Each bit is a real error! */
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "PBF");
+                                       _print_next_block((*par_num)++, "PBF");
                                        _print_parity(bp, PBF_REG_PBF_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "QM");
+                                       _print_next_block((*par_num)++, "QM");
                                        _print_parity(bp, QM_REG_QM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "TM");
+                                       _print_next_block((*par_num)++, "TM");
                                        _print_parity(bp, TM_REG_TM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XSDM");
+                                       _print_next_block((*par_num)++, "XSDM");
                                        _print_parity(bp,
                                                      XSDM_REG_XSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XCM");
+                                       _print_next_block((*par_num)++, "XCM");
                                        _print_parity(bp, XCM_REG_XCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XSEMI");
+                                       _print_next_block((*par_num)++,
+                                                         "XSEMI");
                                        _print_parity(bp,
                                                      XSEM_REG_XSEM_PRTY_STS_0);
                                        _print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "DOORBELLQ");
                                        _print_parity(bp,
                                                      DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "NIG");
+                                       _print_next_block((*par_num)++, "NIG");
                                        if (CHIP_IS_E1x(bp)) {
                                                _print_parity(bp,
                                                        NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "VAUX PCI CORE");
                                *global = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "DEBUG");
+                                       _print_next_block((*par_num)++,
+                                                         "DEBUG");
                                        _print_parity(bp, DBG_REG_DBG_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "USDM");
+                                       _print_next_block((*par_num)++, "USDM");
                                        _print_parity(bp,
                                                      USDM_REG_USDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "UCM");
+                                       _print_next_block((*par_num)++, "UCM");
                                        _print_parity(bp, UCM_REG_UCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "USEMI");
+                                       _print_next_block((*par_num)++,
+                                                         "USEMI");
                                        _print_parity(bp,
                                                      USEM_REG_USEM_PRTY_STS_0);
                                        _print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "UPB");
+                                       _print_next_block((*par_num)++, "UPB");
                                        _print_parity(bp, GRCBASE_UPB +
                                                          PB_REG_PB_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "CSDM");
+                                       _print_next_block((*par_num)++, "CSDM");
                                        _print_parity(bp,
                                                      CSDM_REG_CSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "CCM");
+                                       _print_next_block((*par_num)++, "CCM");
                                        _print_parity(bp, CCM_REG_CCM_PRTY_STS);
                                }
                                break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CSEMI");
+                       res |= true; /* Each bit is a real error! */
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "CSEMI");
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PXP");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "PXP");
                                        _print_parity(bp, PXP_REG_PXP_PRTY_STS);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_0);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
-                               if (print)
-                                       _print_next_block(par_num++,
-                                       "PXPPCICLOCKCLIENT");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CFC");
+                                       break;
+                               case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PXPPCICLOCKCLIENT");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "CFC");
                                        _print_parity(bp,
                                                      CFC_REG_CFC_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CDU");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "CDU");
                                        _print_parity(bp, CDU_REG_CDU_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "DMAE");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "DMAE");
                                        _print_parity(bp,
                                                      DMAE_REG_DMAE_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "IGU");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "IGU");
                                        if (CHIP_IS_E1x(bp))
                                                _print_parity(bp,
                                                        HC_REG_HC_PRTY_STS);
                                        else
                                                _print_parity(bp,
                                                        IGU_REG_IGU_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "MISC");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "MISC");
                                        _print_parity(bp,
                                                      MISC_REG_MISC_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
 
                        /* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
-                                          bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool *global,
+                                           bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       bool res = false;
+       u32 cur_bit;
+       int i;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
                                if (print)
-                                       _print_next_block(par_num++, "MCP ROM");
+                                       _print_next_block((*par_num)++,
+                                                         "MCP ROM");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP UMP RX");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP UMP TX");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP SCPAD");
-                               *global = true;
+                               /* clear latched SCPAD PARITY from MCP */
+                               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+                                      1UL << 10);
                                break;
                        }
 
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PGLUE_B");
+                       res |= true; /* Each bit is a real error! */
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PGLUE_B");
                                        _print_parity(bp,
-                                               PGLUE_B_REG_PGLUE_B_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "ATC");
+                                                     PGLUE_B_REG_PGLUE_B_PRTY_STS);
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "ATC");
                                        _print_parity(bp,
                                                      ATC_REG_ATC_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
-
                        /* Clear the bit */
                        sig &= ~cur_bit;
                }
        }
 
-       return par_num;
+       return res;
 }
 
 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                              u32 *sig)
 {
+       bool res = false;
+
        if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
            (sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                if (print)
                        netdev_err(bp->dev,
                                   "Parity errors detected in blocks: ");
-               par_num = bnx2x_check_blocks_with_parity0(bp,
-                       sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity1(bp,
-                       sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity2(bp,
-                       sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity3(
-                       sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity4(bp,
-                       sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+               res |= bnx2x_check_blocks_with_parity0(bp,
+                       sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+               res |= bnx2x_check_blocks_with_parity1(bp,
+                       sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+               res |= bnx2x_check_blocks_with_parity2(bp,
+                       sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+               res |= bnx2x_check_blocks_with_parity3(bp,
+                       sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+               res |= bnx2x_check_blocks_with_parity4(bp,
+                       sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
 
                if (print)
                        pr_cont("\n");
+       }
 
-               return true;
-       } else
-               return false;
+       return res;
 }
 
 /**
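
With this refactor each bnx2x_check_blocks_with_parity*() helper reports through its return value whether its group contained any real parity error and advances the shared print counter through a pointer, so the caller only needs to OR the results together. A small stand-alone sketch of that aggregation pattern; the checker names and bit assignments are invented:

    #include <stdbool.h>
    #include <stdio.h>

    /* Each checker returns whether its group had an error and bumps the
     * shared print counter through a pointer; the caller ORs the results.
     */
    static bool check_group_a(unsigned int sig, int *par_num)
    {
            if (!(sig & 0x1))
                    return false;
            printf("%sA", (*par_num)++ ? ", " : "");
            return true;
    }

    static bool check_group_b(unsigned int sig, int *par_num)
    {
            if (!(sig & 0x2))
                    return false;
            printf("%sB", (*par_num)++ ? ", " : "");
            return true;
    }

    int main(void)
    {
            unsigned int sig = 0x3;
            int par_num = 0;
            bool res = false;

            res |= check_group_a(sig, &par_num);
            res |= check_group_b(sig, &par_num);
            printf("\nany parity error: %d\n", res);
            return 0;
    }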
@@ -4703,6 +4729,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
        attn.sig[3] = REG_RD(bp,
                MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
                             port*4);
+       /* Since MCP attentions can't be disabled inside the block, we need to
+        * read AEU registers to see whether they're currently disabled
+        */
+       attn.sig[3] &= ((REG_RD(bp,
+                               !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+                                     : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+                        MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+                       ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
 
        if (!CHIP_IS_E1x(bp))
                attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5481,24 @@ static void bnx2x_timer(unsigned long data)
        if (IS_PF(bp) &&
            !BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);
-               u32 drv_pulse;
-               u32 mcp_pulse;
+               u16 drv_pulse;
+               u16 mcp_pulse;
 
                ++bp->fw_drv_pulse_wr_seq;
                bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
-               /* TBD - add SYSTEM_TIME */
                drv_pulse = bp->fw_drv_pulse_wr_seq;
                bnx2x_drv_pulse(bp);
 
                mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
                             MCP_PULSE_SEQ_MASK);
                /* The delta between driver pulse and mcp response
-                * should be 1 (before mcp response) or 0 (after mcp response)
+                * should not get too big. If the MFW is more than 5 pulses
+                * behind, we should worry about it enough to generate an error
+                * log.
                 */
-               if ((drv_pulse != mcp_pulse) &&
-                   (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
-                       /* someone lost a heartbeat... */
-                       BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+               if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+                       BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                                  drv_pulse, mcp_pulse);
-               }
        }
 
        if (bp->state == BNX2X_STATE_OPEN)
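
The reworked heartbeat test takes the driver/MCP pulse difference modulo the sequence mask, so a counter that has wrapped past zero still yields a small delta and only a genuine lag of more than 5 pulses is flagged. A stand-alone sketch of that wrap-safe comparison; the mask value and threshold here are illustrative, not the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define SEQ_MASK 0x7fff   /* illustrative sequence mask */

    /* Wrap-safe check: the masked difference stays small even when the
     * newer counter has wrapped around zero.
     */
    static int mfw_lagging(uint16_t drv_pulse, uint16_t mcp_pulse)
    {
            return ((drv_pulse - mcp_pulse) & SEQ_MASK) > 5;
    }

    int main(void)
    {
            printf("%d\n", mfw_lagging(0x0002, 0x7ffe)); /* wrapped, delta 4 -> fine    */
            printf("%d\n", mfw_lagging(0x0100, 0x00f0)); /* delta 16 -> MFW looks stuck */
            return 0;
    }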
@@ -7120,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        int port = BP_PORT(bp);
        int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
        u32 low, high;
-       u32 val;
+       u32 val, reg;
 
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
@@ -7265,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        val |= CHIP_IS_E1(bp) ? 0 : 0x10;
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
 
+       /* SCPAD_PARITY should NOT trigger a 'close the gates' */
+       reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+       REG_WR(bp, reg,
+              REG_RD(bp, reg) &
+              ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+       reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+       REG_WR(bp, reg,
+              REG_RD(bp, reg) &
+              ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
        bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 
        if (!CHIP_IS_E1x(bp)) {
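
This hunk, like the attn.sig[3] gating added earlier, manipulates only the MCP parity bits and leaves every other attention bit untouched. A stand-alone sketch of that masking idea, with invented bit values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MCP_PRTY_BITS 0xf0000000u   /* invented bit positions */

    /* Keep non-MCP bits as-is; keep MCP bits only where the enable mask
     * says they are active.
     */
    static uint32_t gate_mcp_bits(uint32_t attn, uint32_t aeu_enable)
    {
            return attn & ((aeu_enable & MCP_PRTY_BITS) | ~MCP_PRTY_BITS);
    }

    int main(void)
    {
            /* MCP bits disabled in the AEU: they are dropped from attn */
            printf("0x%08" PRIx32 "\n", gate_mcp_bits(0xf00000ffu, 0x0));
            /* MCP bits enabled: attn passes through unchanged */
            printf("0x%08" PRIx32 "\n", gate_mcp_bits(0xf00000ffu, MCP_PRTY_BITS));
            return 0;
    }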
@@ -9873,7 +9916,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
 {
        struct bnx2x_prev_path_list *tmp_list;
-       int rc = false;
+       bool rc = false;
 
        if (down_trylock(&bnx2x_prev_sem))
                return false;
@@ -11143,6 +11186,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_get_cnic_mac_hwinfo(bp);
        }
 
+       if (!BP_NOMCP(bp)) {
+               /* Read physical port identifier from shmem */
+               val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+               val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+               bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+               bp->flags |= HAS_PHYS_PORT_ID;
+       }
+
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 
        if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11679,9 +11730,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 static int bnx2x_open(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       bool global = false;
-       int other_engine = BP_PATH(bp) ? 0 : 1;
-       bool other_load_status, load_status;
        int rc;
 
        bp->stats_init = true;
@@ -11697,6 +11745,10 @@ static int bnx2x_open(struct net_device *dev)
         * Parity recovery is only relevant for PF driver.
         */
        if (IS_PF(bp)) {
+               int other_engine = BP_PATH(bp) ? 0 : 1;
+               bool other_load_status, load_status;
+               bool global = false;
+
                other_load_status = bnx2x_get_load_status(bp, other_engine);
                load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
                if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -11740,7 +11792,7 @@ static int bnx2x_open(struct net_device *dev)
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
        if (rc)
                return rc;
-       return bnx2x_open_epilog(bp);
+       return 0;
 }
 
 /* called with rtnl_lock */
@@ -12038,6 +12090,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
        return 0;
 }
 
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+                                 struct netdev_phys_port_id *ppid)
+{
+       struct bnx2x *bp = netdev_priv(netdev);
+
+       if (!(bp->flags & HAS_PHYS_PORT_ID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = sizeof(bp->phys_port_id);
+       memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+       return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -12067,19 +12133,15 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = bnx2x_low_latency_recv,
 #endif
+       .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 {
        struct device *dev = &bp->pdev->dev;
 
-       if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
-               bp->flags |= USING_DAC_FLAG;
-               if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
-                       dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
-                       return -EIO;
-               }
-       } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+       if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+           dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(dev, "System does not support DMA, aborting\n");
                return -EIO;
        }
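
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, and the 64-bit attempt now simply falls through to a 32-bit retry before giving up. A toy sketch of the same fallback shape; try_mask() is invented and only stands in for the real call:

    #include <stdio.h>

    /* Pretend only a 32-bit mask is supported, to exercise the fallback. */
    static int try_mask(int bits)
    {
            return bits <= 32 ? 0 : -1;
    }

    int main(void)
    {
            if (try_mask(64) != 0 && try_mask(32) != 0) {
                    fprintf(stderr, "System does not support DMA, aborting\n");
                    return 1;
            }
            printf("DMA mask configured\n");
            return 0;
    }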
@@ -12231,10 +12293,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
                NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
        if (!CHIP_IS_E1x(bp)) {
-               dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+               dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+                                   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
                dev->hw_enc_features =
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
                        NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+                       NETIF_F_GSO_IPIP |
+                       NETIF_F_GSO_SIT |
                        NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
        }
 
@@ -12242,8 +12307,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
-       if (bp->flags & USING_DAC_FLAG)
-               dev->features |= NETIF_F_HIGHDMA;
+       dev->features |= NETIF_F_HIGHDMA;
 
        /* Add Loopback capability to the device */
        dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12268,34 +12332,11 @@ err_out_release:
 
 err_out_disable:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
 err_out:
        return rc;
 }
 
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
-                                      enum bnx2x_pci_bus_speed *speed)
-{
-       u32 link_speed, val = 0;
-
-       pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
-       *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
-       link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
-       switch (link_speed) {
-       case 3:
-               *speed = BNX2X_PCI_LINK_SPEED_8000;
-               break;
-       case 2:
-               *speed = BNX2X_PCI_LINK_SPEED_5000;
-               break;
-       default:
-               *speed = BNX2X_PCI_LINK_SPEED_2500;
-       }
-}
-
 static int bnx2x_check_firmware(struct bnx2x *bp)
 {
        const struct firmware *firmware = bp->firmware;
@@ -12606,24 +12647,24 @@ static int set_max_cos_est(int chip_id)
                return BNX2X_MULTI_TX_COS_E1X;
        case BCM57712:
        case BCM57712_MF:
-       case BCM57712_VF:
                return BNX2X_MULTI_TX_COS_E2_E3A0;
        case BCM57800:
        case BCM57800_MF:
-       case BCM57800_VF:
        case BCM57810:
        case BCM57810_MF:
        case BCM57840_4_10:
        case BCM57840_2_20:
        case BCM57840_O:
        case BCM57840_MFO:
-       case BCM57810_VF:
        case BCM57840_MF:
-       case BCM57840_VF:
        case BCM57811:
        case BCM57811_MF:
-       case BCM57811_VF:
                return BNX2X_MULTI_TX_COS_E3B0;
+       case BCM57712_VF:
+       case BCM57800_VF:
+       case BCM57810_VF:
+       case BCM57840_VF:
+       case BCM57811_VF:
                return 1;
        default:
                pr_err("Unknown board_type (%d), aborting\n", chip_id);
@@ -12652,8 +12693,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 {
        struct net_device *dev = NULL;
        struct bnx2x *bp;
-       int pcie_width;
-       enum bnx2x_pci_bus_speed pcie_speed;
+       enum pcie_link_width pcie_width;
+       enum pci_bus_speed pcie_speed;
        int rc, max_non_def_sbs;
        int rx_count, tx_count, rss_count, doorbell_size;
        int max_cos_est;
@@ -12802,18 +12843,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
        }
-
-       bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-       BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
-                      pcie_width, pcie_speed);
-
-       BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+       if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+           pcie_speed == PCI_SPEED_UNKNOWN ||
+           pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+               BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+       else
+               BNX2X_DEV_INFO(
+                      "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
                       board_info[ent->driver_data].name,
                       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
                       pcie_width,
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+                      pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+                      pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+                      pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
                       "Unknown",
                       dev->base_addr, bp->pdev->irq, dev->dev_addr);
 
@@ -12832,7 +12874,6 @@ init_one_exit:
                pci_release_regions(pdev);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        return rc;
 }
@@ -12915,7 +12956,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
                pci_release_regions(pdev);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void bnx2x_remove_one(struct pci_dev *pdev)
index 2604b6204abe03965cf0f89dd48b5a25bf00e7e1..71fffad94affa6ba994fcd454edba15de227b746 100644 (file)
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
                                 bnx2x_vfop_qdtor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
                                             cmd->block);
+       } else {
+               BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
+               return -ENOMEM;
        }
-       DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
-          vf->abs_vfid, vfop->rc);
-       return -ENOMEM;
 }
 
 static void
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
                if (fid & IGU_FID_ENCODE_IS_PF)
                        current_pf = fid & IGU_FID_PF_NUM_MASK;
-               else if (current_pf == BP_ABS_FUNC(bp))
+               else if (current_pf == BP_FUNC(bp))
                        bnx2x_vf_set_igu_info(bp, sb_id,
                                              (fid & IGU_FID_VF_NUM_MASK));
                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -2802,7 +2802,7 @@ struct set_vf_state_cookie {
        u8 state;
 };
 
-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
 {
        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
 
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
                /* set local queue arrays */
                vf->vfqs = &bp->vfdb->vfqs[qcount];
                qcount += vf_sb_count(vf);
+               bnx2x_iov_static_resc(bp, vf);
        }
 
        /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
                       num_vf_queues);
+               DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+                  vf_idx, num_vf_queues);
        }
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
@@ -3222,8 +3225,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
        pci_disable_sriov(bp->pdev);
 }
 
-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
-                       struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+                            struct bnx2x_virtf **vf,
+                            struct pf_vf_bulletin_content **bulletin)
 {
        if (bp->state != BNX2X_STATE_OPEN) {
                BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3387,14 +3391,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete eth macs\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* remove existing uc list macs */
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete uc_list macs\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* configure the new mac to device */
@@ -3402,6 +3408,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
                                  BNX2X_ETH_MAC, &ramrod_flags);
 
+out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
@@ -3464,7 +3471,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                                          &ramrod_flags);
                if (rc) {
                        BNX2X_ERR("failed to delete vlans\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* send queue update ramrod to configure default vlan and silent
@@ -3498,7 +3506,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
                        if (rc) {
                                BNX2X_ERR("failed to configure vlan\n");
-                               return -EINVAL;
+                               rc =  -EINVAL;
+                               goto out;
                        }
 
                        /* configure default vlan to vf queue and set silent
@@ -3516,18 +3525,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                rc = bnx2x_queue_state_change(bp, &q_params);
                if (rc) {
                        BNX2X_ERR("Failed to configure default VLAN\n");
-                       return rc;
+                       goto out;
                }
 
                /* clear the flag indicating that this VF needs its vlan
-                * (will only be set if the HV configured th Vlan before vf was
-                * and we were called because the VF came up later
+                * (will only be set if the HV configured the VLAN before the VF
+                * was up and we were called because the VF came up later)
                 */
+out:
                vf->cfg_flags &= ~VF_CFG_VLAN;
-
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
        }
-       return 0;
+       return rc;
 }
 
 /* crc is the first field in the bulletin board. Compute the crc over the
@@ -3634,29 +3643,6 @@ alloc_mem_err:
        return -ENOMEM;
 }
 
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
-       /* Enable sriov via delayed work. This must be done via delayed work
-        * because it causes the probe of the vf devices to be run, which invoke
-        * register_netdevice which must have rtnl lock taken. As we are holding
-        * the lock right now, that could only work if the probe would not take
-        * the lock. However, as the probe of the vf may be called from other
-        * contexts as well (such as passthrough to vm fails) it can't assume
-        * the lock is being held for it. Using delayed work here allows the
-        * probe code to simply take the lock (i.e. wait for it to be released
-        * if it is being held). We only want to do this if the number of VFs
-        * was set before PF driver was loaded.
-        */
-       if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
-       }
-
-       return 0;
-}
-
 void bnx2x_iov_channel_down(struct bnx2x *bp)
 {
        int vf_idx;
index 059f0d460af2a249e631a913ec98da290d78f323..1ff6a9366629ed88fe79a079391c92e95d1e9baf 100644 (file)
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
 void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
 
 #else /* CONFIG_BNX2X_SRIOV */
 
@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
 
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
index 86436c77af036d7884b9cc9525646a44f3ec4acf..3b75070411aab83136ac2c245ba9c65682aefebe 100644 (file)
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
 
        } else if (bp->func_stx) {
                *stats_comp = 0;
-               bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+               bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
        }
 }
 
index 6cfb8873245281a74222926b6052838d6df1eca0..9199adf32d33c1639dfa2354e609ef92b638ef91 100644 (file)
@@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
        mutex_unlock(&bp->vf2pf_mutex);
 }
 
+/* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+                                  enum channel_tlvs req_tlv)
+{
+       struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+       do {
+               if (tlv->type == req_tlv)
+                       return tlv;
+
+               if (!tlv->length) {
+                       BNX2X_ERR("Found TLV with length 0\n");
+                       return NULL;
+               }
+
+               tlvs_list += tlv->length;
+               tlv = (struct channel_tlv *)tlvs_list;
+       } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+       DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+       return NULL;
+}
+
 /* list the types and lengths of the tlvs on the buffer */
 void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
 {
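
The new bnx2x_search_tlv_list() walks the TLV chain by adding each entry's length to the cursor until it meets the requested type or the list terminator, and bails out on a zero-length entry instead of spinning forever. A small stand-alone walker in the same spirit; the struct layout and type values below are invented:

    #include <stdint.h>
    #include <stdio.h>

    enum { TLV_END = 0, TLV_FOO = 1, TLV_BAR = 2 };

    struct tlv {
            uint16_t type;
            uint16_t length;        /* total size of this entry in bytes */
    };

    /* Advance by each entry's length until the wanted type or the end
     * marker is found; refuse to loop on a malformed zero-length entry.
     */
    static struct tlv *find_tlv(void *list, uint16_t want)
    {
            struct tlv *tlv = list;

            while (tlv->type != TLV_END) {
                    if (tlv->type == want)
                            return tlv;
                    if (!tlv->length)
                            return NULL;
                    tlv = (struct tlv *)((uint8_t *)tlv + tlv->length);
            }
            return NULL;
    }

    int main(void)
    {
            uint16_t buf[32] = {0};
            uint8_t *p = (uint8_t *)buf;

            ((struct tlv *)p)->type = TLV_FOO;
            ((struct tlv *)p)->length = 8;
            ((struct tlv *)(p + 8))->type = TLV_BAR;
            ((struct tlv *)(p + 8))->length = 8;
            /* the entry at p + 16 is already TLV_END (zero-initialized) */

            printf("found BAR at offset %td\n",
                   (uint8_t *)find_tlv(p, TLV_BAR) - p);
            return 0;
    }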
@@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        int rc = 0, attempts = 0;
        struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
        struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+       struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
        u32 vf_id;
        bool resources_acquired = false;
 
@@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        /* pf 2 vf bulletin board address */
        req->bulletin_addr = bp->pf2vf_bulletin_mapping;
 
+       /* Request physical port identifier */
+       bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+                     CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
        /* add list termination tlv */
-       bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+       bnx2x_add_tlv(bp, req,
+                     req->first_tlv.tl.length + sizeof(struct channel_tlv),
+                     CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
 
        /* output tlvs list */
@@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
                }
        }
 
+       /* Retrieve physical port id (if possible) */
+       phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+                        bnx2x_search_tlv_list(bp, resp,
+                                              CHANNEL_TLV_PHYS_PORT_ID);
+       if (phys_port_resp) {
+               memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+               bp->flags |= HAS_PHYS_PORT_ID;
+       }
+
        /* get HW info */
        bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
        bp->link_params.chip_id = bp->common.chip_id;
@@ -980,56 +1020,62 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       return bnx2x_issue_dmae_with_comp(bp, &dmae);
+       return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 }
 
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+                                        struct bnx2x_virtf *vf)
 {
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
-       u64 vf_addr;
-       dma_addr_t pf_addr;
        u16 length, type;
-       int rc;
-       struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
 
        /* prepare response */
        type = mbx->first_tlv.tl.type;
        length = type == CHANNEL_TLV_ACQUIRE ?
                sizeof(struct pfvf_acquire_resp_tlv) :
                sizeof(struct pfvf_general_resp_tlv);
-       bnx2x_add_tlv(bp, resp, 0, type, length);
-       resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
-       bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+       bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+       bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf)
+{
+       struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+       struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+       dma_addr_t pf_addr;
+       u64 vf_addr;
+       int rc;
+
        bnx2x_dp_tlv_list(bp, resp);
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
 
+       resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
        /* send response */
        vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
                  mbx->first_tlv.resp_msg_offset;
        pf_addr = mbx->msg_mapping +
                  offsetof(struct bnx2x_vf_mbx_msg, resp);
 
-       /* copy the response body, if there is one, before the header, as the vf
-        * is sensitive to the header being written
+       /* Copy the response buffer. The first u64 is written afterwards, as
+        * the vf is sensitive to the header being written
         */
-       if (resp->hdr.tl.length > sizeof(u64)) {
-               length = resp->hdr.tl.length - sizeof(u64);
-               vf_addr += sizeof(u64);
-               pf_addr += sizeof(u64);
-               rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
-                                         U64_HI(vf_addr),
-                                         U64_LO(vf_addr),
-                                         length/4);
-               if (rc) {
-                       BNX2X_ERR("Failed to copy response body to VF %d\n",
-                                 vf->abs_vfid);
-                       goto mbx_error;
-               }
-               vf_addr -= sizeof(u64);
-               pf_addr -= sizeof(u64);
+       vf_addr += sizeof(u64);
+       pf_addr += sizeof(u64);
+       rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+                                 U64_HI(vf_addr),
+                                 U64_LO(vf_addr),
+                                 (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+       if (rc) {
+               BNX2X_ERR("Failed to copy response body to VF %d\n",
+                         vf->abs_vfid);
+               goto mbx_error;
        }
+       vf_addr -= sizeof(u64);
+       pf_addr -= sizeof(u64);
 
        /* ack the FW */
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
@@ -1060,6 +1106,36 @@ mbx_error:
        bnx2x_vf_release(bp, vf, false); /* non blocking */
 }
 
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf)
+{
+       bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+       bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+                                       struct bnx2x_virtf *vf,
+                                       void *buffer,
+                                       u16 *offset)
+{
+       struct vfpf_port_phys_id_resp_tlv *port_id;
+
+       if (!(bp->flags & HAS_PHYS_PORT_ID))
+               return;
+
+       bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+                     sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+       port_id = (struct vfpf_port_phys_id_resp_tlv *)
+                 (((u8 *)buffer) + *offset);
+       memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+       /* The offset should keep pointing at the tail of the TLV data
+        * (outside this function's scope)
+        */
+       *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                      struct bnx2x_vf_mbx *mbx, int vfop_status)
 {
@@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
        struct pf_vf_resc *resc = &resp->resc;
        u8 status = bnx2x_pfvf_status_codes(vfop_status);
+       u16 length;
 
        memset(resp, 0, sizeof(*resp));
 
@@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        resc->hw_sbs[i].sb_qid);
        DP_CONT(BNX2X_MSG_IOV, "]\n");
 
+       /* prepare response */
+       length = sizeof(struct pfvf_acquire_resp_tlv);
+       bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+       /* Handle possible VF requests for physical port identifiers.
+        * 'length' should continue to indicate the offset of the first empty
+        * place in the buffer (i.e., where next TLV should be inserted)
+        * place in the buffer (i.e., where the next TLV should be inserted)
+       if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+                                 CHANNEL_TLV_PHYS_PORT_ID))
+               bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+       bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
        /* send the response */
        vf->op_rc = vfop_status;
-       bnx2x_vf_mbx_resp(bp, vf);
+       bnx2x_vf_mbx_resp_send_msg(bp, vf);
 }
 
 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1765,28 +1857,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        bnx2x_vf_mbx_acquire(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_INIT:
                        bnx2x_vf_mbx_init_vf(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_SETUP_Q:
                        bnx2x_vf_mbx_setup_q(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_SET_Q_FILTERS:
                        bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_TEARDOWN_Q:
                        bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_CLOSE:
                        bnx2x_vf_mbx_close_vf(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_RELEASE:
                        bnx2x_vf_mbx_release_vf(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_UPDATE_RSS:
                        bnx2x_vf_mbx_update_rss(bp, vf, mbx);
-                       break;
+                       return;
                }
 
        } else {
@@ -1802,26 +1894,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                for (i = 0; i < 20; i++)
                        DP_CONT(BNX2X_MSG_IOV, "%x ",
                                mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+       }
 
-               /* test whether we can respond to the VF (do we have an address
-                * for it?)
-                */
-               if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
-                       /* mbx_resp uses the op_rc of the VF */
-                       vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+       /* can we respond to VF (do we have an address for it?) */
+       if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+               /* mbx_resp uses the op_rc of the VF */
+               vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
 
-                       /* notify the VF that we do not support this request */
-                       bnx2x_vf_mbx_resp(bp, vf);
-               } else {
-                       /* can't send a response since this VF is unknown to us
-                        * just ack the FW to release the mailbox and unlock
-                        * the channel.
-                        */
-                       storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
-                       mmiowb();
-                       bnx2x_unlock_vf_pf_channel(bp, vf,
-                                                  mbx->first_tlv.tl.type);
-               }
+               /* notify the VF that we do not support this request */
+               bnx2x_vf_mbx_resp(bp, vf);
+       } else {
+               /* can't send a response since this VF is unknown to us
+                * just ack the FW to release the mailbox and unlock
+                * the channel.
+                */
+               storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+               /* Firmware ack should be written before unlocking channel */
+               mmiowb();
+               bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
        }
 }
 
@@ -1876,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
        /* process the VF message header */
        mbx->first_tlv = mbx->msg->req.first_tlv;
 
+       /* Clean response buffer to refrain from falsely seeing chains */
+       memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
        /* dispatch the request (will prepare the response) */
        bnx2x_vf_mbx_request(bp, vf, mbx);
        goto mbx_done;
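
The acquire path above now emits a chained response: the mandatory acquire TLV goes in at offset 0, the optional physical-port-ID TLV is appended only when the VF's request contained CHANNEL_TLV_PHYS_PORT_ID, and a LIST_END TLV terminates the chain; the response buffer is zeroed first so a shorter reply cannot expose a stale chain left over from an earlier, longer one. The standalone sketch below walks the same offset bookkeeping; the type codes and lengths are placeholders, not the driver's values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder TLV header; the driver's equivalent is struct channel_tlv. */
struct tlv_hdr {
	uint16_t type;
	uint16_t length;		/* whole TLV, header included */
};

/* Placeholder type codes, not the driver's CHANNEL_TLV_* values. */
enum { TLV_LIST_END = 0, TLV_ACQUIRE = 1, TLV_PHYS_PORT_ID = 2 };

static void add_tlv(uint8_t *buf, size_t offset, uint16_t type, uint16_t len)
{
	struct tlv_hdr hdr = { .type = type, .length = len };

	memcpy(buf + offset, &hdr, sizeof(hdr));
}

int main(void)
{
	uint8_t resp[256];
	size_t offset = 0;
	int vf_asked_for_port_id = 1;	/* stand-in for the request-TLV search */

	memset(resp, 0, sizeof(resp));	/* no stale chain from a prior reply */

	add_tlv(resp, offset, TLV_ACQUIRE, 64);		/* mandatory */
	offset += 64;

	if (vf_asked_for_port_id) {			/* optional */
		add_tlv(resp, offset, TLV_PHYS_PORT_ID, 12);
		offset += 12;
	}

	add_tlv(resp, offset, TLV_LIST_END, sizeof(struct tlv_hdr));
	printf("chain ends at offset %zu\n", offset);
	return 0;
}
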
index 1179fe06d0c7ff010b7a06102a68b601c747bf9c..208568bc7a71d72d8e19aea70f02485e17c0e8ee 100644 (file)
@@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv {
        } resc;
 };
 
+struct vfpf_port_phys_id_resp_tlv {
+       struct channel_tlv tl;
+       u8 id[ETH_ALEN];
+       u8 padding[2];
+};
+
 #define VFPF_INIT_FLG_STATS_COALESCE   (1 << 0) /* when set the VFs queues
                                                  * stats will be coalesced on
                                                  * the leading RSS queue
@@ -398,6 +404,7 @@ enum channel_tlvs {
        CHANNEL_TLV_PF_SET_MAC,
        CHANNEL_TLV_PF_SET_VLAN,
        CHANNEL_TLV_UPDATE_RSS,
+       CHANNEL_TLV_PHYS_PORT_ID,
        CHANNEL_TLV_MAX
 };
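
The new vfpf_port_phys_id_resp_tlv above carries a 6-byte MAC address plus two bytes of explicit padding, which keeps the TLV a multiple of four bytes so the next header in the chain stays naturally aligned. A compile-time check of that invariant could look like the sketch below (illustrative only; the kernel idiom would be BUILD_BUG_ON rather than C11 _Static_assert).

#include <stdint.h>

#define ETH_ALEN 6

struct channel_tlv {
	uint16_t type;
	uint16_t length;
};

struct vfpf_port_phys_id_resp_tlv {
	struct channel_tlv tl;
	uint8_t id[ETH_ALEN];
	uint8_t padding[2];
};

/* 4 + 6 + 2 = 12 bytes: the explicit padding keeps the size a multiple
 * of 4 so the TLV that follows starts on an aligned boundary.
 */
_Static_assert(sizeof(struct vfpf_port_phys_id_resp_tlv) % 4 == 0,
	       "phys-port-id TLV must pad to a 4-byte multiple");
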
 
index 99394bd49a139414776df9ff776c02b2fe6f3c26..f58a8b80302d9b2088ce139688b76e3a9181bb42 100644 (file)
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 
                        csk->vlan_id = path_resp->vlan_id;
 
-                       memcpy(csk->ha, path_resp->mac_addr, 6);
+                       memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
                        if (test_bit(SK_F_IPV6, &csk->flags))
                                memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
                                       sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
                cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
 
-       memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+       memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
 
        cp->cnic_ops = &cnic_bnx2x_ops;
        cp->start_hw = cnic_start_bnx2x_hw;
index 0658b43e148c1c45ecb2013696534f2ff1d2ffb0..ebbfe25acaa6e0fc927938bd54150435a1feedb5 100644 (file)
@@ -353,8 +353,8 @@ struct cnic_ulp_ops {
        atomic_t ref_count;
 };
 
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
 
-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);
 
 #endif
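
The cnic_if.h hunk above only drops a redundant storage-class specifier: function declarations at file scope have external linkage by default, so the two forms below declare the identical symbol and the header's meaning is unchanged. Several later hunks in this merge make the same cleanup in bnad.h, cxgb's common.h and be.h.

struct cnic_ulp_ops;	/* opaque here; fully defined elsewhere in the header */

/* Equivalent declarations: "extern" on a function prototype adds nothing,
 * because external linkage is already the default.
 */
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
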
index 12d961c4ebcaf704e36b83c8624633c2fbd48dda..819d87c281bfba633d19628e47237bdf84cf03d3 100644 (file)
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    133
+#define TG3_MIN_NUM                    134
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "Jul 29, 2013"
+#define DRV_MODULE_RELDATE     "Sep 16, 2013"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
        return err;
 }
 
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+       return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+                           reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
        u32 phy_control;
@@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 
        spin_lock_bh(&tp->lock);
 
-       if (tg3_readphy(tp, reg, &val))
+       if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;
 
        spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 
        spin_lock_bh(&tp->lock);
 
-       if (tg3_writephy(tp, reg, val))
+       if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;
 
        spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
        u32 val;
        struct phy_device *phydev;
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
+       } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+               int addr;
+
+               addr = ssb_gige_get_phyaddr(tp->pdev);
+               if (addr < 0)
+                       return addr;
+               tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;
 
@@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
-       tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+       tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
        for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
                return i;
        }
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
        u32 old_tx_mode = tp->tx_mode;
 
        if (tg3_flag(tp, USE_PHYLIB))
-               autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+               autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
        else
                autoneg = tp->link_config.autoneg;
 
@@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
-       struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        spin_lock_bh(&tp->lock);
 
@@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
                                      SUPPORTED_Asym_Pause);
                break;
        default:
-               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                return -EINVAL;
        }
 
@@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
-       phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+       phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
-               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
 }
@@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
                return;
        }
 
-       reg = MII_TG3_MISC_SHDW_WREN |
-             MII_TG3_MISC_SHDW_SCR5_SEL |
-             MII_TG3_MISC_SHDW_SCR5_LPED |
+       reg = MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 
-       tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+       tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
 
 
-       reg = MII_TG3_MISC_SHDW_WREN |
-             MII_TG3_MISC_SHDW_APD_SEL |
-             MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+       reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
 
-       tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+       tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
 }
 
 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
@@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                        struct phy_device *phydev;
                        u32 phyid, advertising;
 
-                       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+                       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 
@@ -9196,10 +9210,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
        }
 
-       if (err)
-               return err;
-
-       return 0;
+       return err;
 }
 
 static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
                name = tp->dev->name;
        else {
                name = &tnapi->irq_lbl[0];
-               snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+               if (tnapi->tx_buffers && tnapi->rx_rcb)
+                       snprintf(name, IFNAMSIZ,
+                                "%s-txrx-%d", tp->dev->name, irq_num);
+               else if (tnapi->tx_buffers)
+                       snprintf(name, IFNAMSIZ,
+                                "%s-tx-%d", tp->dev->name, irq_num);
+               else if (tnapi->rx_rcb)
+                       snprintf(name, IFNAMSIZ,
+                                "%s-rx-%d", tp->dev->name, irq_num);
+               else
+                       snprintf(name, IFNAMSIZ,
+                                "%s-%d", tp->dev->name, irq_num);
                name[IFNAMSIZ-1] = 0;
        }
 
@@ -11907,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_ethtool_gset(phydev, cmd);
        }
 
@@ -11974,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_ethtool_sset(phydev, cmd);
        }
 
@@ -12093,12 +12115,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
        device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
 
-       spin_lock_bh(&tp->lock);
        if (device_may_wakeup(dp))
                tg3_flag_set(tp, WOL_ENABLE);
        else
                tg3_flag_clear(tp, WOL_ENABLE);
-       spin_unlock_bh(&tp->lock);
 
        return 0;
 }
@@ -12131,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
        } else {
                u32 bmcr;
 
@@ -12247,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
                u32 newadv;
                struct phy_device *phydev;
 
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                return -ENOMEM;
 
        tx_data = skb_put(skb, tx_len);
-       memcpy(tx_data, tp->dev->dev_addr, 6);
-       memset(tx_data + 6, 0x0, 8);
+       memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+       memset(tx_data + ETH_ALEN, 0x0, 8);
 
        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
 
@@ -13683,7 +13703,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_mii_ioctl(phydev, ifr, cmd);
        }
 
@@ -14921,6 +14941,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
+
+                       if (tg3_flag(tp, 5717_PLUS) ||
+                           tg3_asic_rev(tp) == ASIC_REV_5762)
+                               tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+                                               LED_CTRL_BLINK_RATE_MASK;
+
                        break;
 
                case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15785,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
                        reg = TG3PCI_GEN2_PRODID_ASICREV;
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16661,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
        int len;
 
        addr = of_get_property(dp, "local-mac-address", &len);
-       if (addr && len == 6) {
-               memcpy(dev->dev_addr, addr, 6);
+       if (addr && len == ETH_ALEN) {
+               memcpy(dev->dev_addr, addr, ETH_ALEN);
                return 0;
        }
        return -ENODEV;
@@ -16643,7 +16672,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
 {
        struct net_device *dev = tp->dev;
 
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
        return 0;
 }
 #endif
@@ -17052,10 +17081,6 @@ static int tg3_test_dma(struct tg3 *tp)
 
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 
-#if 0
-       /* Unneeded, already done by tg3_get_invariants.  */
-       tg3_switch_clocks(tp);
-#endif
 
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17108,6 @@ static int tg3_test_dma(struct tg3 *tp)
                        break;
                }
 
-#if 0
-               /* validate data reached card RAM correctly. */
-               for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
-                       u32 val;
-                       tg3_read_mem(tp, 0x2100 + (i*4), &val);
-                       if (le32_to_cpu(val) != p[i]) {
-                               dev_err(&tp->pdev->dev,
-                                       "%s: Buffer corrupted on device! "
-                                       "(%d != %d)\n", __func__, val, i);
-                               /* ret = -ENODEV here? */
-                       }
-                       p[i] = 0;
-               }
-#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
                if (ret) {
@@ -17362,8 +17373,10 @@ static int tg3_init_one(struct pci_dev *pdev,
                        tg3_flag_set(tp, FLUSH_POSTED_WRITES);
                if (ssb_gige_one_dma_at_once(pdev))
                        tg3_flag_set(tp, ONE_DMA_AT_ONCE);
-               if (ssb_gige_have_roboswitch(pdev))
+               if (ssb_gige_have_roboswitch(pdev)) {
+                       tg3_flag_set(tp, USE_PHYLIB);
                        tg3_flag_set(tp, ROBOSWITCH);
+               }
                if (ssb_gige_is_rgmii(pdev))
                        tg3_flag_set(tp, RGMII_MODE);
        }
@@ -17409,9 +17422,12 @@ static int tg3_init_one(struct pci_dev *pdev,
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
-           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
@@ -17628,7 +17644,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17701,6 @@ err_out_free_res:
 err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -17717,7 +17732,6 @@ static void tg3_remove_one(struct pci_dev *pdev)
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
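
Among the tg3 hunks above, tg3_request_irq() now derives a per-vector suffix from what the vector services, so /proc/interrupts distinguishes combined, TX-only and RX-only MSI-X vectors. The snippet below is a standalone rendering of that naming scheme, using plain booleans in place of the tnapi ring pointers.

#include <stdio.h>

#define IFNAMSIZ 16	/* same bound the driver uses for irq_lbl[] */

static void tg3_irq_name(char *name, const char *dev, int irq_num,
			 int has_tx, int has_rx)
{
	if (has_tx && has_rx)
		snprintf(name, IFNAMSIZ, "%s-txrx-%d", dev, irq_num);
	else if (has_tx)
		snprintf(name, IFNAMSIZ, "%s-tx-%d", dev, irq_num);
	else if (has_rx)
		snprintf(name, IFNAMSIZ, "%s-rx-%d", dev, irq_num);
	else
		snprintf(name, IFNAMSIZ, "%s-%d", dev, irq_num);
}

int main(void)
{
	char name[IFNAMSIZ];

	tg3_irq_name(name, "eth0", 2, 0, 1);
	printf("%s\n", name);		/* prints: eth0-rx-2 */
	return 0;
}
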
 
index 70257808aa37deb3c2ff0511a16a6966a8d5f9c0..5c3835aa1e1b0702d602102031a260644129390e 100644 (file)
@@ -68,6 +68,9 @@
 #define  TG3PCI_DEVICE_TIGON3_5762      0x1687
 #define  TG3PCI_DEVICE_TIGON3_5725      0x1643
 #define  TG3PCI_DEVICE_TIGON3_5727      0x16f3
+#define  TG3PCI_DEVICE_TIGON3_57764     0x1642
+#define  TG3PCI_DEVICE_TIGON3_57767     0x1683
+#define  TG3PCI_DEVICE_TIGON3_57787     0x1641
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM           PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6   0x1644
index b78e69e0e52a291047e72c222530a1500e4ac003..248bc37cb41b24680441c057312e9882f905dd1d 100644 (file)
@@ -3212,7 +3212,6 @@ bnad_init(struct bnad *bnad,
        bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
        if (!bnad->bar0) {
                dev_err(&pdev->dev, "ioremap for bar0 failed\n");
-               pci_set_drvdata(pdev, NULL);
                return -ENOMEM;
        }
        pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
@@ -3300,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad,
        err = pci_request_regions(pdev, BNAD_NAME);
        if (err)
                goto disable_device;
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = true;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err)
-                               goto release_regions;
-               }
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       goto release_regions;
                *using_dac = false;
        }
        pci_set_master(pdev);
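
bnad_pci_init() above is one of several probe paths in this merge switched to dma_set_mask_and_coherent(), which sets the streaming and the coherent DMA mask in a single call instead of two. A minimal sketch of the usual probe-time fallback, assuming only the standard <linux/dma-mapping.h> API:

#include <linux/dma-mapping.h>

/* Prefer 64-bit DMA, fall back to 32-bit, and fail the probe otherwise.
 * A zero return from dma_set_mask_and_coherent() means both masks were
 * accepted.
 */
static int example_set_dma_masks(struct device *dev, bool *using_dac)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	*using_dac = false;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
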
index aefee77523f2fbc52c621883ebe8fd56a5ffbfce..f7e033f8a00e30f0ed33e0f9889d14bafbf512a6 100644 (file)
@@ -372,38 +372,37 @@ extern u32                bnad_rxqs_per_cq;
 /*
  * EXTERN PROTOTYPES
  */
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+u32 *cna_get_firmware_buf(struct pci_dev *pdev);
 /* Netdev entry point prototypes */
-extern void bnad_set_rx_mode(struct net_device *netdev);
-extern struct net_device_stats *bnad_get_netdev_stats(
-                               struct net_device *netdev);
-extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
-extern int bnad_enable_default_bcast(struct bnad *bnad);
-extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-extern void bnad_cb_completion(void *arg, enum bfa_status status);
+void bnad_set_rx_mode(struct net_device *netdev);
+struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
+int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_enable_default_bcast(struct bnad *bnad);
+void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_cb_completion(void *arg, enum bfa_status status);
 
 /* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
 
-extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
+void bnad_dim_timer_start(struct bnad *bnad);
 
 /* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
-               struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
-               struct rtnl_link_stats64 *stats);
+void bnad_netdev_qstats_fill(struct bnad *bnad,
+                            struct rtnl_link_stats64 *stats);
+void bnad_netdev_hwstats_fill(struct bnad *bnad,
+                             struct rtnl_link_stats64 *stats);
 
 /* Debugfs */
-void   bnad_debugfs_init(struct bnad *bnad);
-void   bnad_debugfs_uninit(struct bnad *bnad);
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
 
 /* MACROS */
 /* To set & get the stats counters */
index 78d6d6b970e105bb1de4c157a35af4e861e4f280..48f52882a22b07414b7fed808fe72ccd52650300 100644 (file)
 #define XGMAC_DMA_HW_FEATURE   0x00000f58      /* Enabled Hardware Features */
 
 #define XGMAC_ADDR_AE          0x80000000
-#define XGMAC_MAX_FILTER_ADDR  31
 
 /* PMT Control and Status */
 #define XGMAC_PMT_POINTER_RESET        0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
        struct device *device;
        struct napi_struct napi;
 
+       int max_macs;
        struct xgmac_extra_stats xstats;
 
        spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
        netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
                 netdev_mc_count(dev), netdev_uc_count(dev));
 
-       if (dev->flags & IFF_PROMISC) {
-               writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
-               return;
-       }
+       if (dev->flags & IFF_PROMISC)
+               value |= XGMAC_FRAME_FILTER_PR;
 
        memset(hash_filter, 0, sizeof(hash_filter));
 
-       if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+       if (netdev_uc_count(dev) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
        }
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
                goto out;
        }
 
-       if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+       if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
        } else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
        }
 
 out:
-       for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
-               xgmac_set_mac_addr(ioaddr, NULL, reg);
+       for (i = reg; i <= priv->max_macs; i++)
+               xgmac_set_mac_addr(ioaddr, NULL, i);
        for (i = 0; i < XGMAC_NUM_HASH; i++)
                writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
        uid = readl(priv->base + XGMAC_VERSION);
        netdev_info(ndev, "h/w version is 0x%x\n", uid);
 
+       /* Figure out how many valid mac address filter registers we have */
+       writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+       if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+               priv->max_macs = 31;
+       else
+               priv->max_macs = 7;
+
        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq == -ENXIO) {
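
The xgmac probe hunk above discovers how many perfect-filter address registers the core implements by writing a known value to the highest candidate slot and reading it back; unimplemented registers read as zero, so the result selects between 31 and 7 extra filters, and the RX-mode code then bounds itself by priv->max_macs instead of a fixed constant. Reduced to the bare write/read-back idiom (XGMAC_ADDR_HIGH() is the driver's own register macro and is not reproduced here, so this sketch takes the already-mapped address):

#include <linux/io.h>

static int xgmac_count_extra_macs(void __iomem *addr_high_31)
{
	writel(1, addr_high_31);
	return (readl(addr_high_31) == 1) ? 31 : 7;
}
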
index 5ccbed1784d2020b4683881f26a1a99ab472ad9a..8abb46b39032df4bc13fe3ad4b0f227f9d2411e1 100644 (file)
@@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
        return board_info(adap)->clock_core / 1000000;
 }
 
-extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
-extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
-
-extern void t1_interrupts_enable(adapter_t *adapter);
-extern void t1_interrupts_disable(adapter_t *adapter);
-extern void t1_interrupts_clear(adapter_t *adapter);
-extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-extern void t1_elmer0_ext_intr(adapter_t *adapter);
-extern int t1_slow_intr_handler(adapter_t *adapter);
-
-extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-extern const struct board_info *t1_get_board_info(unsigned int board_id);
-extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
+
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
                                                    unsigned short ssid);
-extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
-extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
                     struct adapter_params *p);
-extern int t1_init_hw_modules(adapter_t *adapter);
-extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-extern void t1_free_sw_modules(adapter_t *adapter);
-extern void t1_fatal_err(adapter_t *adapter);
-extern void t1_link_changed(adapter_t *adapter, int port_id);
-extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
                            int speed, int duplex, int pause);
 #endif /* _CXGB_COMMON_H_ */
index d7048db9863d905a57b23b9bc985a4c3d27f9bc9..1d021059f097579b9306e458e2c8a5cd92a41948 100644 (file)
@@ -1168,7 +1168,6 @@ out_free_dev:
        pci_release_regions(pdev);
 out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        t1_sw_reset(pdev);
 }
 
index 40c7b93ababc3d39d09e6f0ef9ba8e61af241955..eb33a31b08a01487dc17465ffbc7a4505030b9e2 100644 (file)
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 
 static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
 {
-       memcpy(mac_addr, cmac->instance->mac_addr, 6);
+       memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
        return 0;
 }
 
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
         */
 
        /* Store local copy */
-       memcpy(cmac->instance->mac_addr, ma, 6);
+       memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
 
        lo  = ((u32) ma[1] << 8) | (u32) ma[0];
        mid = ((u32) ma[3] << 8) | (u32) ma[2];
index b650951791dd46e81c35e972354ba133d48ee865..45d77334d7d9a6c1a053be81599edcbf8b74c3b7 100644 (file)
@@ -3374,7 +3374,6 @@ out_release_regions:
        pci_release_regions(pdev);
 out_disable_device:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 out:
        return err;
 }
@@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev)
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index 6990f6c6522164a7e602903554f3c8e3ccde83fc..81029b872bdd432a0d95f00048fa8f61143ceaa7 100644 (file)
 #define V_BUSY(x) ((x) << S_BUSY)
 #define F_BUSY    V_BUSY(1U)
 
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
 #define A_MC7_EXT_MODE1 0x108
 
 #define A_MC7_EXT_MODE2 0x10c
 
 #define A_MC7_CAL 0x128
 
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
 #define S_CAL_FAULT    30
 #define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
 #define F_CAL_FAULT    V_CAL_FAULT(1U)
 #define V_OP(x) ((x) << S_OP)
 #define F_OP    V_OP(1U)
 
-#define F_OP    V_OP(1U)
-#define A_SF_OP 0x6dc
-
 #define A_MC7_BIST_ADDR_BEG 0x168
 
 #define A_MC7_BIST_ADDR_END 0x16c
 #define V_CONT(x) ((x) << S_CONT)
 #define F_CONT    V_CONT(1U)
 
-#define F_CONT    V_CONT(1U)
-
 #define A_MC7_INT_ENABLE 0x178
 
 #define S_AE    17
 #define V_NICMODE(x) ((x) << S_NICMODE)
 #define F_NICMODE    V_NICMODE(1U)
 
-#define F_NICMODE    V_NICMODE(1U)
-
 #define S_IPV6ENABLE    15
 #define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
 #define F_IPV6ENABLE    V_IPV6ENABLE(1U)
 
 #define A_ULPRX_STAG_ULIMIT 0x530
 
-#define A_ULPRX_RQ_LLIMIT 0x534
 #define A_ULPRX_RQ_LLIMIT 0x534
 
-#define A_ULPRX_RQ_ULIMIT 0x538
 #define A_ULPRX_RQ_ULIMIT 0x538
 
 #define A_ULPRX_PBL_LLIMIT 0x53c
 
-#define A_ULPRX_PBL_ULIMIT 0x540
 #define A_ULPRX_PBL_ULIMIT 0x540
 
 #define A_ULPRX_TDDP_TAGMASK 0x524
 
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
-
 #define A_ULPTX_CONFIG 0x580
 
 #define S_CFG_CQE_SOP_MASK    1
 #define V_TMMODE(x) ((x) << S_TMMODE)
 #define F_TMMODE    V_TMMODE(1U)
 
-#define F_TMMODE    V_TMMODE(1U)
-
 #define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
 
 #define A_MC5_DB_FILTER_TABLE 0x710
 #define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
 #define F_TXACTENABLE    V_TXACTENABLE(1U)
 
-#define A_XGM_SERDES_CTRL0 0x8e0
-
 #define S_RESET3    23
 #define V_RESET3(x) ((x) << S_RESET3)
 #define F_RESET3    V_RESET3(1U)
index dfd1e36f57531f0e9e9cfeed14048460d96f6e30..ecd2fb3ef69596146a7cf155323e624e397b6834 100644 (file)
@@ -48,7 +48,6 @@
 #include <linux/vmalloc.h>
 #include <asm/io.h>
 #include "cxgb4_uld.h"
-#include "t4_hw.h"
 
 #define FW_VERSION_MAJOR 1
 #define FW_VERSION_MINOR 4
index c73cabdbd4c08a22fd506eef8a219f02833fb4b3..8b929eeecd2d37cd3b41e32d57bd1fa50402a230 100644 (file)
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
        struct net_device *event_dev;
        int ret = NOTIFY_DONE;
        struct bonding *bond = netdev_priv(ifa->idev->dev);
+       struct list_head *iter;
        struct slave *slave;
        struct pci_dev *first_pdev = NULL;
 
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
                 * in all of them only once.
                 */
                read_lock(&bond->lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        if (!first_pdev) {
                                ret = clip_add(slave->dev, ifa, event);
                                /* If clip_add is success then only initialize
@@ -6074,7 +6075,6 @@ sriov:
        pci_disable_device(pdev);
  out_release_regions:
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -6122,7 +6122,6 @@ static void remove_one(struct pci_dev *pdev)
                pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
                pci_release_regions(pdev);
-               pci_set_drvdata(pdev, NULL);
        } else
                pci_release_regions(pdev);
 }
index 40c22e7de15c2be52482b990354274bbc1fc3175..5f90ec5f7519a4ac6e7916396cff31446f5c614b 100644 (file)
@@ -2782,11 +2782,9 @@ err_unmap_bar:
 
 err_free_adapter:
        kfree(adapter);
-       pci_set_drvdata(pdev, NULL);
 
 err_release_regions:
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        pci_clear_master(pdev);
 
 err_disable_device:
@@ -2851,7 +2849,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
                }
                iounmap(adapter->regs);
                kfree(adapter);
-               pci_set_drvdata(pdev, NULL);
        }
 
        /*
@@ -2908,7 +2905,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
 #define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
 
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
        CH_DEVICE(0xb000, 0),   /* PE10K FPGA */
        CH_DEVICE(0x4800, 0),   /* T440-dbg */
        CH_DEVICE(0x4801, 0),   /* T420-cr */
index df296af20bd52d1052a1c05200841d55dc4acde1..8475c4cda9e4ca72ef8a2b309788620e82fb67d3 100644 (file)
@@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
  *     Builds an sk_buff from the given packet gather list.  Returns the
  *     sk_buff or %NULL if sk_buff allocation failed.
  */
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
-                                 unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+                                        unsigned int skb_len,
+                                        unsigned int pull_len)
 {
        struct sk_buff *skb;
 
@@ -1443,7 +1444,7 @@ out:
  *     Releases the pages of a packet gather list.  We do not own the last
  *     page on the list and do not free it.
  */
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
 {
        int frag;
 
@@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
  *     on this queue.  If the system is under memory shortage use a fairly
  *     long delay to help recovery.
  */
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
 {
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
        int budget_left = budget;
@@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter)
  * The MSI interrupt handler handles data events from SGE response queues as
  * well as error and other async events as they all use the same MSI vector.
  */
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
 {
        struct adapter *adapter = cookie;
 
index 7b756cf9474a90497de9987591c57cabfcc09f2d..ff78dfaec5087184021b863174f84adfeea59476 100644 (file)
@@ -2309,7 +2309,6 @@ err_out_release_regions:
 err_out_disable_device:
        pci_disable_device(pdev);
 err_out_free_netdev:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 
        return err;
@@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev)
                enic_iounmap(enic);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
                free_netdev(netdev);
        }
 }
index 5f5896e522d2c6068d7b6bc9c685cbc279be48d8..7080ad6c401409199b091021aeed398dda61062c 100644 (file)
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
 
 /* DM9000 network board routine ---------------------------- */
 
-static void
-dm9000_reset(board_info_t * db)
-{
-       dev_dbg(db->dev, "resetting device\n");
-
-       /* RESET device */
-       writeb(DM9000_NCR, db->io_addr);
-       udelay(200);
-       writeb(NCR_RST, db->io_data);
-       udelay(200);
-}
-
 /*
  *   Read a byte from I/O port
  */
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
        writeb(value, db->io_data);
 }
 
+static void
+dm9000_reset(board_info_t *db)
+{
+       dev_dbg(db->dev, "resetting device\n");
+
+       /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
+        * The essential point is that we have to do a double reset, and the
+        * instruction is to set LBK into MAC internal loopback mode.
+        */
+       iow(db, DM9000_NCR, 0x03);
+       udelay(100); /* Application note says at least 20 us */
+       if (ior(db, DM9000_NCR) & 1)
+               dev_err(db->dev, "dm9000 did not respond to first reset\n");
+
+       iow(db, DM9000_NCR, 0);
+       iow(db, DM9000_NCR, 0x03);
+       udelay(100);
+       if (ior(db, DM9000_NCR) & 1)
+               dev_err(db->dev, "dm9000 did not respond to second reset\n");
+}
+
 /* routines for sending block to chip */
 
 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 static void dm9000_show_carrier(board_info_t *db,
                                unsigned carrier, unsigned nsr)
 {
+       int lpa;
        struct net_device *ndev = db->ndev;
+       struct mii_if_info *mii = &db->mii;
        unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
 
-       if (carrier)
-               dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
+       if (carrier) {
+               lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+               dev_info(db->dev,
+                        "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
                         ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
-                        (ncr & NCR_FDX) ? "full" : "half");
-       else
+                        (ncr & NCR_FDX) ? "full" : "half", lpa);
+       } else {
                dev_info(db->dev, "%s: link down\n", ndev->name);
+       }
 }
 
 static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
                        (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
 
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
+       iow(db, DM9000_GPR, 0);
 
-       dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
-       dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+       /* If we are dealing with DM9000B, some extra steps are required: a
+        * manual phy reset, and setting init params.
+        */
+       if (db->type == TYPE_DM9000B) {
+               dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
+               dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
+       }
 
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
@@ -1603,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev)
 
        if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
                mac_src = "platform data";
-               memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+               memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
        }
 
        if (!is_valid_ether_addr(ndev->dev_addr)) {
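
The dm9000_reset() rewrite above follows the DM9000 application note: the chip needs two reset pulses, each issued with the loopback field set to MAC-internal loopback, and the RST bit is read back because it self-clears once the reset has taken effect. The sketch below names the bits involved; the macro values are spelled out for illustration only, and the helpers (iow/ior, board_info_t, dev_err, udelay) are the driver's and the kernel's own.

#define DM9000_NCR	0x00		/* network control register */
#define NCR_RST		(1 << 0)	/* self-clearing reset bit */
#define NCR_LBK_MAC	(1 << 1)	/* LBK field = MAC-internal loopback */

/* One of the two pulses performed by dm9000_reset() above. */
static void dm9000_reset_pulse(board_info_t *db)
{
	iow(db, DM9000_NCR, NCR_LBK_MAC | NCR_RST);	/* the 0x03 in the hunk */
	udelay(100);		/* app note asks for at least 20 us */
	if (ior(db, DM9000_NCR) & NCR_RST)
		dev_err(db->dev, "dm9000 reset bit did not clear\n");
}
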
index eaab73cf27caffbbabb2904c47be6fc7941f5303..38148b0e3a952a348977de0b2d459b9b4436eee6 100644 (file)
@@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev)
        iounmap(de->regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index 263b92c00cbfb34dbc3b1904cd2ddacafdeb48cc..c05b66dfcc30bc9c8e6f300f3d1c47300cd73808 100644 (file)
@@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
        pci_disable_device (pdev);
 }
 
-static struct pci_device_id de4x5_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
index 83139307861cd720edfd78d71d7abbe6b2265ff5..5ad9e3e3c0b8a5095564ae4c4369c3ac531c9591 100644 (file)
@@ -523,7 +523,6 @@ err_out_res:
 err_out_disable:
        pci_disable_device(pdev);
 err_out_free:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
        return err;
@@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev)
                                        db->buf_pool_ptr, db->buf_pool_dma_ptr);
                pci_release_regions(pdev);
                free_netdev(dev);       /* free board information */
-
-               pci_set_drvdata(pdev, NULL);
        }
 
        DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
index 4e8cfa2ac803abb13c9c692ef971618cc26d664b..add05f14b38be17311f3344bd23860064fac8ce1 100644 (file)
@@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev)
        pci_iounmap(pdev, tp->base_addr);
        free_netdev (dev);
        pci_release_regions (pdev);
-       pci_set_drvdata (pdev, NULL);
 
        /* pci_power_off (pdev, -1); */
 }
index 93845afe1cea105f21751c4bea54100405799b42..a5397b130724faa9bb4fb9d54881226eb03695bd 100644 (file)
@@ -429,7 +429,6 @@ err_out_release:
 err_out_disable:
        pci_disable_device(pdev);
 err_out_free:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
        return err;
@@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev)
                                db->buf_pool_ptr, db->buf_pool_dma_ptr);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index c7b04ecf5b497ea26f969ce52eeaffccfc0ee5ef..62fe512bb2167fb682f63e26e2678e6a5df0cb6d 100644 (file)
@@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev)
                pci_iounmap(pdev, np->base_addr);
                free_netdev(dev);
        }
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index 9b84cb04fe5fccded9f26fe010cf16ea6bb36c8a..ab7ebac6fbea0e6fe579b29d398b2c73fc369d4b 100644 (file)
@@ -289,7 +289,6 @@ out:
 err_unmap:
        pci_iounmap(pdev, private->ioaddr);
 reg_fail:
-       pci_set_drvdata(pdev, NULL);
        dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
 tx_buf_fail:
        dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev)
 
        unregister_netdev(dev);
        pci_iounmap(pdev, card->ioaddr);
-       pci_set_drvdata(pdev, NULL);
        dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
        dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
        free_netdev(dev);
index afa8e3af2c4d6840a9eee8c9c46d0e3f6df22a02..4fb756d219f700bfb82a37d62e9cb078fca22024 100644 (file)
@@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev)
                pci_release_regions (pdev);
                pci_disable_device (pdev);
        }
-       pci_set_drvdata (pdev, NULL);
 }
 
 static struct pci_driver rio_driver = {
index bf3bf6f22c998b7c7ef2563be2041737a15959fe..113cd799a131f925fcbd3b5946d4f185249d376f 100644 (file)
@@ -703,7 +703,6 @@ err_out_unmap_tx:
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
                np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_res:
        pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev)
            pci_iounmap(pdev, np->base);
            pci_release_regions(pdev);
            free_netdev(dev);
-           pci_set_drvdata(pdev, NULL);
        }
 }
 
index ace5050dba3877ca8a3fe4995469ee3d16837c55..1bce77fdbd993240d0299901307f79aab7159da7 100644 (file)
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "4.9.134.0u"
+#define DRV_VER                        "4.9.224.0u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -88,7 +88,8 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define BE_MIN_MTU             256
 
 #define BE_NUM_VLANS_SUPPORTED 64
-#define BE_MAX_EQD             96u
+#define BE_UMC_NUM_VLANS_SUPPORTED     15
+#define BE_MAX_EQD             128u
 #define        BE_MAX_TX_FRAG_COUNT    30
 
 #define EVNT_Q_LEN             1024
@@ -200,6 +201,17 @@ struct be_eq_obj {
        struct be_adapter *adapter;
 } ____cacheline_aligned_in_smp;
 
+struct be_aic_obj {            /* Adaptive interrupt coalescing (AIC) info */
+       bool enable;
+       u32 min_eqd;            /* in usecs */
+       u32 max_eqd;            /* in usecs */
+       u32 prev_eqd;           /* in usecs */
+       u32 et_eqd;             /* configured val when aic is off */
+       ulong jiffies;
+       u64 rx_pkts_prev;       /* Used to calculate RX pps */
+       u64 tx_reqs_prev;       /* Used to calculate TX pps */
+};
+
 struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
@@ -214,6 +226,7 @@ struct be_tx_stats {
        u64 tx_compl;
        ulong tx_jiffies;
        u32 tx_stops;
+       u32 tx_drv_drops;       /* pkts dropped by driver */
        struct u64_stats_sync sync;
        struct u64_stats_sync sync_compl;
 };
@@ -238,15 +251,12 @@ struct be_rx_page_info {
 struct be_rx_stats {
        u64 rx_bytes;
        u64 rx_pkts;
-       u64 rx_pkts_prev;
-       ulong rx_jiffies;
        u32 rx_drops_no_skbs;   /* skb allocation errors */
        u32 rx_drops_no_frags;  /* HW has no fetched frags */
        u32 rx_post_fail;       /* page post alloc failures */
        u32 rx_compl;
        u32 rx_mcast_pkts;
        u32 rx_compl_err;       /* completions with err set */
-       u32 rx_pps;             /* pkts per second */
        struct u64_stats_sync sync;
 };
 
@@ -315,6 +325,11 @@ struct be_drv_stats {
        u32 rx_input_fifo_overflow_drop;
        u32 pmem_fifo_overflow_drop;
        u32 jabber_events;
+       u32 rx_roce_bytes_lsd;
+       u32 rx_roce_bytes_msd;
+       u32 rx_roce_frames;
+       u32 roce_drops_payload_len;
+       u32 roce_drops_crc;
 };
 
 struct be_vf_cfg {
@@ -333,6 +348,7 @@ enum vf_state {
 
 #define BE_FLAGS_LINK_STATUS_INIT              1
 #define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
+#define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
 #define BE_FLAGS_NAPI_ENABLED                  (1 << 9)
 #define BE_UC_PMAC_COUNT               30
 #define BE_VF_UC_PMAC_COUNT            2
@@ -403,6 +419,7 @@ struct be_adapter {
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
        struct be_drv_stats drv_stats;
+       struct be_aic_obj aic_obj[MAX_EVT_QS];
        u16 vlans_added;
        u8 vlan_tag[VLAN_N_VID];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
@@ -470,8 +487,8 @@ struct be_adapter {
 
 #define be_physfn(adapter)             (!adapter->virtfn)
 #define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_max_vfs(adapter) && num_vfs && \
-                                        be_physfn(adapter))
+#define sriov_want(adapter)             (be_physfn(adapter) && \
+                                        (num_vfs || pci_num_vf(adapter->pdev)))
 #define for_all_vfs(adapter, vf_cfg, i)                                        \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)
@@ -694,27 +711,27 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
        return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 }
 
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
-               u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-extern bool be_is_wol_supported(struct be_adapter *adapter);
-extern bool be_pause_supported(struct be_adapter *adapter);
-extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+                 u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
 
 /*
  * internal function to initialize-cleanup roce device.
  */
-extern void be_roce_dev_add(struct be_adapter *);
-extern void be_roce_dev_remove(struct be_adapter *);
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
 
 /*
  * internal function to open-close roce device during ifup-ifdown.
  */
-extern void be_roce_dev_open(struct be_adapter *);
-extern void be_roce_dev_close(struct be_adapter *);
+void be_roce_dev_open(struct be_adapter *);
+void be_roce_dev_close(struct be_adapter *);
 
 #endif                         /* BE_H */
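
The reworked sriov_want() above now also honours VFs that are already enabled on the device, not only the num_vfs module parameter. Below is a minimal stand-alone sketch of that test; the helper name and the plain-integer inputs stand in for be_physfn(adapter), num_vfs and pci_num_vf(pdev) and are illustrative only.

#include <stdio.h>

/* is_physfn, num_vfs_param and vfs_already_enabled stand in for
 * be_physfn(adapter), the num_vfs module parameter and pci_num_vf(pdev).
 */
static int sriov_want(int is_physfn, int num_vfs_param, int vfs_already_enabled)
{
	return is_physfn && (num_vfs_param || vfs_already_enabled);
}

int main(void)
{
	/* PF, no module parameter, but 4 VFs already enabled on the device */
	printf("%d\n", sriov_want(1, 0, 4));	/* 1 */
	/* a VF never wants SR-IOV setup */
	printf("%d\n", sriov_want(0, 8, 0));	/* 0 */
	/* PF loaded with num_vfs=8 */
	printf("%d\n", sriov_want(1, 8, 0));	/* 1 */
	return 0;
}
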
index 1ab5dab11eff07ab349a0cc795ef5986fe1e9010..2d554366b3429547687a861fcaaa8df99e6529fe 100644 (file)
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        dev_err(&adapter->pdev->dev,
                                "opcode %d-%d failed:status %d-%d\n",
                                opcode, subsystem, compl_status, extd_status);
+
+                       if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+                               return extd_status;
                }
        }
 done:
@@ -1195,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
-               req->if_id = cpu_to_le16(adapter->if_handle);
        } else if (BEx_chip(adapter)) {
                if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
                        req->hdr.version = 2;
@@ -1203,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
                req->hdr.version = 2;
        }
 
+       if (req->hdr.version > 0)
+               req->if_id = cpu_to_le16(adapter->if_handle);
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1432,8 +1436,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
 
        /* BE2 supports only v0 of this cmd; BE3 and Lancer use v1, others v2 */
-       if (!BE2_chip(adapter))
+       if (BE2_chip(adapter))
+               hdr->version = 0;
+       if (BE3_chip(adapter) || lancer_chip(adapter))
                hdr->version = 1;
+       else
+               hdr->version = 2;
 
        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;
@@ -1715,11 +1723,12 @@ err:
 /* set the EQ delay interval of an EQ to specified value
  * Uses async mcc
  */
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+                     int num)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
-       int status = 0;
+       int status = 0, i;
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -1733,13 +1742,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
 
-       req->num_eq = cpu_to_le32(1);
-       req->delay[0].eq_id = cpu_to_le32(eq_id);
-       req->delay[0].phase = 0;
-       req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+       req->num_eq = cpu_to_le32(num);
+       for (i = 0; i < num; i++) {
+               req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+               req->set_eqd[i].phase = 0;
+               req->set_eqd[i].delay_multiplier =
+                               cpu_to_le32(set_eqd[i].delay_multiplier);
+       }
 
        be_mcc_notify(adapter);
-
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1812,6 +1823,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
        } else if (flags & IFF_ALLMULTI) {
                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+       } else if (flags & BE_FLAGS_VLAN_PROMISC) {
+               req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
+
+               if (value == ON)
+                       req->if_flags =
+                               cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
        } else {
                struct netdev_hw_addr *ha;
                int i = 0;
@@ -3510,7 +3527,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
        struct be_cmd_enable_disable_vf *req;
        int status;
 
-       if (!lancer_chip(adapter))
+       if (BEx_chip(adapter))
                return 0;
 
        spin_lock_bh(&adapter->mcc_lock);
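
be_cmd_modify_eqd() now takes an array of be_set_eqd entries, so one MODIFY_EQ_DELAY command can reprogram several EQs at once. The stand-alone sketch below shows how a caller is expected to batch the entries; send_modify_eq_delay() is a stand-in for the real MCC path and the MAX_EVT_QS bound here is assumed.

#include <stdio.h>

#define MAX_EVT_QS 8	/* assumed bound; the driver uses its own constant */

struct be_set_eqd {		/* mirrors the struct added to be_cmds.h */
	unsigned int eq_id;
	unsigned int phase;
	unsigned int delay_multiplier;
};

/* stand-in for posting the single MODIFY_EQ_DELAY command over the MCC queue */
static void send_modify_eq_delay(const struct be_set_eqd *eqd, int num)
{
	for (int i = 0; i < num; i++)
		printf("EQ %u -> delay multiplier %u\n",
		       eqd[i].eq_id, eqd[i].delay_multiplier);
}

int main(void)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	unsigned int new_eqd[] = { 0, 32, 96 };	/* per-EQ delays in usec, example values */
	int num = 0;

	for (int i = 0; i < 3; i++) {
		set_eqd[num].eq_id = i;
		set_eqd[num].phase = 0;
		/* same 65/100 scaling the driver applies before posting the command */
		set_eqd[num].delay_multiplier = (new_eqd[i] * 65) / 100;
		num++;
	}
	if (num)
		send_modify_eq_delay(set_eqd, num);	/* one command covers all EQs */
	return 0;
}
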
index d026226db88ccac0fcc70497ce2c8f6ef7b3e614..88708372d5e5e1d922e3690e71fdc1bce3686e78 100644 (file)
@@ -60,6 +60,8 @@ enum {
        MCC_STATUS_NOT_SUPPORTED = 66
 };
 
+#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES    0x16
+
 #define CQE_STATUS_COMPL_MASK          0xFFFF
 #define CQE_STATUS_COMPL_SHIFT         0       /* bits 0 - 15 */
 #define CQE_STATUS_EXTD_MASK           0xFFFF
@@ -1055,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
 } __packed;
 
 /******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+};
+
 struct be_cmd_req_modify_eq_delay {
        struct be_cmd_req_hdr hdr;
        u32 num_eq;
-       struct {
-               u32 eq_id;
-               u32 phase;
-               u32 delay_multiplier;
-       } delay[8];
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
 } __packed;
 
 struct be_cmd_resp_modify_eq_delay {
@@ -1658,6 +1662,67 @@ struct be_erx_stats_v1 {
        u32 rsvd[4];
 };
 
+struct be_port_rxf_stats_v2 {
+       u32 rsvd0[10];
+       u32 roce_bytes_received_lsd;
+       u32 roce_bytes_received_msd;
+       u32 rsvd1[5];
+       u32 roce_frames_received;
+       u32 rx_crc_errors;
+       u32 rx_alignment_symbol_errors;
+       u32 rx_pause_frames;
+       u32 rx_priority_pause_frames;
+       u32 rx_control_frames;
+       u32 rx_in_range_errors;
+       u32 rx_out_range_errors;
+       u32 rx_frame_too_long;
+       u32 rx_address_filtered;
+       u32 rx_dropped_too_small;
+       u32 rx_dropped_too_short;
+       u32 rx_dropped_header_too_small;
+       u32 rx_dropped_tcp_length;
+       u32 rx_dropped_runt;
+       u32 rsvd2[10];
+       u32 rx_ip_checksum_errs;
+       u32 rx_tcp_checksum_errs;
+       u32 rx_udp_checksum_errs;
+       u32 rsvd3[7];
+       u32 rx_switched_unicast_packets;
+       u32 rx_switched_multicast_packets;
+       u32 rx_switched_broadcast_packets;
+       u32 rsvd4[3];
+       u32 tx_pauseframes;
+       u32 tx_priority_pauseframes;
+       u32 tx_controlframes;
+       u32 rsvd5[10];
+       u32 rxpp_fifo_overflow_drop;
+       u32 rx_input_fifo_overflow_drop;
+       u32 pmem_fifo_overflow_drop;
+       u32 jabber_events;
+       u32 rsvd6[3];
+       u32 rx_drops_payload_size;
+       u32 rx_drops_clipped_header;
+       u32 rx_drops_crc;
+       u32 roce_drops_payload_len;
+       u32 roce_drops_crc;
+       u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+       struct be_port_rxf_stats_v2 port[4];
+       u32 rsvd0[2];
+       u32 rx_drops_no_pbuf;
+       u32 rx_drops_no_txpb;
+       u32 rx_drops_no_erx_descr;
+       u32 rx_drops_no_tpre_descr;
+       u32 rsvd1[6];
+       u32 rx_drops_too_many_frags;
+       u32 rx_drops_invalid_ring;
+       u32 forwarded_packets;
+       u32 rx_drops_mtu;
+       u32 rsvd2[35];
+};
+
 struct be_hw_stats_v1 {
        struct be_rxf_stats_v1 rxf;
        u32 rsvd0[BE_TXP_SW_SZ];
@@ -1676,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 {
        struct be_hw_stats_v1 hw_stats;
 };
 
+struct be_erx_stats_v2 {
+       u32 rx_drops_no_fragments[136];     /* dwords 0 to 135 */
+       u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+       struct be_rxf_stats_v2 rxf;
+       u32 rsvd0[BE_TXP_SW_SZ];
+       struct be_erx_stats_v2 erx;
+       struct be_pmem_stats pmem;
+       u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+       struct be_cmd_req_hdr hdr;
+       u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+       struct be_cmd_resp_hdr hdr;
+       struct be_hw_stats_v2 hw_stats;
+};
+
 /************** get fat capabilities *******************/
 #define MAX_MODULES 27
 #define MAX_MODES 4
@@ -1791,7 +1879,7 @@ struct be_nic_res_desc {
        u8 acpi_params;
        u8 wol_param;
        u16 rsvd7;
-       u32 rsvd8[3];
+       u32 rsvd8[7];
 } __packed;
 
 struct be_cmd_req_get_func_config {
@@ -1863,137 +1951,120 @@ struct be_cmd_resp_get_iface_list {
        struct be_if_desc if_desc;
 };
 
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_fw_wait_ready(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                                bool permanent, u32 if_handle, u32 pmac_id);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-                       u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
-                       int pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
-                           u32 en_flags, u32 *if_handle, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
-                       u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
-                       struct be_queue_info *cq, struct be_queue_info *eq,
-                       bool no_delay, int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
-                       struct be_queue_info *mccq,
-                       struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
-                       struct be_tx_obj *txo);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
-                       struct be_queue_info *rxq, u16 cq_id,
-                       u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-                       int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
-                       struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
-                                   u8 *link_status, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
-                       struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
-                       struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-               char *fw_on_flash);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
-                       u16 *vtag_array, u32 num, bool untagged,
-                       bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
-                       u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
-                       u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+                         bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+                   u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+                   u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+                    u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+                    struct be_queue_info *eq, bool no_delay,
+                    int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+                      struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+                     u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+                    int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+                            u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+                              struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+                     char *fw_on_flash);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+                      u32 num, bool untagged, bool promiscuous);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                        u32 *function_mode, u32 *function_caps, u16 *asic_rev);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-                            u32 rss_hash_opts, u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
-                       u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
-                       u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
-                       struct be_dma_mem *cmd, u32 flash_oper,
-                       u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
-                                  struct be_dma_mem *cmd,
-                                  u32 data_size, u32 data_offset,
-                                  const char *obj_name,
-                                  u32 *data_written, u8 *change_status,
-                                  u8 *addn_status);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+                     u32 rss_hash_opts, u16 table_size);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+                           u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+                           u32 *state);
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+                         u32 flash_oper, u32 flash_opcode, u32 buf_size);
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+                           u32 data_size, u32 data_offset,
+                           const char *obj_name, u32 *data_written,
+                           u8 *change_status, u8 *addn_status);
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
-               u32 data_size, u32 data_offset, const char *obj_name,
-               u32 *data_read, u32 *eof, u8 *addn_status);
+                          u32 data_size, u32 data_offset, const char *obj_name,
+                          u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-                               int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-                               struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-                               u32 loopback_type, u32 pkt_size,
-                               u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-                       u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-                               struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
-                               u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_error(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
-                                   u32 *privilege, u32 domain);
-extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
-                                   u32 privileges, u32 vf_num);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
-                                   bool *pmac_id_active, u32 *pmac_id,
-                                   u8 domain);
-extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
-                                u8 *mac);
-extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
-extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
-                                               u8 mac_count, u32 domain);
-extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
-                         u32 dom);
-extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-                                u32 domain, u16 intf_id, u16 hsw_mode);
-extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-                                u32 domain, u16 intf_id, u8 *mode);
-extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
-extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
-                                         struct be_dma_mem *cmd);
-extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
-                                         struct be_dma_mem *cmd,
-                                         struct be_fat_conf_params *cfgs);
-extern int lancer_wait_ready(struct be_adapter *adapter);
-extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
-extern int lancer_initiate_dump(struct be_adapter *adapter);
-extern bool dump_present(struct be_adapter *adapter);
-extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+                        int offset);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+                           struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+                        u32 loopback_type, u32 pkt_size, u32 num_pkts,
+                        u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+                       struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+                           struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+                       u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+                            u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+                            u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+                            bool *pmac_id_active, u32 *pmac_id, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+                       u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+                         u16 intf_id, u16 hsw_mode);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+                         u16 intf_id, u8 *mode);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+                                  struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+                                  struct be_dma_mem *cmd,
+                                  struct be_fat_conf_params *cfgs);
+int lancer_wait_ready(struct be_adapter *adapter);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
 int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain);
-extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
-                                    u8 domain);
-extern int be_cmd_get_if_id(struct be_adapter *adapter,
-                           struct be_vf_cfg *vf_cfg, int vf_num);
-extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
-extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+                    int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
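
The new MCC_ADDL_STS_INSUFFICIENT_RESOURCES code defined above is what be_mcc_compl_process() now returns when the firmware reports resource exhaustion, which lets be_vid_config() fall back to VLAN-promiscuous mode instead of treating the failure as fatal. Below is a compressed, stand-alone sketch of that control flow; vlan_config() and enable_vlan_promisc() are stand-ins for the real command routines and the two-slot filter table is made up.

#include <stdio.h>

#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16

static int hw_filter_slots = 2;		/* pretend the HW VLAN filter table has 2 free slots */

/* stand-in for be_cmd_vlan_config(): fails with the extended status when full */
static int vlan_config(int num_vids)
{
	return num_vids > hw_filter_slots ? MCC_ADDL_STS_INSUFFICIENT_RESOURCES : 0;
}

/* stand-in for be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON) */
static int enable_vlan_promisc(void)
{
	printf("Exhausted VLAN HW filters, enabling VLAN promiscuous mode\n");
	return 0;
}

int main(void)
{
	int status = vlan_config(5);

	if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
		status = enable_vlan_promisc();
	else if (status)
		printf("Setting HW VLAN filtering failed\n");
	else
		printf("HW VLAN filtering programmed\n");
	return status;
}
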
index b440a1fac77b2883b3417eeab76861cc02ad42b9..08330034d9efab6f303025e95a17775caf657f50 100644 (file)
@@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(rx_drops_mtu)},
        /* Number of packets dropped due to random early drop function */
        {DRVSTAT_INFO(eth_red_drops)},
-       {DRVSTAT_INFO(be_on_die_temperature)}
+       {DRVSTAT_INFO(be_on_die_temperature)},
+       {DRVSTAT_INFO(rx_roce_bytes_lsd)},
+       {DRVSTAT_INFO(rx_roce_bytes_msd)},
+       {DRVSTAT_INFO(rx_roce_frames)},
+       {DRVSTAT_INFO(roce_drops_payload_len)},
+       {DRVSTAT_INFO(roce_drops_crc)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        /* Number of times the TX queue was stopped due to lack
         * of spaces in the TXQ.
         */
-       {DRVSTAT_TX_INFO(tx_stops)}
+       {DRVSTAT_TX_INFO(tx_stops)},
+       /* Pkts dropped in the driver's transmit path */
+       {DRVSTAT_TX_INFO(tx_drv_drops)}
 };
 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
 
@@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_eq_obj *eqo = &adapter->eq_obj[0];
+       struct be_aic_obj *aic = &adapter->aic_obj[0];
 
 
-       et->rx_coalesce_usecs = eqo->cur_eqd;
-       et->rx_coalesce_usecs_high = eqo->max_eqd;
-       et->rx_coalesce_usecs_low = eqo->min_eqd;
+       et->rx_coalesce_usecs = aic->prev_eqd;
+       et->rx_coalesce_usecs_high = aic->max_eqd;
+       et->rx_coalesce_usecs_low = aic->min_eqd;
 
-       et->tx_coalesce_usecs = eqo->cur_eqd;
-       et->tx_coalesce_usecs_high = eqo->max_eqd;
-       et->tx_coalesce_usecs_low = eqo->min_eqd;
+       et->tx_coalesce_usecs = aic->prev_eqd;
+       et->tx_coalesce_usecs_high = aic->max_eqd;
+       et->tx_coalesce_usecs_low = aic->min_eqd;
 
-       et->use_adaptive_rx_coalesce = eqo->enable_aic;
-       et->use_adaptive_tx_coalesce = eqo->enable_aic;
+       et->use_adaptive_rx_coalesce = aic->enable;
+       et->use_adaptive_tx_coalesce = aic->enable;
 
        return 0;
 }
@@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_aic_obj *aic = &adapter->aic_obj[0];
        struct be_eq_obj *eqo;
        int i;
 
        for_all_evt_queues(adapter, eqo, i) {
-               eqo->enable_aic = et->use_adaptive_rx_coalesce;
-               eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
-               eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
-               eqo->eqd = et->rx_coalesce_usecs;
+               aic->enable = et->use_adaptive_rx_coalesce;
+               aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+               aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+               aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+               aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+               aic++;
        }
 
        return 0;
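
be_set_coalesce() now stores the ethtool parameters into the per-EQ adaptive-coalescing (aic) object with the clamping shown in the hunk above: the fixed delay from rx_coalesce_usecs is bounded by the low/high limits and by BE_MAX_EQD. A stand-alone sketch of that arithmetic follows; the BE_MAX_EQD value used here is an assumption for illustration.

#include <stdio.h>

#define BE_MAX_EQD 96	/* assumed ceiling; the real constant lives in be.h */

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

struct aic { unsigned int enable, min_eqd, max_eqd, et_eqd; };

int main(void)
{
	/* pretend these came in via ethtool -C */
	unsigned int usecs = 200, usecs_low = 8, usecs_high = 120, adaptive = 1;
	struct aic aic;

	aic.enable  = adaptive;
	aic.max_eqd = min_u(usecs_high, BE_MAX_EQD);	/* 96 */
	aic.min_eqd = min_u(usecs_low, aic.max_eqd);	/* 8  */
	aic.et_eqd  = min_u(usecs, aic.max_eqd);	/* 96 */
	aic.et_eqd  = max_u(aic.et_eqd, aic.min_eqd);	/* 96 */

	printf("et_eqd=%u min=%u max=%u adaptive=%u\n",
	       aic.et_eqd, aic.min_eqd, aic.max_eqd, aic.enable);
	return 0;
}
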
index 100b528b9bd0f85bf26b778b0d928966d11d419a..77b4a8ae87a6ca64684d7103ee53a08446e060bc 100644 (file)
@@ -306,9 +306,13 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
 
                return &cmd->hw_stats;
-       } else  {
+       } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
 
+               return &cmd->hw_stats;
+       } else {
+               struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
                return &cmd->hw_stats;
        }
 }
@@ -320,9 +324,13 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
 
                return &hw_stats->erx;
-       } else {
+       } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
 
+               return &hw_stats->erx;
+       } else {
+               struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
                return &hw_stats->erx;
        }
 }
@@ -422,6 +430,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 }
 
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+       struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+       struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+       struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+       struct be_port_rxf_stats_v2 *port_stats =
+                                       &rxf_stats->port[adapter->port_num];
+       struct be_drv_stats *drvs = &adapter->drv_stats;
+
+       be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+       drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+       drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+       drvs->rx_pause_frames = port_stats->rx_pause_frames;
+       drvs->rx_crc_errors = port_stats->rx_crc_errors;
+       drvs->rx_control_frames = port_stats->rx_control_frames;
+       drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+       drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+       drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+       drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+       drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+       drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+       drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+       drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+       drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+       drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+       drvs->rx_dropped_header_too_small =
+               port_stats->rx_dropped_header_too_small;
+       drvs->rx_input_fifo_overflow_drop =
+               port_stats->rx_input_fifo_overflow_drop;
+       drvs->rx_address_filtered = port_stats->rx_address_filtered;
+       drvs->rx_alignment_symbol_errors =
+               port_stats->rx_alignment_symbol_errors;
+       drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+       drvs->tx_pauseframes = port_stats->tx_pauseframes;
+       drvs->tx_controlframes = port_stats->tx_controlframes;
+       drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+       drvs->jabber_events = port_stats->jabber_events;
+       drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+       drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+       drvs->forwarded_packets = rxf_stats->forwarded_packets;
+       drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+       drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+       drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+       adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+       if (be_roce_supported(adapter))  {
+               drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+               drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+               drvs->rx_roce_frames = port_stats->roce_frames_received;
+               drvs->roce_drops_crc = port_stats->roce_drops_crc;
+               drvs->roce_drops_payload_len =
+                       port_stats->roce_drops_payload_len;
+       }
+}
+
 static void populate_lancer_stats(struct be_adapter *adapter)
 {
 
@@ -489,7 +551,7 @@ static void populate_erx_stats(struct be_adapter *adapter,
 
 void be_parse_stats(struct be_adapter *adapter)
 {
-       struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+       struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;
@@ -499,11 +561,13 @@ void be_parse_stats(struct be_adapter *adapter)
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
-               else
-                       /* for BE3 and Skyhawk */
+               else if (BE3_chip(adapter))
+                       /* for BE3 */
                        populate_be_v1_stats(adapter);
+               else
+                       populate_be_v2_stats(adapter);
 
-               /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+       /* erx_v2 is longer than v0/v1; ok to use v2 for v0/v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
@@ -855,11 +919,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
        unsigned int eth_hdr_len;
        struct iphdr *ip;
 
-       /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
+       /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
-        * pad such packets to a 36-byte length.
+        * pad short packets (<= 32 bytes) to a 36-byte length.
         */
-       if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
+       if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
@@ -935,8 +999,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
        u32 start = txq->head;
 
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-       if (!skb)
+       if (!skb) {
+               tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
+       }
 
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
@@ -965,6 +1031,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
+               tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
@@ -1013,18 +1080,40 @@ static int be_vid_config(struct be_adapter *adapter)
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);
 
-       /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
-               dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
-               dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
-               goto set_vlan_promisc;
+               /* Set to VLAN promisc mode as setting VLAN filter failed */
+               if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+                       goto set_vlan_promisc;
+               dev_err(&adapter->pdev->dev,
+                       "Setting HW VLAN filtering failed.\n");
+       } else {
+               if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
+                       /* hw VLAN filtering re-enabled. */
+                       status = be_cmd_rx_filter(adapter,
+                                                 BE_FLAGS_VLAN_PROMISC, OFF);
+                       if (!status) {
+                               dev_info(&adapter->pdev->dev,
+                                        "Disabling VLAN Promiscuous mode.\n");
+                               adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+                               dev_info(&adapter->pdev->dev,
+                                        "Re-Enabling HW VLAN filtering\n");
+                       }
+               }
        }
 
        return status;
 
 set_vlan_promisc:
-       status = be_cmd_vlan_config(adapter, adapter->if_handle,
-                                   NULL, 0, 1, 1);
+       dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+
+       status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
+       if (!status) {
+               dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
+               dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
+               adapter->flags |= BE_FLAGS_VLAN_PROMISC;
+       } else
+               dev_err(&adapter->pdev->dev,
+                       "Failed to enable VLAN Promiscuous mode.\n");
        return status;
 }
 
@@ -1033,10 +1122,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!lancer_chip(adapter) && !be_physfn(adapter)) {
-               status = -EINVAL;
-               goto ret;
-       }
 
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1144,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!lancer_chip(adapter) && !be_physfn(adapter)) {
-               status = -EINVAL;
-               goto ret;
-       }
-
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;
@@ -1188,8 +1268,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
 
        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
-       vi->vlan = vf_cfg->vlan_tag;
-       vi->qos = 0;
+       vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
+       vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 
        return 0;
@@ -1199,28 +1279,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;
 
        if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (vf >= adapter->num_vfs || vlan > 4095)
+       if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;
 
-       if (vlan) {
-               if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+       if (vlan || qos) {
+               vlan |= qos << VLAN_PRIO_SHIFT;
+               if (vf_cfg->vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
-                       adapter->vf_cfg[vf].vlan_tag = vlan;
-
-                       status = be_cmd_set_hsw_config(adapter, vlan,
-                               vf + 1, adapter->vf_cfg[vf].if_handle, 0);
+                       vf_cfg->vlan_tag = vlan;
+                       status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+                                                      vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
-               adapter->vf_cfg[vf].vlan_tag = 0;
-               vlan = adapter->vf_cfg[vf].def_vid;
+               vf_cfg->vlan_tag = 0;
+               vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
-                       adapter->vf_cfg[vf].if_handle, 0);
+                                              vf_cfg->if_handle, 0);
        }
 
 
@@ -1261,53 +1342,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        return status;
 }
 
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+                         ulong now)
 {
-       struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
-       ulong now = jiffies;
-       ulong delta = now - stats->rx_jiffies;
-       u64 pkts;
-       unsigned int start, eqd;
+       aic->rx_pkts_prev = rx_pkts;
+       aic->tx_reqs_prev = tx_pkts;
+       aic->jiffies = now;
+}
 
-       if (!eqo->enable_aic) {
-               eqd = eqo->eqd;
-               goto modify_eqd;
-       }
+static void be_eqd_update(struct be_adapter *adapter)
+{
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
+       int eqd, i, num = 0, start;
+       struct be_aic_obj *aic;
+       struct be_eq_obj *eqo;
+       struct be_rx_obj *rxo;
+       struct be_tx_obj *txo;
+       u64 rx_pkts, tx_pkts;
+       ulong now;
+       u32 pps, delta;
 
-       if (eqo->idx >= adapter->num_rx_qs)
-               return;
+       for_all_evt_queues(adapter, eqo, i) {
+               aic = &adapter->aic_obj[eqo->idx];
+               if (!aic->enable) {
+                       if (aic->jiffies)
+                               aic->jiffies = 0;
+                       eqd = aic->et_eqd;
+                       goto modify_eqd;
+               }
 
-       stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+               rxo = &adapter->rx_obj[eqo->idx];
+               do {
+                       start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+                       rx_pkts = rxo->stats.rx_pkts;
+               } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
 
-       /* Wrapped around */
-       if (time_before(now, stats->rx_jiffies)) {
-               stats->rx_jiffies = now;
-               return;
-       }
+               txo = &adapter->tx_obj[eqo->idx];
+               do {
+                       start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+                       tx_pkts = txo->stats.tx_reqs;
+               } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
 
-       /* Update once a second */
-       if (delta < HZ)
-               return;
 
-       do {
-               start = u64_stats_fetch_begin_bh(&stats->sync);
-               pkts = stats->rx_pkts;
-       } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
-       stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
-       stats->rx_pkts_prev = pkts;
-       stats->rx_jiffies = now;
-       eqd = (stats->rx_pps / 110000) << 3;
-       eqd = min(eqd, eqo->max_eqd);
-       eqd = max(eqd, eqo->min_eqd);
-       if (eqd < 10)
-               eqd = 0;
+               /* Skip, if wrapped around or first calculation */
+               now = jiffies;
+               if (!aic->jiffies || time_before(now, aic->jiffies) ||
+                   rx_pkts < aic->rx_pkts_prev ||
+                   tx_pkts < aic->tx_reqs_prev) {
+                       be_aic_update(aic, rx_pkts, tx_pkts, now);
+                       continue;
+               }
+
+               delta = jiffies_to_msecs(now - aic->jiffies);
+               pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+                       (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+               eqd = (pps / 15000) << 2;
 
+               if (eqd < 8)
+                       eqd = 0;
+               eqd = min_t(u32, eqd, aic->max_eqd);
+               eqd = max_t(u32, eqd, aic->min_eqd);
+
+               be_aic_update(aic, rx_pkts, tx_pkts, now);
 modify_eqd:
-       if (eqd != eqo->cur_eqd) {
-               be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
-               eqo->cur_eqd = eqd;
+               if (eqd != aic->prev_eqd) {
+                       set_eqd[num].delay_multiplier = (eqd * 65)/100;
+                       set_eqd[num].eq_id = eqo->q.id;
+                       aic->prev_eqd = eqd;
+                       num++;
+               }
        }
+
+       if (num)
+               be_cmd_modify_eqd(adapter, set_eqd, num);
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
@@ -1924,6 +2031,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 {
        struct be_queue_info *eq;
        struct be_eq_obj *eqo;
+       struct be_aic_obj *aic;
        int i, rc;
 
        adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1932,11 +2040,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
        for_all_evt_queues(adapter, eqo, i) {
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
+               aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->tx_budget = BE_TX_BUDGET;
                eqo->idx = i;
-               eqo->max_eqd = BE_MAX_EQD;
-               eqo->enable_aic = true;
+               aic->max_eqd = BE_MAX_EQD;
+               aic->enable = true;
 
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2923,7 +3032,8 @@ static int be_vf_setup(struct be_adapter *adapter)
                        goto err;
                vf_cfg->def_vid = def_vlan;
 
-               be_cmd_enable_vf(adapter, vf + 1);
+               if (!old_vfs)
+                       be_cmd_enable_vf(adapter, vf + 1);
        }
 
        if (!old_vfs) {
@@ -2948,12 +3058,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
 
-       if (BE3_chip(adapter) && be_physfn(adapter)) {
+       if (BE3_chip(adapter) && sriov_want(adapter)) {
                int max_vfs;
 
                max_vfs = pci_sriov_get_totalvfs(pdev);
                res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               use_sriov = res->max_vfs && num_vfs;
+               use_sriov = res->max_vfs;
        }
 
        if (be_physfn(adapter))
@@ -2963,12 +3073,15 @@ static void BEx_get_resources(struct be_adapter *adapter,
 
        if (adapter->function_mode & FLEX10_MODE)
                res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+       else if (adapter->function_mode & UMC_ENABLED)
+               res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
        else
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
        res->max_mcast_mac = BE_MAX_MC;
 
+       /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
        if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
-           !be_physfn(adapter))
+           !be_physfn(adapter) || (adapter->port_num > 1))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3010,14 +3123,6 @@ static int be_get_resources(struct be_adapter *adapter)
                adapter->res = res;
        }
 
-       /* For BE3 only check if FW suggests a different max-txqs value */
-       if (BE3_chip(adapter)) {
-               status = be_cmd_get_profile_config(adapter, &res, 0);
-               if (!status && res.max_tx_qs)
-                       adapter->res.max_tx_qs =
-                               min(adapter->res.max_tx_qs, res.max_tx_qs);
-       }
-
        /* For Lancer, SH etc read per-function resource limits from FW.
         * GET_FUNC_CONFIG returns per function guaranteed limits.
         * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3242,7 +3347,7 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
 
-       if (be_physfn(adapter) && num_vfs) {
+       if (sriov_want(adapter)) {
                if (be_max_vfs(adapter))
                        be_vf_setup(adapter);
                else
@@ -4061,9 +4166,11 @@ static int be_stats_init(struct be_adapter *adapter)
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
-       else
-               /* BE3 and Skyhawk */
+       else if (BE3_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+       else
+               /* ALL non-BE ASICs */
+               cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
 
        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
@@ -4097,7 +4204,6 @@ static void be_remove(struct pci_dev *pdev)
 
        pci_disable_pcie_error_reporting(pdev);
 
-       pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
 
@@ -4246,7 +4352,6 @@ static void be_worker(struct work_struct *work)
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
-       struct be_eq_obj *eqo;
        int i;
 
        /* when interrupts are not yet enabled, just reap any pending
@@ -4277,8 +4382,7 @@ static void be_worker(struct work_struct *work)
                }
        }
 
-       for_all_evt_queues(adapter, eqo, i)
-               be_eqd_update(adapter, eqo);
+       be_eqd_update(adapter);
 
 reschedule:
        adapter->work_counter++;
@@ -4335,28 +4439,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
-               status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (status < 0) {
-                       dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
-                       goto free_netdev;
-               }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
-               status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (!status)
-                       status = dma_set_coherent_mask(&pdev->dev,
-                                                      DMA_BIT_MASK(32));
+               status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }
 
-       status = pci_enable_pcie_error_reporting(pdev);
-       if (status)
-               dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
+       if (be_physfn(adapter)) {
+               status = pci_enable_pcie_error_reporting(pdev);
+               if (!status)
+                       dev_info(&pdev->dev, "PCIe error reporting enabled\n");
+       }
 
        status = be_ctrl_init(adapter);
        if (status)
@@ -4427,7 +4525,6 @@ ctrl_clean:
        be_ctrl_cleanup(adapter);
 free_netdev:
        free_netdev(netdev);
-       pci_set_drvdata(pdev, NULL);
 rel_reg:
        pci_release_regions(pdev);
 disable_dev:
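
The new be_eqd_update() derives a combined RX+TX packet rate per event queue and maps it to an EQ delay before batching the MODIFY_EQ_DELAY command. Below is a stand-alone walk-through of that arithmetic with made-up counter samples; the pps formula, the /15000 and <<2 mapping, the 8-usec floor and the 65/100 multiplier scaling are taken from the hunk above, while the min/max limits are assumed.

#include <stdio.h>

int main(void)
{
	unsigned long long rx_pkts = 1500000, rx_pkts_prev = 1200000;
	unsigned long long tx_pkts =  800000, tx_reqs_prev =  700000;
	unsigned int delta_ms = 1000;		/* jiffies_to_msecs(now - aic->jiffies) */
	unsigned int min_eqd = 0, max_eqd = 96;	/* assumed per-EQ limits */
	unsigned int pps, eqd, delay_multiplier;

	/* same formula as the hunk: combined RX+TX packets per second */
	pps = (unsigned int)(rx_pkts - rx_pkts_prev) * 1000 / delta_ms +
	      (unsigned int)(tx_pkts - tx_reqs_prev) * 1000 / delta_ms;	/* 400000 */

	eqd = (pps / 15000) << 2;		/* 26 << 2 = 104 */
	if (eqd < 8)
		eqd = 0;
	if (eqd > max_eqd)
		eqd = max_eqd;			/* clamped to 96 */
	if (eqd < min_eqd)
		eqd = min_eqd;

	delay_multiplier = (eqd * 65) / 100;	/* 62, posted via MODIFY_EQ_DELAY */
	printf("pps=%u eqd=%u delay_multiplier=%u\n", pps, eqd, delay_multiplier);
	return 0;
}
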
index c706b7a9397ed6f13dedfee142f99ae50a98ff98..4b22a9579f859e7e9d61da22d5175632d6441d2d 100644 (file)
@@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev)
                pci_iounmap(pdev, np->mem);
                free_netdev(dev);
                pci_release_regions(pdev);
-               pci_set_drvdata(pdev, NULL);
        } else
                printk(KERN_ERR "fealnx: remove for unknown device\n");
 }
index 6b60582ce8cf1c7a82dd3da1d69acb64981014f4..56f2f608a9f43aa27a32b038d21daf83ffe7f682 100644 (file)
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        mac_addr = of_get_mac_address(ofdev->dev.of_node);
        if (mac_addr)
-               memcpy(ndev->dev_addr, mac_addr, 6);
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
 
        ret = fep->ops->allocate_bd(ndev);
        if (ret)
index c4eaadeb572fa3dcd97396afffd961f936d0189d..d6d810cb97c79b80da7991f2ee423406b89853af 100644 (file)
@@ -88,6 +88,7 @@
 
 #include <asm/io.h>
 #include <asm/reg.h>
+#include <asm/mpc85xx.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
        }
 }
 
-static void gfar_detect_errata(struct gfar_private *priv)
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
 {
-       struct device *dev = &priv->ofdev->dev;
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;
 
-       /* MPC8313 and MPC837x all rev */
-       if ((pvr == 0x80850010 && mod == 0x80b0) ||
-           (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
-               priv->errata |= GFAR_ERRATA_A002;
+       /* MPC8313 Rev < 2.0 */
+       if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+               priv->errata |= GFAR_ERRATA_12;
+}
+
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+       unsigned int svr = mfspr(SPRN_SVR);
 
-       /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
-       if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
-           (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+       if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
+       if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+               priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+       struct device *dev = &priv->ofdev->dev;
+
+       /* no plans to fix */
+       priv->errata |= GFAR_ERRATA_A002;
+
+       if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+               __gfar_detect_errata_85xx(priv);
+       else /* non-mpc85xx parts, i.e. e300 core based */
+               __gfar_detect_errata_83xx(priv);
 
        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
        /* Normally TSEC should not hang on GRS commands, so we should
         * actually wait for IEVENT_GRSC flag.
         */
-       if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+       if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
                return 0;
 
        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
@@ -2900,7 +2918,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        struct gfar_priv_rx_q *rx_queue = NULL;
        int work_done = 0, work_done_per_q = 0;
        int i, budget_per_q = 0;
-       int has_tx_work;
+       int has_tx_work = 0;
        unsigned long rstat_rxf;
        int num_act_queues;
 
@@ -2915,62 +2933,51 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        if (num_act_queues)
                budget_per_q = budget/num_act_queues;
 
-       while (1) {
-               has_tx_work = 0;
-               for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-                       tx_queue = priv->tx_queue[i];
-                       /* run Tx cleanup to completion */
-                       if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-                               gfar_clean_tx_ring(tx_queue);
-                               has_tx_work = 1;
-                       }
+       for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+               tx_queue = priv->tx_queue[i];
+               /* run Tx cleanup to completion */
+               if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+                       gfar_clean_tx_ring(tx_queue);
+                       has_tx_work = 1;
                }
+       }
 
-               for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
-                       /* skip queue if not active */
-                       if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
-                               continue;
-
-                       rx_queue = priv->rx_queue[i];
-                       work_done_per_q =
-                               gfar_clean_rx_ring(rx_queue, budget_per_q);
-                       work_done += work_done_per_q;
-
-                       /* finished processing this queue */
-                       if (work_done_per_q < budget_per_q) {
-                               /* clear active queue hw indication */
-                               gfar_write(&regs->rstat,
-                                          RSTAT_CLEAR_RXF0 >> i);
-                               rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
-                               num_act_queues--;
-
-                               if (!num_act_queues)
-                                       break;
-                               /* recompute budget per Rx queue */
-                               budget_per_q =
-                                       (budget - work_done) / num_act_queues;
-                       }
-               }
+       for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+               /* skip queue if not active */
+               if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+                       continue;
 
-               if (work_done >= budget)
-                       break;
+               rx_queue = priv->rx_queue[i];
+               work_done_per_q =
+                       gfar_clean_rx_ring(rx_queue, budget_per_q);
+               work_done += work_done_per_q;
+
+               /* finished processing this queue */
+               if (work_done_per_q < budget_per_q) {
+                       /* clear active queue hw indication */
+                       gfar_write(&regs->rstat,
+                                  RSTAT_CLEAR_RXF0 >> i);
+                       num_act_queues--;
+
+                       if (!num_act_queues)
+                               break;
+               }
+       }
 
-               if (!num_act_queues && !has_tx_work) {
+       if (!num_act_queues && !has_tx_work) {
 
-                       napi_complete(napi);
+               napi_complete(napi);
 
-                       /* Clear the halt bit in RSTAT */
-                       gfar_write(&regs->rstat, gfargrp->rstat);
+               /* Clear the halt bit in RSTAT */
+               gfar_write(&regs->rstat, gfargrp->rstat);
 
-                       gfar_write(&regs->imask, IMASK_DEFAULT);
+               gfar_write(&regs->imask, IMASK_DEFAULT);
 
-                       /* If we are coalescing interrupts, update the timer
-                        * Otherwise, clear it
-                        */
-                       gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
-                                                 gfargrp->tx_bit_map);
-                       break;
-               }
+               /* If we are coalescing interrupts, update the timer
+                * Otherwise, clear it
+                */
+               gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+                                         gfargrp->tx_bit_map);
        }
 
        return work_done;
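
The hunk above removes the outer while(1) loop so gfar_poll() makes a single bounded pass per NAPI invocation and relies on the NAPI core to re-poll while work remains. A minimal sketch of that single-pass pattern, with hypothetical foo_* names standing in for driver specifics (this is not the gianfar code itself):

    #include <linux/netdevice.h>

    struct foo_priv {                       /* hypothetical driver state */
            struct napi_struct napi;
    };

    static int foo_clean_rx(struct foo_priv *priv, int budget);   /* assumed helper, not shown */
    static void foo_enable_irqs(struct foo_priv *priv);           /* assumed helper, not shown */

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int work_done = foo_clean_rx(priv, budget);   /* at most 'budget' packets */

            if (work_done < budget) {
                    napi_complete(napi);      /* all work done: leave the poll list */
                    foo_enable_irqs(priv);    /* re-arm device interrupts */
            }
            return work_done;                 /* == budget keeps us on the poll list */
    }

Returning a value equal to the budget tells NAPI to call the handler again, which is what replaces the removed driver-internal loop.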
index 04112b98ff5d9231f1ed60e4df438fa88b6b7947..114c58f9d8d25a83d235e0cafa976a93db3c7426 100644 (file)
@@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv,
        *fpr = gfar_read(&regs->rqfpr);
 }
 
-extern void lock_rx_qs(struct gfar_private *priv);
-extern void lock_tx_qs(struct gfar_private *priv);
-extern void unlock_rx_qs(struct gfar_private *priv);
-extern void unlock_tx_qs(struct gfar_private *priv);
-extern irqreturn_t gfar_receive(int irq, void *dev_id);
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
-extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
-               int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing_all(struct gfar_private *priv);
+void lock_rx_qs(struct gfar_private *priv);
+void lock_tx_qs(struct gfar_private *priv);
+void unlock_rx_qs(struct gfar_private *priv);
+void unlock_tx_qs(struct gfar_private *priv);
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_halt(struct net_device *dev);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+                  u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, netdev_features_t features);
-extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_check_rx_parser_mode(struct gfar_private *priv);
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 098f133908ae016058f326225e2ed58b9709d592..e006a09ba8990050f3bc9a91e101ae8455419a87 100644 (file)
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
        err = -ENODEV;
 
        etsects->caps = ptp_gianfar_caps;
-       etsects->cksel = DEFAULT_CKSEL;
+
+       if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
+               etsects->cksel = DEFAULT_CKSEL;
 
        if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
            get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
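
get_of_u32() is a local wrapper in this file around the OF property API; the change makes "fsl,cksel" an optional device-tree property with DEFAULT_CKSEL as the fallback. Roughly the same pattern with the generic OF helpers would look like this (a sketch, not the driver's code):

    #include <linux/of.h>

    u32 cksel;

    if (of_property_read_u32(node, "fsl,cksel", &cksel))
            cksel = DEFAULT_CKSEL;    /* property absent or unreadable: keep the default */
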
index 5930c39672db25eee560dabd16cf38d1ca54636b..d58a3dfc95c296086f3d729819e42bc758ef5f6f 100644 (file)
@@ -3899,7 +3899,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
        mac_addr = of_get_mac_address(np);
        if (mac_addr)
-               memcpy(dev->dev_addr, mac_addr, 6);
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        ugeth->ug_info = ug_info;
        ugeth->dev = device;
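
This and the following memcpy changes replace the magic length 6 with ETH_ALEN from <linux/if_ether.h>, which is defined as 6 (the number of octets in an Ethernet address), so the copied length is unchanged and the intent is explicit:

    #include <linux/if_ether.h>     /* #define ETH_ALEN 6 */

    memcpy(dev->dev_addr, mac_addr, ETH_ALEN);    /* same bytes as memcpy(..., 6) */
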
index 6231bc02b9648f9c178ef984c22c362bc08c10cb..1085257385d2c2d1312b0a31c5272eec484a8d4b 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_FUJITSU
        bool "Fujitsu devices"
        default y
-       depends on ISA || PCMCIA
+       depends on PCMCIA
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
index 91227d03274e02685d5e96fe6096e56e6af999eb..37860096f744005a17c7dc805f890e396ac8933e 100644 (file)
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
        if (request_irq(dev->irq, hp100_interrupt,
                        lp->bus == HP100_BUS_PCI || lp->bus ==
                        HP100_BUS_EISA ? IRQF_SHARED : 0,
-                       "hp100", dev)) {
+                       dev->name, dev)) {
                printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
                return -EAGAIN;
        }
index e38816145395c9e2b08416934e0b10b6352b4507..a15877affc9bd6b5791e1df9fa0e8ec2c2905c0d 100644 (file)
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
        i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-       memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+       memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        lp->sa_cmd.cmd.command = CmdSASetup;
        i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
                        err = -ENODEV;
                        goto out;
                }
-               memcpy(eth_addr, (void *) 0xfffc1f2c, 6);       /* YUCK! Get addr from NOVRAM */
+               memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);        /* YUCK! Get addr from NOVRAM */
                dev->base_addr = MVME_I596_BASE;
                dev->irq = (unsigned) MVME16x_IRQ_I596;
                goto found;
index d653bac4cfc4e2be49d1ab1ff9a1247eae5460aa..861fa15e1e81b11e31f0d8a2f03907056689ce9d 100644 (file)
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
        i596_add_cmd(dev, &dma->cf_cmd.cmd);
 
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-       memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+       memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
                netdev_for_each_mc_addr(ha, dev) {
                        if (!cnt--)
                                break;
-                       memcpy(cp, ha->addr, 6);
+                       memcpy(cp, ha->addr, ETH_ALEN);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %pM\n",
                                           dev->name, cp));
-                       cp += 6;
+                       cp += ETH_ALEN;
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
index 6b5c7222342c5e96144febc4a55bd7fb7dfc137e..ef21a2e10180c516a5ee3935b15af5fb52b506ab 100644 (file)
@@ -2676,7 +2676,7 @@ static int emac_init_config(struct emac_instance *dev)
                       np->full_name);
                return -ENXIO;
        }
-       memcpy(dev->ndev->dev_addr, p, 6);
+       memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
 
        /* IAHT and GAHT filter parameterization */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
index 59a92d5870b5535826a6fec333f27d1796e062db..9c45efe4c8fecfdc0e16371ee67a622e4bfcbdbe 100644 (file)
 struct emac_instance;
 struct mal_instance;
 
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
 
 # define DBG_LEVEL             1
 
index 668bceeff4a2112eb98eece3405c9594dd8accef..d4f1374d19000ea0562d9c4c738e1ea56caffb4b 100644 (file)
@@ -56,15 +56,15 @@ struct rgmii_instance {
 
 #ifdef CONFIG_IBM_EMAC_RGMII
 
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
index 350b7096a041a310f620911f7f2e448bc7d4e8ca..4d5f336f07b3669c15c916ea79ce8e90bc9efb47 100644 (file)
@@ -72,13 +72,13 @@ struct tah_instance {
 
 #ifdef CONFIG_IBM_EMAC_TAH
 
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
index 455bfb0854934d684164d5105faa45ae89df03bb..0959c55b14591dc7f2d58b1946585eeb0da04d68 100644 (file)
@@ -53,15 +53,15 @@ struct zmii_instance {
 
 #ifdef CONFIG_IBM_EMAC_ZMII
 
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 # define zmii_init()           0
index 5d41aee69d1646e3b42a1d91b8873825344043cc..952d795230a479c79c0684ee0849a05e5e0ff631 100644 (file)
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
                netdev_for_each_mc_addr(ha, netdev) {
                        /* add the multicast address to the filter table */
                        unsigned long mcast_addr = 0;
-                       memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+                       memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
                        lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                                   IbmVethMcastAddFilter,
                                                   mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 
        adapter->mac_addr = 0;
-       memcpy(&adapter->mac_addr, mac_addr_p, 6);
+       memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
 
        netdev->irq = dev->irq;
        netdev->netdev_ops = &ibmveth_netdev_ops;
index bdf5023724e768945f8ec71ceb32a27d897dba6e..25045ae071711f03cee9ccabf9898cf2abebbc21 100644 (file)
@@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev)
 
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static const struct net_device_ops ipg_netdev_ops = {
index ada6e210279f3750e26ea5720ca3bbe5057abf2c..cbaba4442d4b226d18691059a4e122029040ff0a 100644 (file)
@@ -2985,7 +2985,6 @@ err_out_free_res:
 err_out_disable_pdev:
        pci_disable_device(pdev);
 err_out_free_dev:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        return err;
 }
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
                free_netdev(netdev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index 26d9cd59ec75a25451185a8cb933c318f9068912..58c147271a362e68914d55d97b71b327b194e86b 100644 (file)
@@ -325,7 +325,7 @@ enum e1000_state_t {
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 #define e_dbg(format, arg...) \
        netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
 #define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
 
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
 
 #endif /* _E1000_H_ */
index 59ad007dd5aa09a6123cbd25db7c7b8f4e06f1b7..34672f87726cb7b9ab0cdd1ac294b52a031ba4c8 100644 (file)
@@ -1018,19 +1018,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        pci_using_dac = 0;
        if ((hw->bus_type == e1000_bus_type_pcix) &&
-           !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               /* according to DMA-API-HOWTO, coherent calls will always
-                * succeed if the set call did
-                */
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+           !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA config, aborting\n");
                        goto err_dma;
                }
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        }
 
        netdev->netdev_ops = &e1000_netdev_ops;
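
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call and fails if either cannot be satisfied, which is why the separate dma_set_coherent_mask() calls can be dropped here and in the e1000e hunk below. A typical probe-time fallback sequence looks like this (sketch):

    #include <linux/dma-mapping.h>

    int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (err) {
            /* 64-bit addressing not available: fall back to 32-bit DMA */
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (err)
                    return err;     /* no usable DMA configuration */
    }
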
index ad0edd11015d7b40a14d79f06afdab8ecec76cef..0150f7fc893d4ae6985096f17e5a4678f39fc3b3 100644 (file)
@@ -472,26 +472,25 @@ enum latency_range {
 extern char e1000e_driver_name[];
 extern const char e1000e_driver_version[];
 
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-                                                   struct rtnl_link_stats64
-                                                   *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
 
 extern unsigned int copybreak;
 
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_es2_info;
 
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
        return hw->phy.ops.write_reg_locked(hw, offset, data);
 }
 
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
 
 static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 {
index 4ef786775acb7ca6665873cae61f520a6fbc8b45..aedd5736a87d53862fa2be4176e9762fcd8df18a 100644 (file)
@@ -6553,21 +6553,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
        }
 
index b5252eb8a6c757861ff2c22a3efea81400c3934d..49572dcdba8744d6658352df6c06d884ccb3ae3c 100644 (file)
@@ -347,9 +347,9 @@ struct i40e_vsi {
        u32 rx_buf_failed;
        u32 rx_page_failed;
 
-       /* These are arrays of rings, allocated at run-time */
-       struct i40e_ring *rx_rings;
-       struct i40e_ring *tx_rings;
+       /* These are containers of ring pointers, allocated at run-time */
+       struct i40e_ring **rx_rings;
+       struct i40e_ring **tx_rings;
 
        u16 work_limit;
        /* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +366,7 @@ struct i40e_vsi {
        u8  dtype;
 
        /* List of q_vectors allocated to this VSI */
-       struct i40e_q_vector *q_vectors;
+       struct i40e_q_vector **q_vectors;
        int num_q_vectors;
        int base_vector;
 
@@ -422,8 +422,9 @@ struct i40e_q_vector {
 
        u8 num_ringpairs;       /* total number of ring pairs in vector */
 
-       char name[IFNAMSIZ + 9];
        cpumask_t affinity_mask;
+       struct rcu_head rcu;    /* to avoid race with update stats on free */
+       char name[IFNAMSIZ + 9];
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -544,6 +545,7 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
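
Turning rx_rings/tx_rings into arrays of ring pointers (plus the new rcu_head in i40e_q_vector) lets individual rings be swapped or freed while readers such as stats and debugfs walk the array under rcu_read_lock(). A minimal sketch of the reader side, using the ACCESS_ONCE() idiom of this kernel vintage (READ_ONCE() in later kernels):

    rcu_read_lock();
    for (i = 0; i < vsi->num_queue_pairs; i++) {
            struct i40e_ring *ring = ACCESS_ONCE(vsi->rx_rings[i]);

            if (!ring)              /* the ring may disappear while we walk the array */
                    continue;
            /* ... read ring fields; do not sleep inside the RCU read-side section ... */
    }
    rcu_read_unlock();
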
index 0c524fa9f8111a092c07d69a442c75dea43f0f42..cfef7fc32cdd4643382f6d0125589fd1e3de6a95 100644 (file)
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
-               memcpy(details, cmd_details,
-                      sizeof(struct i40e_asq_cmd_details));
+               *details = *cmd_details;
 
                /* If the cmd_details are defined copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
 
        /* if the desc is available copy the temp desc to the right place */
-       memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
+       *desc_on_ring = *desc;
 
        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
-               memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
+               *desc = *desc_on_ring;
                if (buff != NULL)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
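
Replacing memcpy() with a plain structure assignment here copies the same bytes, but the assignment is type-checked by the compiler, so a mismatched source or destination type becomes a build error instead of silent corruption:

    /* Equivalent copies of one i40e_asq_cmd_details struct. */
    memcpy(details, cmd_details, sizeof(struct i40e_asq_cmd_details));
    *details = *cmd_details;
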
index c21df7bc3b1dd9b348050c1593866b33e42c7cf6..1e4ea134975ac43e8288132e6e4b93bacb2f4b7e 100644 (file)
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 
        /* save link status information */
        if (link)
-               memcpy(link, hw_link_info, sizeof(struct i40e_link_status));
+               *link = *hw_link_info;
 
        /* flag cleared so helper functions don't call AQ again */
        hw->phy.get_link_info = false;
index 8dbd91f64b74d815fff682c6fe07570182e3182c..ef4cb1cf31f2d392a4ed4e1ae2e0059919423317 100644 (file)
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
                                   size_t count, loff_t *ppos)
 {
        struct i40e_pf *pf = filp->private_data;
-       char dump_request_buf[16];
        bool seid_found = false;
-       int bytes_not_copied;
        long seid = -1;
        int buflen = 0;
        int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
        /* don't allow partial writes */
        if (*ppos != 0)
                return 0;
-       if (count >= sizeof(dump_request_buf))
-               return -ENOSPC;
-
-       bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
-       if (bytes_not_copied < 0)
-               return bytes_not_copied;
-       if (bytes_not_copied > 0)
-               count -= bytes_not_copied;
-       dump_request_buf[count] = '\0';
 
        /* decode the SEID given to be dumped */
-       ret = kstrtol(dump_request_buf, 0, &seid);
-       if (ret < 0) {
-               dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
-                        dump_request_buf);
+       ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+       if (ret) {
+               dev_info(&pf->pdev->dev, "bad seid value\n");
        } else if (seid == 0) {
                seid_found = true;
 
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
                        memcpy(p, vsi, len);
                        p += len;
 
-                       len = (sizeof(struct i40e_q_vector)
-                               * vsi->num_q_vectors);
-                       memcpy(p, vsi->q_vectors, len);
-                       p += len;
-
-                       len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
-                       memcpy(p, vsi->tx_rings, len);
-                       p += len;
-                       memcpy(p, vsi->rx_rings, len);
-                       p += len;
+                       if (vsi->num_q_vectors) {
+                               len = (sizeof(struct i40e_q_vector)
+                                       * vsi->num_q_vectors);
+                               memcpy(p, vsi->q_vectors, len);
+                               p += len;
+                       }
 
-                       for (i = 0; i < vsi->num_queue_pairs; i++) {
-                               len = sizeof(struct i40e_tx_buffer);
-                               memcpy(p, vsi->tx_rings[i].tx_bi, len);
+                       if (vsi->num_queue_pairs) {
+                               len = (sizeof(struct i40e_ring) *
+                                     vsi->num_queue_pairs);
+                               memcpy(p, vsi->tx_rings, len);
+                               p += len;
+                               memcpy(p, vsi->rx_rings, len);
                                p += len;
                        }
-                       for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+                       if (vsi->tx_rings[0]) {
+                               len = sizeof(struct i40e_tx_buffer);
+                               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                                       memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+                                       p += len;
+                               }
                                len = sizeof(struct i40e_rx_buffer);
-                               memcpy(p, vsi->rx_rings[i].rx_bi, len);
-                               p += len;
+                               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                                       memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+                                       p += len;
+                               }
                        }
 
                        /* macvlan filter list */
@@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
                 vsi->tx_restart, vsi->tx_busy,
                 vsi->rx_buf_failed, vsi->rx_page_failed);
-       if (vsi->rx_rings) {
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: desc = %p\n",
-                                i, vsi->rx_rings[i].desc);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
-                                i, vsi->rx_rings[i].dev,
-                                vsi->rx_rings[i].netdev,
-                                vsi->rx_rings[i].rx_bi);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-                                i, vsi->rx_rings[i].state,
-                                vsi->rx_rings[i].queue_index,
-                                vsi->rx_rings[i].reg_idx);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-                                i, vsi->rx_rings[i].rx_hdr_len,
-                                vsi->rx_rings[i].rx_buf_len,
-                                vsi->rx_rings[i].dtype);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                                i, vsi->rx_rings[i].hsplit,
-                                vsi->rx_rings[i].next_to_use,
-                                vsi->rx_rings[i].next_to_clean,
-                                vsi->rx_rings[i].ring_active);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
-                                i, vsi->rx_rings[i].rx_stats.packets,
-                                vsi->rx_rings[i].rx_stats.bytes,
-                                vsi->rx_rings[i].rx_stats.non_eop_descs);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
-                                i,
-                                vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
-                               vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
-                                i, vsi->rx_rings[i].size,
-                                (long unsigned int)vsi->rx_rings[i].dma);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
-                                i, vsi->rx_rings[i].vsi,
-                                vsi->rx_rings[i].q_vector);
-               }
+       rcu_read_lock();
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+               if (!rx_ring)
+                       continue;
+
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: desc = %p\n",
+                        i, rx_ring->desc);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+                        i, rx_ring->dev,
+                        rx_ring->netdev,
+                        rx_ring->rx_bi);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+                        i, rx_ring->state,
+                        rx_ring->queue_index,
+                        rx_ring->reg_idx);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+                        i, rx_ring->rx_hdr_len,
+                        rx_ring->rx_buf_len,
+                        rx_ring->dtype);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i, rx_ring->hsplit,
+                        rx_ring->next_to_use,
+                        rx_ring->next_to_clean,
+                        rx_ring->ring_active);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+                        i, rx_ring->stats.packets,
+                        rx_ring->stats.bytes,
+                        rx_ring->rx_stats.non_eop_descs);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+                        i,
+                        rx_ring->rx_stats.alloc_rx_page_failed,
+                       rx_ring->rx_stats.alloc_rx_buff_failed);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+                        i, rx_ring->size,
+                        (long unsigned int)rx_ring->dma);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
+                        i, rx_ring->vsi,
+                        rx_ring->q_vector);
        }
-       if (vsi->tx_rings) {
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: desc = %p\n",
-                                i, vsi->tx_rings[i].desc);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
-                                i, vsi->tx_rings[i].dev,
-                                vsi->tx_rings[i].netdev,
-                                vsi->tx_rings[i].tx_bi);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-                                i, vsi->tx_rings[i].state,
-                                vsi->tx_rings[i].queue_index,
-                                vsi->tx_rings[i].reg_idx);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: dtype = %d\n",
-                                i, vsi->tx_rings[i].dtype);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                                i, vsi->tx_rings[i].hsplit,
-                                vsi->tx_rings[i].next_to_use,
-                                vsi->tx_rings[i].next_to_clean,
-                                vsi->tx_rings[i].ring_active);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
-                                i, vsi->tx_rings[i].tx_stats.packets,
-                                vsi->tx_rings[i].tx_stats.bytes,
-                                vsi->tx_rings[i].tx_stats.restart_queue);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
-                                i,
-                                vsi->tx_rings[i].tx_stats.tx_busy,
-                                vsi->tx_rings[i].tx_stats.completed,
-                                vsi->tx_rings[i].tx_stats.tx_done_old);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
-                                i, vsi->tx_rings[i].size,
-                                (long unsigned int)vsi->tx_rings[i].dma);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
-                                i, vsi->tx_rings[i].vsi,
-                                vsi->tx_rings[i].q_vector);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: DCB tc = %d\n",
-                                i, vsi->tx_rings[i].dcb_tc);
-               }
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+               if (!tx_ring)
+                       continue;
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: desc = %p\n",
+                        i, tx_ring->desc);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+                        i, tx_ring->dev,
+                        tx_ring->netdev,
+                        tx_ring->tx_bi);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+                        i, tx_ring->state,
+                        tx_ring->queue_index,
+                        tx_ring->reg_idx);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: dtype = %d\n",
+                        i, tx_ring->dtype);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i, tx_ring->hsplit,
+                        tx_ring->next_to_use,
+                        tx_ring->next_to_clean,
+                        tx_ring->ring_active);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+                        i, tx_ring->stats.packets,
+                        tx_ring->stats.bytes,
+                        tx_ring->tx_stats.restart_queue);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+                        i,
+                        tx_ring->tx_stats.tx_busy,
+                        tx_ring->tx_stats.tx_done_old);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+                        i, tx_ring->size,
+                        (long unsigned int)tx_ring->dma);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
+                        i, tx_ring->vsi,
+                        tx_ring->q_vector);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: DCB tc = %d\n",
+                        i, tx_ring->dcb_tc);
        }
+       rcu_read_unlock();
        dev_info(&pf->pdev->dev,
                 "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
                 vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
        dev_info(&pf->pdev->dev,
                 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
                 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
-       if (vsi->q_vectors) {
-               for (i = 0; i < vsi->num_q_vectors; i++) {
-                       dev_info(&pf->pdev->dev,
-                                "    q_vectors[%i]: base index = %ld\n",
-                                i, ((long int)*vsi->q_vectors[i].rx.ring-
-                                       (long int)*vsi->q_vectors[0].rx.ring)/
-                                       sizeof(struct i40e_ring));
-               }
-       }
        dev_info(&pf->pdev->dev,
                 "    num_q_vectors = %i, base_vector = %i\n",
                 vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                return;
        }
        if (is_rx_ring)
-               ring = vsi->rx_rings[ring_id];
+               ring = *vsi->rx_rings[ring_id];
        else
-               ring = vsi->tx_rings[ring_id];
+               ring = *vsi->tx_rings[ring_id];
        if (cnt == 2) {
                dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
                         vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                      size_t count, loff_t *ppos)
 {
        struct i40e_pf *pf = filp->private_data;
+       char *cmd_buf, *cmd_buf_tmp;
        int bytes_not_copied;
        struct i40e_vsi *vsi;
        u8 *print_buf_start;
        u8 *print_buf;
-       char *cmd_buf;
        int vsi_seid;
        int veb_seid;
        int cnt;
@@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                count -= bytes_not_copied;
        cmd_buf[count] = '\0';
 
+       cmd_buf_tmp = strchr(cmd_buf, '\n');
+       if (cmd_buf_tmp) {
+               *cmd_buf_tmp = '\0';
+               count = cmd_buf_tmp - cmd_buf + 1;
+       }
+
        print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
        if (!print_buf_start)
                goto command_write_done;
@@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                i40e_veb_release(pf->veb[i]);
 
        } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
-               u8 ma[6];
-               int vlan = 0;
                struct i40e_mac_filter *f;
+               int vlan = 0;
+               u8 ma[6];
                int ret;
 
                cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                 ma, vlan, vsi_seid, f, ret);
 
        } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
-               u8 ma[6];
                int vlan = 0;
+               u8 ma[6];
                int ret;
 
                cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                 ma, vlan, vsi_seid, ret);
 
        } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
-               int v;
-               u16 vid;
                i40e_status ret;
+               u16 vid;
+               int v;
 
                cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
                if (cnt != 2) {
@@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
                   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
                struct i40e_fdir_data fd_data;
-               int ret;
                u16 packet_len, i, j = 0;
                char *asc_packet;
                bool add = false;
+               int ret;
 
                asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
                                     GFP_KERNEL);
@@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        }
                } else if (strncmp(&cmd_buf[5],
                           "get local", 9) == 0) {
+                       u16 llen, rlen;
                        int ret, i;
                        u8 *buff;
-                       u16 llen, rlen;
                        buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
                        if (!buff)
                                goto command_write_done;
@@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        kfree(buff);
                        buff = NULL;
                } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+                       u16 llen, rlen;
                        int ret, i;
                        u8 *buff;
-                       u16 llen, rlen;
                        buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
                        if (!buff)
                                goto command_write_done;
@@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
-               /* Read at least 512 words */
-               if (buffer_len == 0)
-                       buffer_len = 512;
+               /* set the max length */
+               buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
 
                bytes = 2 * buffer_len;
+
+               /* read at least 1k bytes, no more than 4kB */
+               bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
                buff = kzalloc(bytes, GFP_KERNEL);
                if (!buff)
                        goto command_write_done;
@@ -1903,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
        struct i40e_pf *pf = filp->private_data;
        int bytes_not_copied;
        struct i40e_vsi *vsi;
+       char *buf_tmp;
        int vsi_seid;
        int i, cnt;
 
@@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                count -= bytes_not_copied;
        i40e_dbg_netdev_ops_buf[count] = '\0';
 
+       buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+       if (buf_tmp) {
+               *buf_tmp = '\0';
+               count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+       }
+
        if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
                cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
                if (cnt != 1) {
@@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                        goto netdev_ops_write_done;
                }
                for (i = 0; i < vsi->num_q_vectors; i++)
-                       napi_schedule(&vsi->q_vectors[i].napi);
+                       napi_schedule(&vsi->q_vectors[i]->napi);
                dev_info(&pf->pdev->dev, "napi called\n");
        } else {
                dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
  **/
 void i40e_dbg_pf_init(struct i40e_pf *pf)
 {
-       struct dentry *pfile __attribute__((unused));
+       struct dentry *pfile;
        const char *name = pci_name(pf->pdev);
+       const struct device *dev = &pf->pdev->dev;
 
        pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
-       if (pf->i40e_dbg_pf) {
-               pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
-                                           pf, &i40e_dbg_command_fops);
-               pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
-                                           &i40e_dbg_dump_fops);
-               pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
-                                           pf, &i40e_dbg_netdev_ops_fops);
-       } else {
-               dev_info(&pf->pdev->dev,
-                        "debugfs entry for %s failed\n", name);
-       }
+       if (!pf->i40e_dbg_pf)
+               return;
+
+       pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_command_fops);
+       if (!pfile)
+               goto create_failed;
+
+       pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_dump_fops);
+       if (!pfile)
+               goto create_failed;
+
+       pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_netdev_ops_fops);
+       if (!pfile)
+               goto create_failed;
+
+       return;
+
+create_failed:
+       dev_info(dev, "debugfs dir/file for %s failed\n", name);
+       debugfs_remove_recursive(pf->i40e_dbg_pf);
+       return;
 }
 
 /**
index 9a76b8cec76c0b9ef700c8fb6c68c75fd4e09e05..1b86138fa9e19fb0a87510db8d05c5dafd347a61 100644 (file)
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
        ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
-       ring->rx_pending = vsi->rx_rings[0].count;
-       ring->tx_pending = vsi->tx_rings[0].count;
+       ring->rx_pending = vsi->rx_rings[0]->count;
+       ring->tx_pending = vsi->tx_rings[0]->count;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
 }
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
        new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
 
        /* if nothing to do return success */
-       if ((new_tx_count == vsi->tx_rings[0].count) &&
-           (new_rx_count == vsi->rx_rings[0].count))
+       if ((new_tx_count == vsi->tx_rings[0]->count) &&
+           (new_rx_count == vsi->rx_rings[0]->count))
                return 0;
 
        while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
        if (!netif_running(vsi->netdev)) {
                /* simple case - set for the next time the netdev is started */
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       vsi->tx_rings[i].count = new_tx_count;
-                       vsi->rx_rings[i].count = new_rx_count;
+                       vsi->tx_rings[i]->count = new_tx_count;
+                       vsi->rx_rings[i]->count = new_rx_count;
                }
                goto done;
        }
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
         */
 
        /* alloc updated Tx resources */
-       if (new_tx_count != vsi->tx_rings[0].count) {
+       if (new_tx_count != vsi->tx_rings[0]->count) {
                netdev_info(netdev,
                            "Changing Tx descriptor count from %d to %d.\n",
-                           vsi->tx_rings[0].count, new_tx_count);
+                           vsi->tx_rings[0]->count, new_tx_count);
                tx_rings = kcalloc(vsi->alloc_queue_pairs,
                                   sizeof(struct i40e_ring), GFP_KERNEL);
                if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        /* clone ring and setup updated count */
-                       tx_rings[i] = vsi->tx_rings[i];
+                       tx_rings[i] = *vsi->tx_rings[i];
                        tx_rings[i].count = new_tx_count;
                        err = i40e_setup_tx_descriptors(&tx_rings[i]);
                        if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
        }
 
        /* alloc updated Rx resources */
-       if (new_rx_count != vsi->rx_rings[0].count) {
+       if (new_rx_count != vsi->rx_rings[0]->count) {
                netdev_info(netdev,
                            "Changing Rx descriptor count from %d to %d\n",
-                           vsi->rx_rings[0].count, new_rx_count);
+                           vsi->rx_rings[0]->count, new_rx_count);
                rx_rings = kcalloc(vsi->alloc_queue_pairs,
                                   sizeof(struct i40e_ring), GFP_KERNEL);
                if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        /* clone ring and setup updated count */
-                       rx_rings[i] = vsi->rx_rings[i];
+                       rx_rings[i] = *vsi->rx_rings[i];
                        rx_rings[i].count = new_rx_count;
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
        if (tx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       i40e_free_tx_resources(&vsi->tx_rings[i]);
-                       vsi->tx_rings[i] = tx_rings[i];
+                       i40e_free_tx_resources(vsi->tx_rings[i]);
+                       *vsi->tx_rings[i] = tx_rings[i];
                }
                kfree(tx_rings);
                tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
        if (rx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       i40e_free_rx_resources(&vsi->rx_rings[i]);
-                       vsi->rx_rings[i] = rx_rings[i];
+                       i40e_free_rx_resources(vsi->rx_rings[i]);
+                       *vsi->rx_rings[i] = rx_rings[i];
                }
                kfree(rx_rings);
                rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        char *p;
        int j;
        struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+       unsigned int start;
 
        i40e_update_stats(vsi);
 
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
-       for (j = 0; j < vsi->num_queue_pairs; j++) {
-               data[i++] = vsi->tx_rings[j].tx_stats.packets;
-               data[i++] = vsi->tx_rings[j].tx_stats.bytes;
-       }
-       for (j = 0; j < vsi->num_queue_pairs; j++) {
-               data[i++] = vsi->rx_rings[j].rx_stats.packets;
-               data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+       rcu_read_lock();
+       for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+               struct i40e_ring *rx_ring;
+
+               if (!tx_ring)
+                       continue;
+
+               /* process Tx ring statistics */
+               do {
+                       start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+                       data[i] = tx_ring->stats.packets;
+                       data[i + 1] = tx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+               /* Rx ring is the 2nd half of the queue pair */
+               rx_ring = &tx_ring[1];
+               do {
+                       start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+                       data[i + 2] = rx_ring->stats.packets;
+                       data[i + 3] = rx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
        }
+       rcu_read_unlock();
        if (vsi == pf->vsi[pf->lan_vsi]) {
                for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
                        p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
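
The per-ring counters read above use the u64_stats seqcount helpers so 32-bit machines get a consistent 64-bit packets/bytes snapshot without a lock: the writer brackets its updates with u64_stats_update_begin()/end(), and the reader retries until the sequence is stable. A condensed sketch of that pairing, with a hypothetical demo_ring_stats standing in for the ring's stats/syncp fields:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-ring counters mirroring the ring->stats/->syncp fields. */
struct demo_ring_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

/* Writer (hot path): bracket the 64-bit updates with the seqcount. */
static void demo_stats_add(struct demo_ring_stats *s, u64 pkts, u64 len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets += pkts;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader: retry until both counters come from the same update window. */
static void demo_stats_read(struct demo_ring_stats *s, u64 *pkts, u64 *len)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&s->syncp);
                *pkts = s->packets;
                *len  = s->bytes;
        } while (u64_stats_fetch_retry_bh(&s->syncp, start));
}

On 64-bit SMP builds these helpers compile down to plain loads and stores, so the retry loop is effectively free there.
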
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
                        p += ETH_GSTRING_LEN;
-               }
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
        }
 
        vector = vsi->base_vector;
-       q_vector = vsi->q_vectors;
-       for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
                q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
index 601d482694ea8486273371a6cb6d4b0a968cd33b..41a79df373d5fb137afb30629cdf26d5d7657d71 100644 (file)
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
        mem->size = ALIGN(size, alignment);
        mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
                                      &mem->pa, GFP_KERNEL);
-       if (mem->va)
-               return 0;
+       if (!mem->va)
+               return -ENOMEM;
 
-       return -ENOMEM;
+       return 0;
 }
 
 /**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);
 
-       if (mem->va)
-               return 0;
+       if (!mem->va)
+               return -ENOMEM;
 
-       return -ENOMEM;
+       return 0;
 }
 
 /**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
 {
        int ret = -ENOMEM;
-       int i = 0;
-       int j = 0;
+       int i, j;
 
        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 
        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
-       while (i < pile->num_entries && ret < 0) {
+       while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
+                       break;
                } else {
                        /* not enough, so skip over it and continue looking */
                        i += j;
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  **/
 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
-                                            struct rtnl_link_stats64 *storage)
+                                            struct rtnl_link_stats64 *stats)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+       struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+       int i;
+
+       rcu_read_lock();
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *tx_ring, *rx_ring;
+               u64 bytes, packets;
+               unsigned int start;
 
-       *storage = *i40e_get_vsi_stats_struct(vsi);
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+               if (!tx_ring)
+                       continue;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+                       packets = tx_ring->stats.packets;
+                       bytes   = tx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+               stats->tx_packets += packets;
+               stats->tx_bytes   += bytes;
+               rx_ring = &tx_ring[1];
 
-       return storage;
+               do {
+                       start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+                       packets = rx_ring->stats.packets;
+                       bytes   = rx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+               stats->rx_packets += packets;
+               stats->rx_bytes   += bytes;
+       }
+       rcu_read_unlock();
+
+       /* following stats updated by i40e_update_stats() */
+       stats->multicast        = vsi_stats->multicast;
+       stats->tx_errors        = vsi_stats->tx_errors;
+       stats->tx_dropped       = vsi_stats->tx_dropped;
+       stats->rx_errors        = vsi_stats->rx_errors;
+       stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
+       stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+       return stats;
 }
 
 /**
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings)
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       memset(&vsi->rx_rings[i].rx_stats, 0 ,
-                              sizeof(vsi->rx_rings[i].rx_stats));
-                       memset(&vsi->tx_rings[i].tx_stats, 0,
-                              sizeof(vsi->tx_rings[i].tx_stats));
+                       memset(&vsi->rx_rings[i]->stats, 0,
+                              sizeof(vsi->rx_rings[i]->stats));
+                       memset(&vsi->rx_rings[i]->rx_stats, 0,
+                              sizeof(vsi->rx_rings[i]->rx_stats));
+                       memset(&vsi->tx_rings[i]->stats, 0,
+                              sizeof(vsi->tx_rings[i]->stats));
+                       memset(&vsi->tx_rings[i]->tx_stats, 0,
+                              sizeof(vsi->tx_rings[i]->tx_stats));
                }
        vsi->stat_offsets_loaded = false;
 }
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
                        continue;
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       struct i40e_ring *ring = &vsi->tx_rings[i];
+                       struct i40e_ring *ring = vsi->tx_rings[i];
                        clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
                }
        }
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
                        continue;
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       struct i40e_ring *ring = &vsi->tx_rings[i];
+                       struct i40e_ring *ring = vsi->tx_rings[i];
 
                        tc = ring->dcb_tc;
                        if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
        tx_restart = tx_busy = 0;
        rx_page = 0;
        rx_buf = 0;
+       rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                struct i40e_ring *p;
+               u64 bytes, packets;
+               unsigned int start;
 
-               p = &vsi->rx_rings[q];
-               rx_b += p->rx_stats.bytes;
-               rx_p += p->rx_stats.packets;
-               rx_buf += p->rx_stats.alloc_rx_buff_failed;
-               rx_page += p->rx_stats.alloc_rx_page_failed;
+               /* locate Tx ring */
+               p = ACCESS_ONCE(vsi->tx_rings[q]);
 
-               p = &vsi->tx_rings[q];
-               tx_b += p->tx_stats.bytes;
-               tx_p += p->tx_stats.packets;
+               do {
+                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       packets = p->stats.packets;
+                       bytes = p->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               tx_b += bytes;
+               tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
+
+               /* Rx queue is part of the same block as Tx queue */
+               p = &p[1];
+               do {
+                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       packets = p->stats.packets;
+                       bytes = p->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               rx_b += bytes;
+               rx_p += packets;
+               rx_buf += p->rx_stats.alloc_rx_buff_failed;
+               rx_page += p->rx_stats.alloc_rx_page_failed;
        }
+       rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->rx_page_failed = rx_page;
@@ -1388,7 +1448,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
-       i40e_status ret = 0;
+       i40e_status aq_ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
@@ -1449,28 +1509,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
-                               ret = i40e_aq_remove_macvlan(&pf->hw,
+                               aq_ret = i40e_aq_remove_macvlan(&pf->hw,
                                            vsi->seid, del_list, num_del,
                                            NULL);
                                num_del = 0;
                                memset(del_list, 0, sizeof(*del_list));
 
-                               if (ret)
+                               if (aq_ret)
                                        dev_info(&pf->pdev->dev,
                                                 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-                                                ret,
+                                                aq_ret,
                                                 pf->hw.aq.asq_last_status);
                        }
                }
                if (num_del) {
-                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
                        num_del = 0;
 
-                       if (ret)
+                       if (aq_ret)
                                dev_info(&pf->pdev->dev,
                                         "ignoring delete macvlan error, err %d, aq_err %d\n",
-                                        ret, pf->hw.aq.asq_last_status);
+                                        aq_ret, pf->hw.aq.asq_last_status);
                }
 
                kfree(del_list);
@@ -1515,32 +1575,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
-                               ret = i40e_aq_add_macvlan(&pf->hw,
-                                                         vsi->seid,
-                                                         add_list,
-                                                         num_add,
-                                                         NULL);
+                               aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                            add_list, num_add,
+                                                            NULL);
                                num_add = 0;
 
-                               if (ret)
+                               if (aq_ret)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
                }
                if (num_add) {
-                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                 add_list, num_add, NULL);
+                       aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                    add_list, num_add, NULL);
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && (!ret)) {
+               if (add_happened && (!aq_ret)) {
                        /* do nothing */;
-               } else if (add_happened && (ret)) {
+               } else if (add_happened && (aq_ret)) {
                        dev_info(&pf->pdev->dev,
                                 "add filter failed, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                aq_ret, pf->hw.aq.asq_last_status);
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                      &vsi->state)) {
@@ -1556,28 +1614,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-                                                           vsi->seid,
-                                                           cur_multipromisc,
-                                                           NULL);
-               if (ret)
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+                                                              vsi->seid,
+                                                              cur_multipromisc,
+                                                              NULL);
+               if (aq_ret)
                        dev_info(&pf->pdev->dev,
                                 "set multi promisc failed, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                aq_ret, pf->hw.aq.asq_last_status);
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
-               ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-                                                         vsi->seid,
-                                                         cur_promisc,
-                                                         NULL);
-               if (ret)
+               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+                                                            vsi->seid,
+                                                            cur_promisc, NULL);
+               if (aq_ret)
                        dev_info(&pf->pdev->dev,
                                 "set uni promisc failed, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                aq_ret, pf->hw.aq.asq_last_status);
        }
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1847,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
  * @vsi: the vsi being configured
  * @vid: vlan id to be removed (0 = untagged only , -1 = any)
+ *
+ * Return: 0 on success or a negative error code otherwise
  **/
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
 {
@@ -1863,37 +1922,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
  * @netdev: network interface to be adjusted
  * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
  **/
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       int ret;
+       int ret = 0;
 
        if (vid > 4095)
-               return 0;
+               return -EINVAL;
+
+       netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
 
-       netdev_info(vsi->netdev, "adding %pM vid=%d\n",
-                   netdev->dev_addr, vid);
        /* If the network stack called us with vid = 0, we should
         * indicate to i40e_vsi_add_vlan() that we want to receive
         * any traffic (i.e. with any vlan tag, or untagged)
         */
        ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
 
-       if (!ret) {
-               if (vid < VLAN_N_VID)
-                       set_bit(vid, vsi->active_vlans);
-       }
+       if (!ret && (vid < VLAN_N_VID))
+               set_bit(vid, vsi->active_vlans);
 
-       return 0;
+       return ret;
 }
 
 /**
  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
  * @netdev: network interface to be adjusted
  * @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
  **/
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1962,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 
-       netdev_info(vsi->netdev, "removing %pM vid=%d\n",
-                   netdev->dev_addr, vid);
+       netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
+
        /* return code is ignored as there is nothing a user
         * can do about failure to remove and a log message was
-        * already printed from another function
+        * already printed from the other function
         */
        i40e_vsi_kill_vlan(vsi, vid);
 
        clear_bit(vid, vsi->active_vlans);
+
        return 0;
 }
 
@@ -1936,10 +1998,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
  * @vsi: the vsi being adjusted
  * @vid: the vlan id to set as a PVID
  **/
-i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
        struct i40e_vsi_context ctxt;
-       i40e_status ret;
+       i40e_status aq_ret;
 
        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +2010,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 
        ctxt.seid = vsi->seid;
        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (ret) {
+       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
                         "%s: update vsi failed, aq_err=%d\n",
                         __func__, vsi->back->hw.aq.asq_last_status);
+               return -ENOENT;
        }
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -1985,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
        int i, err = 0;
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-               err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+               err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
 
        return err;
 }
@@ -2001,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++)
-               if (vsi->tx_rings[i].desc)
-                       i40e_free_tx_resources(&vsi->tx_rings[i]);
+               if (vsi->tx_rings[i]->desc)
+                       i40e_free_tx_resources(vsi->tx_rings[i]);
 }
 
 /**
@@ -2020,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
        int i, err = 0;
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-               err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+               err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
        return err;
 }
 
@@ -2035,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++)
-               if (vsi->rx_rings[i].desc)
-                       i40e_free_rx_resources(&vsi->rx_rings[i]);
+               if (vsi->rx_rings[i]->desc)
+                       i40e_free_rx_resources(vsi->rx_rings[i]);
 }
 
 /**
@@ -2111,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 
        /* Now associate this queue with this PCI function */
        qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
-       qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
-                                               & I40E_QTX_CTL_PF_INDX_MASK);
+       qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+                   I40E_QTX_CTL_PF_INDX_MASK);
        wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
        i40e_flush(hw);
 
@@ -2220,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
        int err = 0;
        u16 i;
 
-       for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
-               err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+       for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+               err = i40e_configure_tx_ring(vsi->tx_rings[i]);
 
        return err;
 }
@@ -2271,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-               err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+               err = i40e_configure_rx_ring(vsi->rx_rings[i]);
 
        return err;
 }
@@ -2295,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
                qoffset = vsi->tc_config.tc_info[n].qoffset;
                qcount = vsi->tc_config.tc_info[n].qcount;
                for (i = qoffset; i < (qoffset + qcount); i++) {
-                       struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-                       struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+                       struct i40e_ring *rx_ring = vsi->rx_rings[i];
+                       struct i40e_ring *tx_ring = vsi->tx_rings[i];
                        rx_ring->dcb_tc = n;
                        tx_ring->dcb_tc = n;
                }
@@ -2351,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
         */
        qp = vsi->base_queue;
        vector = vsi->base_vector;
-       q_vector = vsi->q_vectors;
-       for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                q_vector->rx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2432,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
  **/
 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 {
-       struct i40e_q_vector *q_vector = vsi->q_vectors;
+       struct i40e_q_vector *q_vector = vsi->q_vectors[0];
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
@@ -2469,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
  **/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        u32 val;
@@ -2497,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
              (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
        wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-       i40e_flush(hw);
+       /* skip the flush */
 }
 
 /**
@@ -2509,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 {
        struct i40e_q_vector *q_vector = data;
 
-       if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+       if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;
 
        napi_schedule(&q_vector->napi);
@@ -2526,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
 {
        struct i40e_q_vector *q_vector = data;
 
-       if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+       if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;
 
        pr_info("fdir ring cleaning needed\n");
@@ -2551,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
        int vector, err;
 
        for (vector = 0; vector < q_vectors; vector++) {
-               struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+               struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
-               if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+               if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
                        tx_int_idx++;
-               } else if (q_vector->rx.ring[0]) {
+               } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "rx", rx_int_idx++);
-               } else if (q_vector->tx.ring[0]) {
+               } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "tx", tx_int_idx++);
                } else {
@@ -2608,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
-               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
        }
 
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2646,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
                i40e_irq_dynamic_enable_icr0(pf);
        }
 
+       i40e_flush(&pf->hw);
        return 0;
 }
 
@@ -2678,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
        icr0 = rd32(hw, I40E_PFINT_ICR0);
 
-       /* if sharing a legacy IRQ, we might get called w/o an intr pending */
-       if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
-               return IRQ_NONE;
-
        val = rd32(hw, I40E_PFINT_DYN_CTL0);
        val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, val);
 
+       /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+       if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+               return IRQ_NONE;
+
        ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 
        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2699,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
                qval = rd32(hw, I40E_QINT_TQCTL(0));
                qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
                wr32(hw, I40E_QINT_TQCTL(0), qval);
-               i40e_flush(hw);
 
                if (!test_bit(__I40E_DOWN, &pf->state))
-                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
        }
 
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2761,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
        /* re-enable interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-       i40e_flush(hw);
        if (!test_bit(__I40E_DOWN, &pf->state)) {
                i40e_service_event_schedule(pf);
                i40e_irq_dynamic_enable_icr0(pf);
@@ -2771,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
 }
 
 /**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
  * @vsi: the VSI being configured
  * @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
  **/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
-       struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-       struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
-       rx_ring->q_vector = q_vector;
-       q_vector->rx.ring[q_vector->rx.count] = rx_ring;
-       q_vector->rx.count++;
-       q_vector->rx.latency_range = I40E_LOW_LATENCY;
-       q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
-       struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-       struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+       struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
 
        tx_ring->q_vector = q_vector;
-       q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+       tx_ring->next = q_vector->tx.ring;
+       q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
-       q_vector->tx.latency_range = I40E_LOW_LATENCY;
-       q_vector->num_ringpairs++;
-       q_vector->vsi = vsi;
+
+       rx_ring->q_vector = q_vector;
+       rx_ring->next = q_vector->rx.ring;
+       q_vector->rx.ring = rx_ring;
+       q_vector->rx.count++;
 }
 
 /**
@@ -2820,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 {
        int qp_remaining = vsi->num_queue_pairs;
        int q_vectors = vsi->num_q_vectors;
-       int qp_per_vector;
+       int num_ringpairs;
        int v_start = 0;
        int qp_idx = 0;
 
@@ -2828,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
         * group them so there are multiple queues per vector.
         */
        for (; v_start < q_vectors && qp_remaining; v_start++) {
-               qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
-               for (; qp_per_vector;
-                    qp_per_vector--, qp_idx++, qp_remaining--) {
-                       map_vector_to_rxq(vsi, v_start, qp_idx);
-                       map_vector_to_txq(vsi, v_start, qp_idx);
+               struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+               num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+               q_vector->num_ringpairs = num_ringpairs;
+
+               q_vector->rx.count = 0;
+               q_vector->tx.count = 0;
+               q_vector->rx.ring = NULL;
+               q_vector->tx.ring = NULL;
+
+               while (num_ringpairs--) {
+                       map_vector_to_qp(vsi, v_start, qp_idx);
+                       qp_idx++;
+                       qp_remaining--;
                }
        }
 }
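
With map_vector_to_qp() above, a q_vector no longer owns fixed ring arrays: each Tx/Rx ring is pushed onto a singly linked list headed at q_vector->tx.ring / q_vector->rx.ring through the ring's ->next pointer, and later code walks that list (see the i40e_for_each_ring use further down in this patch). A small sketch of the push-and-walk pattern, using hypothetical demo_* names:

#include <linux/types.h>

struct demo_ring {
        struct demo_ring *next;         /* next ring chained on this vector */
        /* descriptors, counters, ... */
};

struct demo_ring_container {
        struct demo_ring *ring;         /* head of the chained rings */
        u16 count;                      /* how many rings are chained */
};

/* Push a ring onto the container, as map_vector_to_qp() does above. */
static void demo_add_ring(struct demo_ring_container *c, struct demo_ring *r)
{
        r->next = c->ring;
        c->ring = r;
        c->count++;
}

/* Walk every ring chained on a container (cf. i40e_for_each_ring). */
#define demo_for_each_ring(pos, head) \
        for (pos = (head).ring; pos; pos = pos->next)

Pushing at the head keeps the mapping O(1) per ring and lets one vector service however many queue pairs DIV_ROUND_UP() assigns to it.
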
@@ -2884,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
        pf->flags |= I40E_FLAG_IN_NETPOLL;
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
-                       i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+                       i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                i40e_intr(pf->pdev->irq, netdev);
        }
@@ -3070,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
                        u16 vector = i + base;
 
                        /* free only the irqs that were actually requested */
-                       if (vsi->q_vectors[i].num_ringpairs == 0)
+                       if (vsi->q_vectors[i]->num_ringpairs == 0)
                                continue;
 
                        /* clear the affinity_mask in the IRQ descriptor */
                        irq_set_affinity_hint(pf->msix_entries[vector].vector,
                                              NULL);
                        free_irq(pf->msix_entries[vector].vector,
-                                &vsi->q_vectors[i]);
+                                vsi->q_vectors[i]);
 
                        /* Tear down the interrupt queue link list
                         *
@@ -3160,6 +3218,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
        }
 }
 
+/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       struct i40e_ring *ring;
+
+       if (!q_vector)
+               return;
+
+       /* disassociate q_vector from rings */
+       i40e_for_each_ring(ring, q_vector->tx)
+               ring->q_vector = NULL;
+
+       i40e_for_each_ring(ring, q_vector->rx)
+               ring->q_vector = NULL;
+
+       /* only VSI w/ an associated netdev is set up w/ NAPI */
+       if (vsi->netdev)
+               netif_napi_del(&q_vector->napi);
+
+       vsi->q_vectors[v_idx] = NULL;
+
+       kfree_rcu(q_vector, rcu);
+}
+
 /**
  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  * @vsi: the VSI being un-configured
@@ -3171,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
 {
        int v_idx;
 
-       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
-               struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
-               int r_idx;
-
-               if (!q_vector)
-                       continue;
-
-               /* disassociate q_vector from rings */
-               for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-                       q_vector->tx.ring[r_idx]->q_vector = NULL;
-               for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-                       q_vector->rx.ring[r_idx]->q_vector = NULL;
-
-               /* only VSI w/ an associated netdev is set up w/ NAPI */
-               if (vsi->netdev)
-                       netif_napi_del(&q_vector->napi);
-       }
-       kfree(vsi->q_vectors);
+       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+               i40e_free_q_vector(vsi, v_idx);
 }
 
 /**
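
i40e_free_q_vector() above clears the vsi->q_vectors[] slot and then defers the actual free with kfree_rcu(), because the stats paths earlier in this patch dereference those pointers under rcu_read_lock() (with ACCESS_ONCE()) and may still be running. A condensed sketch of that unpublish-then-retire pattern; demo_slot and demo_obj are hypothetical:

#include <linux/compiler.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical object published through a pointer that readers follow. */
struct demo_obj {
        struct rcu_head rcu;            /* needed by kfree_rcu() */
        u64 packets;
};

static struct demo_obj *demo_slot;      /* stands in for vsi->q_vectors[i] */

/* Reader: mirrors the rcu_read_lock() + ACCESS_ONCE() use in the patch. */
static u64 demo_read_packets(void)
{
        struct demo_obj *obj;
        u64 val = 0;

        rcu_read_lock();
        obj = ACCESS_ONCE(demo_slot);
        if (obj)
                val = obj->packets;
        rcu_read_unlock();
        return val;
}

/* Writer: unpublish first, then free only after current readers finish. */
static void demo_retire(void)
{
        struct demo_obj *obj = demo_slot;

        demo_slot = NULL;
        if (obj)
                kfree_rcu(obj, rcu);
}

i40e_vsi_clear_rings(), added later in this patch, retires the ring pairs the same way.
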
@@ -3238,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
                return;
 
        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_enable(&vsi->q_vectors[q_idx].napi);
+               napi_enable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3253,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
                return;
 
        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_disable(&vsi->q_vectors[q_idx].napi);
+               napi_disable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3326,7 +3401,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
  **/
 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
 {
-       int num_tc = 0, i;
+       u8 num_tc = 0;
+       int i;
 
        /* Scan the ETS Config Priority Table to find
         * traffic class enabled for a given priority
@@ -3341,9 +3417,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
        /* Traffic class index starts from zero so
         * increment to return the actual count
         */
-       num_tc++;
-
-       return num_tc;
+       return num_tc + 1;
 }
 
 /**
@@ -3451,28 +3525,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
+       i40e_status aq_ret;
        u32 tc_bw_max;
-       int ret;
        int i;
 
        /* Get the VSI level BW configuration */
-       ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-       if (ret) {
+       aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+       if (aq_ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't get pf vsi bw config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
-               return ret;
+                        aq_ret, pf->hw.aq.asq_last_status);
+               return -EINVAL;
        }
 
        /* Get the VSI level BW configuration per TC */
-       ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
-                                              &bw_ets_config,
-                                              NULL);
-       if (ret) {
+       aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+                                                 NULL);
+       if (aq_ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
-               return ret;
+                        aq_ret, pf->hw.aq.asq_last_status);
+               return -EINVAL;
        }
 
        if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3567,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
                /* 3 bits out of 4 for each TC */
                vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
        }
-       return ret;
+
+       return 0;
 }
 
 /**
@@ -3505,30 +3579,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
  *
  * Returns 0 on success, negative value on failure
  **/
-static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
-                                      u8 enabled_tc,
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-       int i, ret = 0;
+       i40e_status aq_ret;
+       int i;
 
        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
-                                      &bw_data, NULL);
-       if (ret) {
+       aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+                                         NULL);
+       if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
                         "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
                         __func__, vsi->back->hw.aq.asq_last_status);
-               return ret;
+               return -EINVAL;
        }
 
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                vsi->info.qs_handle[i] = bw_data.qs_handles[i];
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -3701,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 
        if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
            (vsi->netdev)) {
+               netdev_info(vsi->netdev, "NIC Link is Up\n");
                netif_tx_start_all_queues(vsi->netdev);
                netif_carrier_on(vsi->netdev);
+       } else if (vsi->netdev) {
+               netdev_info(vsi->netdev, "NIC Link is Down\n");
        }
        i40e_service_event_schedule(pf);
 
@@ -3770,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
        i40e_napi_disable_all(vsi);
 
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               i40e_clean_tx_ring(&vsi->tx_rings[i]);
-               i40e_clean_rx_ring(&vsi->rx_rings[i]);
+               i40e_clean_tx_ring(vsi->tx_rings[i]);
+               i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
 }
 
@@ -4151,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
        if (new_link == old_link)
                return;
 
-       netdev_info(pf->vsi[pf->lan_vsi]->netdev,
-                   "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+       if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+               netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+                           "NIC Link is %s\n", (new_link ? "Up" : "Down"));
 
        /* Notify the base of the switch tree connected to
         * the link.  Floating VEBs are not notified.
@@ -4197,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
                        continue;
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       set_check_for_tx_hang(&vsi->tx_rings[i]);
+                       set_check_for_tx_hang(vsi->tx_rings[i]);
                        if (test_bit(__I40E_HANG_CHECK_ARMED,
-                                    &vsi->tx_rings[i].state))
+                                    &vsi->tx_rings[i]->state))
                                armed++;
                }
 
@@ -4535,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
        bool new_vsi = false;
        int err, i;
 
-       if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+       if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+                          I40E_FLAG_FDIR_ATR_ENABLED)))
                return;
 
        pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4935,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 {
        int ret = -ENODEV;
        struct i40e_vsi *vsi;
+       int sz_vectors;
+       int sz_rings;
        int vsi_idx;
        int i;
 
@@ -4960,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
                vsi_idx = i;             /* Found one! */
        } else {
                ret = -ENODEV;
-               goto err_alloc_vsi;  /* out of VSI slots! */
+               goto unlock_pf;  /* out of VSI slots! */
        }
        pf->next_vsi = ++i;
 
        vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
        if (!vsi) {
                ret = -ENOMEM;
-               goto err_alloc_vsi;
+               goto unlock_pf;
        }
        vsi->type = type;
        vsi->back = pf;
@@ -4980,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
        INIT_LIST_HEAD(&vsi->mac_filter_list);
 
-       i40e_set_num_rings_in_vsi(vsi);
+       ret = i40e_set_num_rings_in_vsi(vsi);
+       if (ret)
+               goto err_rings;
+
+       /* allocate memory for ring pointers */
+       sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+       vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+       if (!vsi->tx_rings) {
+               ret = -ENOMEM;
+               goto err_rings;
+       }
+       vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+       /* allocate memory for q_vector pointers */
+       sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
+       vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+       if (!vsi->q_vectors) {
+               ret = -ENOMEM;
+               goto err_vectors;
+       }
 
        /* Setup default MSIX irq handler for VSI */
        i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
        pf->vsi[vsi_idx] = vsi;
        ret = vsi_idx;
-err_alloc_vsi:
+       goto unlock_pf;
+
+err_vectors:
+       kfree(vsi->tx_rings);
+err_rings:
+       pf->next_vsi = i - 1;
+       kfree(vsi);
+unlock_pf:
        mutex_unlock(&pf->switch_mutex);
        return ret;
 }
@@ -5028,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       /* free the ring and vector containers */
+       kfree(vsi->q_vectors);
+       kfree(vsi->tx_rings);
+
        pf->vsi[vsi->idx] = NULL;
        if (vsi->idx < pf->next_vsi)
                pf->next_vsi = vsi->idx;
@@ -5040,6 +5151,24 @@ free_vsi:
        return 0;
 }
 
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+       int i;
+
+       if (vsi->tx_rings[0])
+               for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+                       kfree_rcu(vsi->tx_rings[i], rcu);
+                       vsi->tx_rings[i] = NULL;
+                       vsi->rx_rings[i] = NULL;
+               }
+
+       return 0;
+}
+
 /**
  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
  * @vsi: the VSI being configured
@@ -5047,28 +5176,16 @@ free_vsi:
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       int ret = 0;
        int i;
 
-       vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
-                               sizeof(struct i40e_ring), GFP_KERNEL);
-       if (!vsi->rx_rings) {
-               ret = -ENOMEM;
-               goto err_alloc_rings;
-       }
-
-       vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
-                               sizeof(struct i40e_ring), GFP_KERNEL);
-       if (!vsi->tx_rings) {
-               ret = -ENOMEM;
-               kfree(vsi->rx_rings);
-               goto err_alloc_rings;
-       }
-
        /* Set basic values in the rings to be used later during open() */
        for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-               struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-               struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+               struct i40e_ring *tx_ring;
+               struct i40e_ring *rx_ring;
+
+               tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+               if (!tx_ring)
+                       goto err_out;
 
                tx_ring->queue_index = i;
                tx_ring->reg_idx = vsi->base_queue + i;
@@ -5079,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                tx_ring->count = vsi->num_desc;
                tx_ring->size = 0;
                tx_ring->dcb_tc = 0;
+               vsi->tx_rings[i] = tx_ring;
 
+               rx_ring = &tx_ring[1];
                rx_ring->queue_index = i;
                rx_ring->reg_idx = vsi->base_queue + i;
                rx_ring->ring_active = false;
@@ -5093,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                        set_ring_16byte_desc_enabled(rx_ring);
                else
                        clear_ring_16byte_desc_enabled(rx_ring);
-       }
-
-err_alloc_rings:
-       return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
-       if (vsi) {
-               kfree(vsi->rx_rings);
-               kfree(vsi->tx_rings);
+               vsi->rx_rings[i] = rx_ring;
        }
 
        return 0;
+
+err_out:
+       i40e_vsi_clear_rings(vsi);
+       return -ENOMEM;
 }
 
 /**
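
In the reworked i40e_alloc_rings() above, each queue pair is one kzalloc() of two struct i40e_ring: the Tx half is published through vsi->tx_rings[i] and the Rx half is simply &tx_ring[1], which is what lets the stats code earlier in the patch reach the Rx ring via rx_ring = &tx_ring[1]. A tiny sketch of that layout with hypothetical demo_* names:

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical ring; stands in for struct i40e_ring. */
struct demo_ring {
        u16 queue_index;
        bool is_rx;
};

/* Allocate one Tx/Rx pair as a single block of two rings. */
static int demo_alloc_pair(struct demo_ring **tx_slot,
                           struct demo_ring **rx_slot, u16 idx)
{
        struct demo_ring *tx;

        tx = kzalloc(sizeof(*tx) * 2, GFP_KERNEL);
        if (!tx)
                return -ENOMEM;

        tx->queue_index = idx;          /* element 0: the Tx ring */

        tx[1].queue_index = idx;        /* element 1: the Rx ring */
        tx[1].is_rx = true;

        *tx_slot = tx;
        *rx_slot = &tx[1];              /* same block, one element further */
        return 0;
}

Because both halves share one allocation, teardown only ever frees the Tx pointer (as i40e_vsi_clear_rings() does with kfree_rcu()); the Rx pointer must never be passed to kfree().
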
@@ -5246,6 +5355,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
        return err;
 }
 
+/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector;
+
+       /* allocate q_vector */
+       q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+       if (!q_vector)
+               return -ENOMEM;
+
+       q_vector->vsi = vsi;
+       q_vector->v_idx = v_idx;
+       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       if (vsi->netdev)
+               netif_napi_add(vsi->netdev, &q_vector->napi,
+                              i40e_napi_poll, vsi->work_limit);
+
+       q_vector->rx.latency_range = I40E_LOW_LATENCY;
+       q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+       /* tie q_vector and vsi together */
+       vsi->q_vectors[v_idx] = q_vector;
+
+       return 0;
+}
+
 /**
  * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
@@ -5257,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int v_idx, num_q_vectors;
+       int err;
 
        /* if not MSIX, give the one vector only to the LAN VSI */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5266,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
        else
                return -EINVAL;
 
-       vsi->q_vectors = kcalloc(num_q_vectors,
-                                sizeof(struct i40e_q_vector),
-                                GFP_KERNEL);
-       if (!vsi->q_vectors)
-               return -ENOMEM;
-
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               vsi->q_vectors[v_idx].vsi = vsi;
-               vsi->q_vectors[v_idx].v_idx = v_idx;
-               cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
-               if (vsi->netdev)
-                       netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
-                                      i40e_napi_poll, vsi->work_limit);
+               err = i40e_alloc_q_vector(vsi, v_idx);
+               if (err)
+                       goto err_out;
        }
 
        return 0;
+
+err_out:
+       while (v_idx--)
+               i40e_free_q_vector(vsi, v_idx);
+
+       return err;
 }
 
 /**
@@ -5295,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                err = i40e_init_msix(pf);
                if (err) {
-                       pf->flags &= ~(I40E_FLAG_RSS_ENABLED       |
+                       pf->flags &= ~(I40E_FLAG_MSIX_ENABLED      |
+                                       I40E_FLAG_RSS_ENABLED      |
                                        I40E_FLAG_MQ_ENABLED       |
                                        I40E_FLAG_DCB_ENABLED      |
                                        I40E_FLAG_SRIOV_ENABLED    |
@@ -5310,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+               dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
                err = pci_enable_msi(pf->pdev);
                if (err) {
-                       dev_info(&pf->pdev->dev,
-                                "MSI init failed (%d), trying legacy.\n", err);
+                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
                        pf->flags &= ~I40E_FLAG_MSI_ENABLED;
                }
        }
 
+       if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+               dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
        /* track first vector for misc interrupts */
        err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
 }
@@ -5948,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
        int ret = -ENOENT;
        struct i40e_pf *pf = vsi->back;
 
-       if (vsi->q_vectors) {
+       if (vsi->q_vectors[0]) {
                dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
                         vsi->seid);
                return -EEXIST;
@@ -5970,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
                goto vector_setup_out;
        }
 
-       vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
-                                        vsi->num_q_vectors, vsi->idx);
+       if (vsi->num_q_vectors)
+               vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+                                                vsi->num_q_vectors, vsi->idx);
        if (vsi->base_vector < 0) {
                dev_info(&pf->pdev->dev,
                         "failed to get q tracking for VSI %d, err=%d\n",
index 49d2cfa9b0cceb20c5b5f3f6d64cac1968398b60..f1f03bc5c729131bacdb13e243213b18b5b43132 100644 (file)
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                           ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
 }
 
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        struct i40e_tx_buffer *tx_buf;
        struct i40e_tx_desc *tx_desc;
        struct i40e_ring *tx_ring;
+       unsigned int fpt, dcc;
        struct i40e_vsi *vsi;
        struct device *dev;
        dma_addr_t dma;
@@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        if (!vsi)
                return -ENOENT;
 
-       tx_ring = &vsi->tx_rings[0];
+       tx_ring = vsi->tx_rings[0];
        dev = tx_ring->dev;
 
        dma = dma_map_single(dev, fdir_data->raw_packet,
-                               I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+                            I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                goto dma_fail;
 
        /* grab the next descriptor */
-       fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-       tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+       tx_buf = &tx_ring->tx_bi[i];
+
+       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
-       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
-                                            << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
-                                            & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+       fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+             I40E_TXD_FLTR_QW0_QINDEX_MASK;
 
-       fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
-                                           << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
-                                           & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+       fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+              I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
 
-       fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
-                                            << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-                                            & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+       fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+              I40E_TXD_FLTR_QW0_PCTYPE_MASK;
 
        /* Use LAN VSI Id if not programmed by user */
        if (fdir_data->dest_vsi == 0)
-               fdir_desc->qindex_flex_ptype_vsi |=
-                                         cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
-                                          << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+               fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+                      I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
        else
-               fdir_desc->qindex_flex_ptype_vsi |=
-                                           cpu_to_le32((fdir_data->dest_vsi
-                                           << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-                                           & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+               fpt |= ((u32)fdir_data->dest_vsi <<
+                       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+                      I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 
-       fdir_desc->dtype_cmd_cntindex =
-                                   cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+       dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
        if (add)
-               fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                                      I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
-                                       << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+               dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+                      I40E_TXD_FLTR_QW1_PCMD_SHIFT;
        else
-               fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                                          I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
-                                          << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+               dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+                      I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 
-       fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
-                                         << I40E_TXD_FLTR_QW1_DEST_SHIFT)
-                                         & I40E_TXD_FLTR_QW1_DEST_MASK);
+       dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+              I40E_TXD_FLTR_QW1_DEST_MASK;
 
-       fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                    (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-                     & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+       dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+              I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
 
        if (fdir_data->cnt_index != 0) {
-               fdir_desc->dtype_cmd_cntindex |=
-                                   cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
-               fdir_desc->dtype_cmd_cntindex |=
-                                           cpu_to_le32((fdir_data->cnt_index
-                                           << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
-                                           & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+               dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+               dcc |= ((u32)fdir_data->cnt_index <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                      I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
        }
 
+       fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
        fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 
        /* Now program a dummy descriptor */
-       tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
-       tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       tx_desc = I40E_TX_DESC(tx_ring, i);
+
+       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
        tx_desc->buffer_addr = cpu_to_le64(dma);
-       td_cmd = I40E_TX_DESC_CMD_EOP |
-                I40E_TX_DESC_CMD_RS  |
-                I40E_TX_DESC_CMD_DUMMY;
+       td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
 
-       /* Mark the data descriptor to be watched */
-       tx_buf->next_to_watch = tx_desc;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
         */
        wmb();
 
+       /* Mark the data descriptor to be watched */
+       tx_buf->next_to_watch = tx_desc;
+
        writel(tx_ring->next_to_use, tx_ring->tail);
        return 0;
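
Two habits recur in the rewritten filter-programming path above: descriptor fields are first assembled in plain CPU-order locals (fpt, dcc) and written with a single cpu_to_le32() store each, instead of repeatedly OR-ing converted values into the __le32 members; and next_to_watch is only assigned after wmb(), so the Tx clean path cannot observe a watched descriptor whose setup writes are not yet visible. A minimal sketch of the accumulate-then-store idiom, using a hypothetical descriptor layout and made-up shift/mask values:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc {
	__le32 qword;				/* little-endian as seen by hardware */
};

static void demo_fill(struct demo_desc *desc, u32 q_index, u32 pctype)
{
	u32 qw;

	/* build the whole field in CPU byte order first ... */
	qw  = (q_index << 0) & 0x7ff;		/* illustrative shift/mask only */
	qw |= (pctype << 17) & (0x3f << 17);	/* illustrative shift/mask only */

	/* ... then publish it with one endianness conversion */
	desc->qword = cpu_to_le32(qw);
}
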
 
@@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
 }
 
 /**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
  * @ring:      the ring that owns the buffer
  * @tx_buffer: the buffer to free
  **/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
-                                         struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+                                           struct i40e_tx_buffer *tx_buffer)
 {
-       if (tx_buffer->dma) {
-               if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
-                       dma_unmap_page(ring->dev,
-                                      tx_buffer->dma,
-                                      tx_buffer->length,
-                                      DMA_TO_DEVICE);
-               else
+       if (tx_buffer->skb) {
+               dev_kfree_skb_any(tx_buffer->skb);
+               if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
-                                        tx_buffer->dma,
-                                        tx_buffer->length,
+                                        dma_unmap_addr(tx_buffer, dma),
+                                        dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
+       } else if (dma_unmap_len(tx_buffer, len)) {
+               dma_unmap_page(ring->dev,
+                              dma_unmap_addr(tx_buffer, dma),
+                              dma_unmap_len(tx_buffer, len),
+                              DMA_TO_DEVICE);
        }
-       tx_buffer->dma = 0;
-       tx_buffer->time_stamp = 0;
+       tx_buffer->next_to_watch = NULL;
+       tx_buffer->skb = NULL;
+       dma_unmap_len_set(tx_buffer, len, 0);
+       /* tx_buffer must be completely set up in the transmit path */
 }
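
i40e_unmap_and_free_tx_resource() now leans on the generic DMA-unmap bookkeeping helpers instead of driver-private dma/length/flag fields: the buffer struct carries DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() members (see the i40e_tx_buffer change further down), the map path records them with dma_unmap_addr_set()/dma_unmap_len_set(), and a non-zero dma_unmap_len() is what marks a mapping as live. A minimal sketch of that pattern, with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_tx_buffer {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void demo_record_mapping(struct demo_tx_buffer *buf,
				dma_addr_t dma, unsigned int len)
{
	dma_unmap_addr_set(buf, dma, dma);
	dma_unmap_len_set(buf, len, len);
}

static void demo_release_mapping(struct device *dev, struct demo_tx_buffer *buf)
{
	if (dma_unmap_len(buf, len)) {
		dma_unmap_single(dev, dma_unmap_addr(buf, dma),
				 dma_unmap_len(buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(buf, len, 0);	/* mapping no longer live */
	}
}
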
 
 /**
@@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
  **/
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 {
-       struct i40e_tx_buffer *tx_buffer;
        unsigned long bi_size;
        u16 i;
 
@@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
                return;
 
        /* Free all the Tx ring sk_buffs */
-       for (i = 0; i < tx_ring->count; i++) {
-               tx_buffer = &tx_ring->tx_bi[i];
-               i40e_unmap_tx_resource(tx_ring, tx_buffer);
-               if (tx_buffer->skb)
-                       dev_kfree_skb_any(tx_buffer->skb);
-               tx_buffer->skb = NULL;
-       }
+       for (i = 0; i < tx_ring->count; i++)
+               i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
 
        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
+
+       if (!tx_ring->netdev)
+               return;
+
+       /* cleanup Tx queue statistics */
+       netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                 tx_ring->queue_index));
 }
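
The netdev_tx_reset_queue() call added here is one third of the byte-queue-limits (BQL) accounting this series wires up: the transmit path reports queued bytes with netdev_tx_sent_queue() (see i40e_tx_map() below), the clean path reports completions with netdev_tx_completed_queue(), and a ring flushed outside the normal path has to reset the queue so the two counters stay consistent. A minimal sketch of the three calls, assuming txq was obtained with netdev_get_tx_queue():

#include <linux/netdevice.h>

static void demo_on_xmit(struct netdev_queue *txq, unsigned int bytes)
{
	netdev_tx_sent_queue(txq, bytes);		/* frame handed to hardware */
}

static void demo_on_clean(struct netdev_queue *txq,
			  unsigned int packets, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, packets, bytes);	/* completions from the ring */
}

static void demo_on_ring_flush(struct netdev_queue *txq)
{
	netdev_tx_reset_queue(txq);			/* descriptors dropped, not completed */
}
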
 
 /**
@@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
            tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
        } else {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
@@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 
        tx_buf = &tx_ring->tx_bi[i];
        tx_desc = I40E_TX_DESC(tx_ring, i);
+       i -= tx_ring->count;
 
-       for (; budget; budget--) {
-               struct i40e_tx_desc *eop_desc;
-
-               eop_desc = tx_buf->next_to_watch;
+       do {
+               struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 
                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;
 
+               /* prevent any other reads prior to eop_desc */
+               read_barrier_depends();
+
                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
                        break;
 
-               /* count the packet as being completed */
-               tx_ring->tx_stats.completed++;
+               /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;
-               tx_buf->time_stamp = 0;
-
-               /* set memory barrier before eop_desc is verified */
-               rmb();
 
-               do {
-                       i40e_unmap_tx_resource(tx_ring, tx_buf);
+               /* update the statistics for this packet */
+               total_bytes += tx_buf->bytecount;
+               total_packets += tx_buf->gso_segs;
 
-                       /* clear dtype status */
-                       tx_desc->cmd_type_offset_bsz &=
-                               ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+               /* free the skb */
+               dev_kfree_skb_any(tx_buf->skb);
 
-                       if (likely(tx_desc == eop_desc)) {
-                               eop_desc = NULL;
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buf, dma),
+                                dma_unmap_len(tx_buf, len),
+                                DMA_TO_DEVICE);
 
-                               dev_kfree_skb_any(tx_buf->skb);
-                               tx_buf->skb = NULL;
+               /* clear tx_buffer data */
+               tx_buf->skb = NULL;
+               dma_unmap_len_set(tx_buf, len, 0);
 
-                               total_bytes += tx_buf->bytecount;
-                               total_packets += tx_buf->gso_segs;
-                       }
+               /* unmap remaining buffers */
+               while (tx_desc != eop_desc) {
 
                        tx_buf++;
                        tx_desc++;
                        i++;
-                       if (unlikely(i == tx_ring->count)) {
-                               i = 0;
+                       if (unlikely(!i)) {
+                               i -= tx_ring->count;
                                tx_buf = tx_ring->tx_bi;
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                        }
-               } while (eop_desc);
-       }
 
+                       /* unmap any remaining paged data */
+                       if (dma_unmap_len(tx_buf, len)) {
+                               dma_unmap_page(tx_ring->dev,
+                                              dma_unmap_addr(tx_buf, dma),
+                                              dma_unmap_len(tx_buf, len),
+                                              DMA_TO_DEVICE);
+                               dma_unmap_len_set(tx_buf, len, 0);
+                       }
+               }
+
+               /* move us one more past the eop_desc for start of next pkt */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+       i += tx_ring->count;
        tx_ring->next_to_clean = i;
-       tx_ring->tx_stats.bytes += total_bytes;
-       tx_ring->tx_stats.packets += total_packets;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
+       u64_stats_update_end(&tx_ring->syncp);
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
+
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +434,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                return true;
        }
 
+       netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                     tx_ring->queue_index),
+                                 total_packets, total_bytes);
+
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
        i40e_set_new_dynamic_itr(&q_vector->tx);
        if (old_itr != q_vector->tx.itr)
                wr32(hw, reg_addr, q_vector->tx.itr);
-
-       i40e_flush(hw);
 }
 
 /**
@@ -1042,8 +1064,10 @@ next_desc:
        }
 
        rx_ring->next_to_clean = i;
-       rx_ring->rx_stats.packets += total_rx_packets;
-       rx_ring->rx_stats.bytes += total_rx_bytes;
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
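
Packet and byte counters move out of the per-direction tx_stats/rx_stats structures into the shared ring->stats and are now bracketed by u64_stats_update_begin()/end() on the new ring->syncp. On 64-bit machines this compiles away; on 32-bit SMP it gives readers a seqcount so they never see a torn 64-bit counter. A minimal sketch of both sides of the protocol:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* writer side: one writer per ring, typically the NAPI poll context */
static void demo_stats_add(struct demo_ring_stats *s, u64 packets, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += packets;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry the snapshot if a writer was mid-update */
static void demo_stats_read(struct demo_ring_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
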
 
@@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        struct i40e_q_vector *q_vector =
                               container_of(napi, struct i40e_q_vector, napi);
        struct i40e_vsi *vsi = q_vector->vsi;
+       struct i40e_ring *ring;
        bool clean_complete = true;
        int budget_per_ring;
-       int i;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
                return 0;
        }
 
+       /* Since the actual Tx work is minimal, we can give the Tx a larger
+        * budget and be more aggressive about cleaning up the Tx descriptors.
+        */
+       i40e_for_each_ring(ring, q_vector->tx)
+               clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
-        * Since the actual Tx work is minimal, we can give the Tx a larger
-        * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-       for (i = 0; i < q_vector->num_ringpairs; i++) {
-               clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
-                                                   vsi->work_limit);
-               clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
-                                                   budget_per_ring);
-       }
+
+       i40e_for_each_ring(ring, q_vector->rx)
+               clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
        /* If work not completed, return budget and polling will return */
        if (!clean_complete)
@@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                        qval = rd32(hw, I40E_QINT_TQCTL(0));
                        qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
                        wr32(hw, I40E_QINT_TQCTL(0), qval);
-                       i40e_flush(hw);
+
+                       i40e_irq_dynamic_enable_icr0(vsi->back);
                }
        }
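
i40e_napi_poll() no longer indexes fixed-size ring arrays; each ring container now holds the head of a singly linked list that the new i40e_for_each_ring() iterator (added in i40e_txrx.h below) walks via ring->next. Tx rings are cleaned first with the full vsi->work_limit budget, while the Rx budget is split per ring and clamped to at least 1 so polling cannot exit early with a zero budget. A rough, self-contained sketch of that shape with hypothetical demo_* types (not the driver's own):

#include <linux/kernel.h>	/* max() */

struct demo_ring {
	struct demo_ring *next;		/* NULL-terminated list per vector */
};

struct demo_ring_container {
	struct demo_ring *ring;		/* head of the list */
};

#define demo_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

struct demo_q_vector {
	struct demo_ring_container tx;
	struct demo_ring_container rx;
	int num_ringpairs;
};

static bool demo_clean_ring(struct demo_ring *ring, int budget)
{
	return true;			/* stand-in for the real clean routines */
}

static int demo_napi_poll(struct demo_q_vector *q_vector, int budget,
			  int work_limit)
{
	struct demo_ring *ring;
	bool clean_complete = true;
	int budget_per_ring = max(budget / q_vector->num_ringpairs, 1);

	demo_for_each_ring(ring, q_vector->tx)
		clean_complete &= demo_clean_ring(ring, work_limit);

	demo_for_each_ring(ring, q_vector->rx)
		clean_complete &= demo_clean_ring(ring, budget_per_ring);

	return clean_complete ? 0 : budget;	/* 0 lets NAPI complete */
}
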
 
@@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        struct tcphdr *th;
        unsigned int hlen;
        u32 flex_ptype, dtype_cmd;
+       u16 i;
 
        /* make sure ATR is enabled */
        if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        tx_ring->atr_count = 0;
 
        /* grab the next descriptor */
-       fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
        flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
                      I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
 }
 
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
@@ -1275,27 +1302,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        return 0;
 }
 
-/**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring:  ptr to the ring to send
- * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                        u32 tx_flags, __be16 protocol)
-{
-       if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
-           !(tx_flags & I40E_TX_FLAGS_TXSW)) {
-               if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
-                       return false;
-       }
-
-       return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
 /**
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
@@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
                               const u32 cd_tunneling, const u32 cd_l2tag2)
 {
        struct i40e_tx_context_desc *context_desc;
+       int i = tx_ring->next_to_use;
 
        if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
                return;
 
        /* grab the next descriptor */
-       context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
-       struct device *dev = tx_ring->dev;
-       u32 paylen = skb->len - hdr_len;
-       u16 i = tx_ring->next_to_use;
+       struct skb_frag_struct *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
-       u32 buf_offset = 0;
+       u16 i = tx_ring->next_to_use;
        u32 td_tag = 0;
        dma_addr_t dma;
        u16 gso_segs;
 
-       dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dma))
-               goto dma_error;
-
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
                td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }
 
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+               gso_segs = skb_shinfo(skb)->gso_segs;
+       else
+               gso_segs = 1;
+
+       /* multiply data chunks by size of headers */
+       first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+       first->gso_segs = gso_segs;
+       first->skb = skb;
+       first->tx_flags = tx_flags;
+
+       dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
        tx_desc = I40E_TX_DESC(tx_ring, i);
-       for (;;) {
-               while (size > I40E_MAX_DATA_PER_TXD) {
-                       tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+       tx_bi = first;
+
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               if (dma_mapping_error(tx_ring->dev, dma))
+                       goto dma_error;
+
+               /* record length, and DMA address */
+               dma_unmap_len_set(tx_bi, len, size);
+               dma_unmap_addr_set(tx_bi, dma, dma);
+
+               tx_desc->buffer_addr = cpu_to_le64(dma);
+
+               while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
                        tx_desc->cmd_type_offset_bsz =
                                build_ctob(td_cmd, td_offset,
                                           I40E_MAX_DATA_PER_TXD, td_tag);
 
-                       buf_offset += I40E_MAX_DATA_PER_TXD;
-                       size -= I40E_MAX_DATA_PER_TXD;
-
                        tx_desc++;
                        i++;
                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
-               }
 
-               tx_bi = &tx_ring->tx_bi[i];
-               tx_bi->length = buf_offset + size;
-               tx_bi->tx_flags = tx_flags;
-               tx_bi->dma = dma;
+                       dma += I40E_MAX_DATA_PER_TXD;
+                       size -= I40E_MAX_DATA_PER_TXD;
 
-               tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
-               tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
-                                                         size, td_tag);
+                       tx_desc->buffer_addr = cpu_to_le64(dma);
+               }
 
                if (likely(!data_len))
                        break;
 
-               size = skb_frag_size(frag);
-               data_len -= size;
-               buf_offset = 0;
-               tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
-               dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-               if (dma_mapping_error(dev, dma))
-                       goto dma_error;
+               tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+                                                         size, td_tag);
 
                tx_desc++;
                i++;
@@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        i = 0;
                }
 
-               frag++;
-       }
-
-       tx_desc->cmd_type_offset_bsz |=
-                      cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+               size = skb_frag_size(frag);
+               data_len -= size;
 
-       i++;
-       if (i == tx_ring->count)
-               i = 0;
+               dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                                      DMA_TO_DEVICE);
 
-       tx_ring->next_to_use = i;
+               tx_bi = &tx_ring->tx_bi[i];
+       }
 
-       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
-               gso_segs = skb_shinfo(skb)->gso_segs;
-       else
-               gso_segs = 1;
+       tx_desc->cmd_type_offset_bsz =
+               build_ctob(td_cmd, td_offset, size, td_tag) |
+               cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
 
-       /* multiply data chunks by size of headers */
-       tx_bi->bytecount = paylen + (gso_segs * hdr_len);
-       tx_bi->gso_segs = gso_segs;
-       tx_bi->skb = skb;
+       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                tx_ring->queue_index),
+                            first->bytecount);
 
-       /* set the timestamp and next to watch values */
+       /* set the timestamp */
        first->time_stamp = jiffies;
-       first->next_to_watch = tx_desc;
 
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
@@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         */
        wmb();
 
+       /* set next_to_watch value indicating a packet is present */
+       first->next_to_watch = tx_desc;
+
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       tx_ring->next_to_use = i;
+
+       /* notify HW of packet */
        writel(i, tx_ring->tail);
+
        return;
 
 dma_error:
-       dev_info(dev, "TX DMA map failed\n");
+       dev_info(tx_ring->dev, "TX DMA map failed\n");
 
        /* clear dma mappings for failed tx_bi map */
        for (;;) {
                tx_bi = &tx_ring->tx_bi[i];
-               i40e_unmap_tx_resource(tx_ring, tx_bi);
+               i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
                if (tx_bi == first)
                        break;
                if (i == 0)
@@ -1632,8 +1647,6 @@ dma_error:
                i--;
        }
 
-       dev_kfree_skb_any(skb);
-
        tx_ring->next_to_use = i;
 }
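
In the reworked i40e_tx_map(), the per-packet state (skb, bytecount, gso_segs, tx_flags) is stored on the first buffer up front, every mapping is recorded with dma_unmap_addr_set()/dma_unmap_len_set() so the dma_error path can unwind it, and next_to_watch is published only after wmb(). The bytecount handed to BQL counts what actually goes on the wire: for a TSO frame each of gso_segs segments carries its own copy of the headers, hence skb->len - hdr_len + gso_segs * hdr_len. A small sketch of that accounting (illustrative helper, not driver code):

#include <linux/types.h>

/* Wire-level bytes for a (possibly TSO) frame:
 * payload counted once, headers counted once per resulting segment.
 */
static unsigned int demo_tx_bytecount(unsigned int skb_len, u8 hdr_len,
				      u16 gso_segs)
{
	return skb_len - hdr_len + gso_segs * hdr_len;
}
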
 
@@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
+       /* always enable CRC insertion offload */
+       td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
        /* Always offload the checksum, since it's in the data descriptor */
-       if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-       /* always enable offload insertion */
-       td_cmd |= I40E_TX_DESC_CMD_ICRC;
-
-       if (tx_flags & I40E_TX_FLAGS_CSUM)
                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
+       }
 
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
@@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+       struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
 
        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
index b1d7722d98a7fc4cd217d43cae627895f5baa5f6..db55d9947f151cf19c17f2f55e8ee24e69364cbd 100644 (file)
 #define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW             (u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE   (u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
 #define I40E_TX_FLAGS_VLAN_SHIFT       16
 
 struct i40e_tx_buffer {
-       struct sk_buff *skb;
-       dma_addr_t dma;
-       unsigned long time_stamp;
-       u16 length;
-       u32 tx_flags;
        struct i40e_tx_desc *next_to_watch;
+       unsigned long time_stamp;
+       struct sk_buff *skb;
        unsigned int bytecount;
-       u16 gso_segs;
-       u8 mapped_as_page;
+       unsigned short gso_segs;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
+       u32 tx_flags;
 };
 
 struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
        unsigned int page_offset;
 };
 
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
        u64 packets;
        u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
-       u64 completed;
        u64 tx_done_old;
 };
 
 struct i40e_rx_queue_stats {
-       u64 packets;
-       u64 bytes;
        u64 non_eop_descs;
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
+       struct i40e_ring *next;         /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
        bool ring_active;               /* is ring online or not */
 
        /* stats structs */
+       struct i40e_queue_stats stats;
+       struct u64_stats_sync syncp;
        union {
                struct i40e_tx_queue_stats tx_stats;
                struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
 
        struct i40e_vsi *vsi;           /* Backreference to associated VSI */
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+       struct rcu_head rcu;            /* to avoid race on free */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
 };
 
 struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
        /* array of pointers to rings */
-       struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+       struct i40e_ring *ring;
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
        u16 itr;
 };
 
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+       for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
index 8967e58e2408a4930507091bfa85bd26a3862b6f..07596982a4773fc942a5a0c3b8a458bdaa837768 100644 (file)
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
        else
                reg_idx = I40E_VPINT_LNKLSTN(
-                           ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+                                          (pf->hw.func_caps.num_msix_vectors_vf
                                              * vf->vf_id) + (vector_id - 1));
 
        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
 
        /* associate this queue with the PCI VF function */
        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
-       qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+       qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
                    & I40E_QTX_CTL_PF_INDX_MASK);
        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
index 74a1506b42359d02305ea9d78cda7e536633559f..8c2437722aad2b32fec48bfc3f17bdf71db5c164 100644 (file)
 #ifndef _E1000_82575_H_
 #define _E1000_82575_H_
 
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-                               u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+                     u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+                      u8 data);
 
 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
                                      (ID_LED_DEF1_DEF2 <<  8) | \
index 37a9c06a6c6816bf3f87c82986851504d79d7b71..2e166b22d52b6374e3814a9526d6cad9decfa5a3 100644 (file)
@@ -562,11 +562,11 @@ struct e1000_hw {
        u8  revision_id;
 };
 
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 #define hw_dbg(format, arg...) \
        netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
 
 /* These functions must be implemented by drivers */
-s32  igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32  igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
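
The sweep through these e1000/igb/igbvf/ixgb headers only drops the redundant 'extern' storage-class specifier and re-wraps the argument lists: function declarations at file scope have external linkage by default, so the two forms are equivalent and the shorter one fits the 80-column style. For example, both of these declare the same function:

extern void igb_power_up_link(struct igb_adapter *adapter);
void igb_power_up_link(struct igb_adapter *adapter);
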
index dde3c4b7ea9971db46981bc319faeb5fe56e452c..2d913716573a29a830610de841164e30e505cdfb 100644 (file)
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
 
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
-                             u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
-                            u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
-                                struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-                             u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-                              u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                           u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                          u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+                         struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
index 5e13e83cc608358273d3e7dadb6bf488e426127b..e4cbe8ef67b3094b32b73cbccda9be5673243d1f 100644 (file)
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
 
 #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN      0x2
 
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
 
 #endif
index 6807b098edaee27c0343f3903a080c08604ea3d5..5e9ed89403aa45adb9238330e4937e8aa1f4cd09 100644 (file)
@@ -483,40 +483,38 @@ enum igb_boards {
 extern char igb_driver_name[];
 extern char igb_driver_version[];
 
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
-                                          struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-                               struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
-                               unsigned char *va,
-                               struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+                        struct sk_buff *skb);
 static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
                                       union e1000_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
-                                 struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+                          int cmd);
 #ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
 #endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
index 48cbc833b051b3861a4568e72becf67eeaa357e4..0ae3177416c736ea5c48b2eead34a2c0553f7368 100644 (file)
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
                        igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
                        igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
                }
+       } else if (hw->phy.type == e1000_phy_82580) {
+               /* enable MII loopback */
+               igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
        }
 
        /* add small delay to avoid loopback test failure */
@@ -1656,7 +1659,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
                if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
                (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
                (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-               (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+               (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+               (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
 
                        /* Enable DH89xxCC MPHY for near end loopback */
                        reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1722,7 +1726,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
        if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
        (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
        (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-       (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+       (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+       (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
                u32 reg;
 
                /* Disable near end loopback on DH89xxCC */
@@ -2652,6 +2657,8 @@ static int igb_set_eee(struct net_device *netdev,
            (hw->phy.media_type != e1000_media_type_copper))
                return -EOPNOTSUPP;
 
+       memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
        ret_val = igb_get_eee(netdev, &eee_curr);
        if (ret_val)
                return ret_val;
@@ -2872,6 +2879,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
        return 0;
 }
 
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int max_combined = 0;
+
+       switch (hw->mac.type) {
+       case e1000_i211:
+               max_combined = IGB_MAX_RX_QUEUES_I211;
+               break;
+       case e1000_82575:
+       case e1000_i210:
+               max_combined = IGB_MAX_RX_QUEUES_82575;
+               break;
+       case e1000_i350:
+               if (!!adapter->vfs_allocated_count) {
+                       max_combined = 1;
+                       break;
+               }
+               /* fall through */
+       case e1000_82576:
+               if (!!adapter->vfs_allocated_count) {
+                       max_combined = 2;
+                       break;
+               }
+               /* fall through */
+       case e1000_82580:
+       case e1000_i354:
+       default:
+               max_combined = IGB_MAX_RX_QUEUES;
+               break;
+       }
+
+       return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+                            struct ethtool_channels *ch)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+
+       /* Report maximum channels */
+       ch->max_combined = igb_max_channels(adapter);
+
+       /* Report info for other vector */
+       if (adapter->msix_entries) {
+               ch->max_other = NON_Q_VECTORS;
+               ch->other_count = NON_Q_VECTORS;
+       }
+
+       ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+                           struct ethtool_channels *ch)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       unsigned int count = ch->combined_count;
+
+       /* Verify they are not requesting separate vectors */
+       if (!count || ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       /* Verify other_count is valid and has not been changed */
+       if (ch->other_count != NON_Q_VECTORS)
+               return -EINVAL;
+
+       /* Verify the number of channels doesn't exceed hw limits */
+       if (count > igb_max_channels(adapter))
+               return -EINVAL;
+
+       if (count != adapter->rss_queues) {
+               adapter->rss_queues = count;
+
+               /* Hardware has to reinitialize queues and interrupts to
+                * match the new configuration.
+                */
+               return igb_reinit_queues(adapter);
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
        .get_settings           = igb_get_settings,
        .set_settings           = igb_set_settings,
@@ -2908,6 +2997,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
        .get_rxfh_indir_size    = igb_get_rxfh_indir_size,
        .get_rxfh_indir         = igb_get_rxfh_indir,
        .set_rxfh_indir         = igb_set_rxfh_indir,
+       .get_channels           = igb_get_channels,
+       .set_channels           = igb_set_channels,
        .begin                  = igb_ethtool_begin,
        .complete               = igb_ethtool_complete,
 };
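
With get_channels/set_channels wired into igb_ethtool_ops, the RSS queue count becomes runtime-tunable through the standard ethtool channels interface (typically ethtool -l to query and ethtool -L <iface> combined N to change it), bounded by igb_max_channels() for the MAC type; a successful change updates adapter->rss_queues and goes through the new igb_reinit_queues(), which closes the interface, rebuilds the interrupt scheme, and reopens it.
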
index 8cf44f2a8ccd5b531f42fa0dfb357a19d6efa4f6..66784cd9a7e5e40fb6eaa2d7cdd76c3fb0cfe1fa 100644 (file)
@@ -2034,21 +2034,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
        }
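
The probe-time DMA setup in igb (and, further down, igbvf and ixgb) collapses the separate dma_set_mask() and dma_set_coherent_mask() calls into dma_set_mask_and_coherent(), which sets both masks in one step and fails if either cannot be satisfied; the 64-bit-then-32-bit fallback is unchanged. A minimal sketch of the resulting pattern:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_masks(struct device *dev, bool *using_dac)
{
	/* prefer a 64-bit mask for both streaming and coherent mappings */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	/* otherwise fall back to 32-bit, or report failure to the caller */
	*using_dac = false;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
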
 
@@ -5708,7 +5702,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 
        /* reply to reset with ack and vf mac address */
        msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-       memcpy(addr, vf_mac, 6);
+       memcpy(addr, vf_mac, ETH_ALEN);
        igb_write_mbx(hw, msgbuf, 3, vf);
 }
 
@@ -7838,4 +7832,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                return E1000_SUCCESS;
 
 }
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       int err = 0;
+
+       if (netif_running(netdev))
+               igb_close(netdev);
+
+       igb_clear_interrupt_scheme(adapter);
+
+       if (igb_init_interrupt_scheme(adapter, true)) {
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               return -ENOMEM;
+       }
+
+       if (netif_running(netdev))
+               err = igb_open(netdev);
+
+       return err;
+}
 /* igb_main.c */
index a1463e3d14c0522511c75c424ffa16bd3162411a..7d6a25c8f889efd0eb69ef749dd0bb13f18302ad 100644 (file)
@@ -312,17 +312,17 @@ enum igbvf_state_t {
 extern char igbvf_driver_name[];
 extern const char igbvf_driver_version[];
 
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
 
 extern unsigned int copybreak;
 
index 93eb7ee06d3e0aa2159fcccc61e797eb44fd79bd..4e6b02fbe65280614c68b6b76d013368281ccc4f 100644 (file)
@@ -2638,21 +2638,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev, "No usable DMA "
+                               "configuration, aborting\n");
+                       goto err_dma;
                }
        }
 
index eea0e10ce12f95d47ad884d48170495913f8db70..955ad8c2c53456a2bb9ce28515a00d52264ed3ee 100644 (file)
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
                if (!ret_val) {
                        if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
-                               memcpy(hw->mac.perm_addr, addr, 6);
+                               memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
                        else
                                ret_val = -E1000_ERR_MAC_INIT;
                }
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
 
        memset(msgbuf, 0, 12);
        msgbuf[0] = E1000_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
+       memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
index 4d2ae97ff1b3d7380de5c2ea4653f33fda330880..2224cc2edf1396d338b9786a2dd23af8b77aa581 100644 (file)
@@ -187,21 +187,21 @@ enum ixgb_state_t {
 };
 
 /* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
 extern char ixgb_driver_name[];
 extern const char ixgb_driver_version[];
 
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
 
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
 
 
 #endif /* _IXGB_H_ */
index 2a99a35c33aa24c30a61837ae880ea373b908c14..0bd5d72e1af5b6be4873baf31431e7f5ad09a347 100644 (file)
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
 };
 
 /* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
-                               u8 *addr,
-                               u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
 
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
 
 /* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
-                                  u8 *mc_addr_list,
-                                  u32 mc_addr_count,
-                                  u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+                             u32 mc_addr_count, u32 pad);
 
 /* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
-                                u32 offset,
-                                u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
 
 /* Access functions to eeprom data */
 void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
index 9f6b236828e6a2d6e5342c0acfd5ba6222a4d531..57e390cbe6d0d21630f6bf6904a0c8a663601931 100644 (file)
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               pr_err("No usable DMA configuration, aborting\n");
-                               goto err_dma_mask;
-                       }
+                       pr_err("No usable DMA configuration, aborting\n");
+                       goto err_dma_mask;
                }
        }
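
The probe-path hunks in this series collapse the separate dma_set_mask()/dma_set_coherent_mask() pairs into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together and fails if either cannot be honoured. A minimal sketch of the resulting 64-bit-with-32-bit-fallback pattern (function and variable names are illustrative):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Sketch: prefer 64-bit DMA, fall back to 32-bit, abort the probe otherwise. */
    static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
    {
            int err;

            *using_dac = false;
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (!err) {
                    *using_dac = true;
                    return 0;
            }

            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (err)
                    dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
            return err;
    }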
 
index 0ac6b11c6e4ec323aaa50db976e2a996b1b02d19..dc1588ee264a31da9e7561904f4ced02b0861502 100644 (file)
@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
        u64 yields;
        u64 misses;
        u64 cleaned;
-#endif  /* LL_EXTENDED_STATS */
+#endif  /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
                WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
                q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->tx.ring->stats.yields++;
 #endif
        } else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
        if ((q_vector->state & IXGBE_QV_LOCKED)) {
                q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->rx.ring->stats.yields++;
 #endif
        } else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
        return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
        return false;
 }
 
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        return false;
 }
@@ -786,93 +786,89 @@ extern const char ixgbe_driver_version[];
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
 
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                               u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-                                        struct ixgbe_adapter *,
-                                        struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
-                                             struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_hash_dword input,
-                                                union ixgbe_atr_hash_dword common,
-                                                 u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-                                                union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+                                 struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+                                     struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                         u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                         union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 #endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 #ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
-                    struct ixgbe_tx_buffer *first,
-                    u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
-                         union ixgbe_adv_rx_desc *rx_desc,
-                         struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                              struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
-                                struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+             u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+                  union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                      struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                         struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
-                                 struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+                          struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
 #endif /* IXGBE_FCOE */
 #ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
 #else
 static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
 static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +880,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                                   struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                            struct sk_buff *skb);
 static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
                                         union ixgbe_adv_rx_desc *rx_desc,
                                         struct sk_buff *skb)
@@ -906,11 +902,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
-                                   struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+                            int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
 #endif
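
The ixgbe.h hunks above rename the optional busy-poll statistics guard from LL_EXTENDED_STATS (low latency) to BP_EXTENDED_STATS (busy poll) and ixgbe_qv_ll_polling() to ixgbe_qv_busy_polling(); the fields themselves and the CONFIG_NET_RX_BUSY_POLL dependency are unchanged. Roughly, the guarded layout looks like this (struct name is illustrative):

    #include <linux/types.h>

    #ifdef CONFIG_NET_RX_BUSY_POLL
    #define BP_EXTENDED_STATS
    #endif

    struct example_queue_stats {
            u64 packets;
            u64 bytes;
    #ifdef BP_EXTENDED_STATS
            u64 yields;     /* NAPI vs. busy-poll lock contention */
            u64 misses;
            u64 cleaned;
    #endif
    };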
index e8649abf97c0dd93152d7037bae870eaf3532954..90aac31b3551746d8fd3bd95d61d5000d2fc4a1a 100644 (file)
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 
 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN  1129
+#define IXGBE_REGS_LEN  1139
        return IXGBE_REGS_LEN * sizeof(u32);
 }
 
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
        /* DCB */
-       regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
-       regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-       regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-       regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
-       for (i = 0; i < 8; i++)
-               regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+       regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
+       regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+               regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+               for (i = 0; i < 8; i++)
+                       regs_buff[833 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[841 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[849 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[857 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+               for (i = 0; i < 8; i++)
+                       regs_buff[833 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[841 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[849 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[857 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+               break;
+       default:
+               break;
+       }
+
        for (i = 0; i < 8; i++)
-               regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+               regs_buff[865 + i] =
+               IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
        for (i = 0; i < 8; i++)
-               regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+               regs_buff[873 + i] =
+               IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
 
        /* Statistics */
        regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
 
        /* 82599 X540 specific registers  */
        regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+       /* 82599 X540 specific DCB registers  */
+       regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+       regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+       for (i = 0; i < 4; i++)
+               regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+       regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+                                       /* same as RTTQCNRM */
+       regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+                                       /* same as RTTQCNRR */
+
+       /* X540 specific DCB registers  */
+       regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+       regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
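
IXGBE_REGS_LEN grows from 1129 to 1139 because the hunk above writes ten new DCB entries, regs_buff[1129] through regs_buff[1138]; ethtool sizes the dump buffer as a word count, so the constant must cover the highest index plus one. A sketch of the sizing rule:

    #include <linux/netdevice.h>

    #define EXAMPLE_REGS_LEN 1139   /* highest regs_buff index written is 1138 */

    /* Sketch: the ethtool register dump is an array of 32-bit words. */
    static int example_get_regs_len(struct net_device *netdev)
    {
            return EXAMPLE_REGS_LEN * sizeof(u32);
    }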
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_misses", i);
+                       sprintf(p, "tx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_misses", i);
+                       sprintf(p, "rx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);
index 0ade0cd5ef53ffab28b3fd34136374bfe9f4b51e..546980fe64b839de10c3c49cdde030cd43d71870 100644 (file)
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 
-       if (ixgbe_qv_ll_polling(q_vector))
+       if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
        else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
                found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
@@ -7490,19 +7490,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                return err;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
index 24af12e3719e00c8345ba750399226a8522105de..aae900a256da98601a501a121eed1f14347d3997 100644 (file)
 #define IXGBE_SFF_QSFP_DEVICE_TECH     0x93
 
 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
-#define IXGBE_SFF_1GBASET_CAPABLE            0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK  0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G   0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G    0x0
-#define IXGBE_SFF_ADDRESSING_MODE           0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE       0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE      0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE             0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE              0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING      0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE             0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE             0x2
+#define IXGBE_SFF_1GBASET_CAPABLE              0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE            0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE            0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK          0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G           0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G            0x0
+#define IXGBE_SFF_ADDRESSING_MODE              0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE         0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE                0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL  0x0
-#define IXGBE_I2C_EEPROM_READ_MASK           0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+#define IXGBE_I2C_EEPROM_READ_MASK             0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK           0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION   0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS           0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL           0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS    0x3
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
index 276d7b135332c1c1c0ec4415f0594f15ffd9f1d2..1fe7cb0142e106919e772f668fe5a5411b8ff2fc 100644 (file)
@@ -558,7 +558,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
-       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
        hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 
        return 0;
@@ -621,16 +621,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
-       unsigned char vf_mac_addr[6];
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
 
        bool enable = ((event_mask & 0x10000000U) != 0);
 
-       if (enable) {
-               eth_zero_addr(vf_mac_addr);
-               memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-       }
+       if (enable)
+               eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
 
        return 0;
 }
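
Instead of zeroing a stack buffer and copying it into the VF table, the hunk above clears the stored address in place with eth_zero_addr() from <linux/etherdevice.h>, which is a thin memset wrapper, roughly:

    #include <linux/string.h>
    #include <linux/etherdevice.h>

    /* Roughly what eth_zero_addr() expands to. */
    static inline void example_zero_addr(u8 *addr)
    {
            memset(addr, 0x00, ETH_ALEN);
    }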
index 10775cb9b6d84c066263c51881257bf8995e037a..7c19e969576f60160649f3ea95b6033582ed9552 100644 (file)
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTDQSEL    0x04904
 #define IXGBE_RTTDT1C     0x04908
 #define IXGBE_RTTDT1S     0x0490C
+#define IXGBE_RTTQCNCR    0x08B00
+#define IXGBE_RTTQCNTG    0x04A90
+#define IXGBE_RTTBCNRD    0x0498C
+#define IXGBE_RTTQCNRR    0x0498C
 #define IXGBE_RTTDTECC    0x04990
 #define IXGBE_RTTDTECC_NO_BCN   0x00000100
 #define IXGBE_RTTBCNRC    0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTBCNRC_RF_INT_MASK     \
        (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
 #define IXGBE_RTTBCNRM    0x04980
+#define IXGBE_RTTQCNRM    0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
index 389324f5929a5ef9fd16bac01b9a6d799943747a..24b80a6cfca4ec593f7bed4bf2e1423213125d90 100644 (file)
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
 
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES   128
-#define IXGBE_X540_MC_TBL_SIZE   128
-#define IXGBE_X540_VFT_TBL_SIZE  128
-#define IXGBE_X540_RX_PB_SIZE   384
+#define IXGBE_X540_MAX_TX_QUEUES       128
+#define IXGBE_X540_MAX_RX_QUEUES       128
+#define IXGBE_X540_RAR_ENTRIES         128
+#define IXGBE_X540_MC_TBL_SIZE         128
+#define IXGBE_X540_VFT_TBL_SIZE                128
+#define IXGBE_X540_RX_PB_SIZE          384
 
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
index c9d0c12d6f04156e19ec0240c19d64c0f23ebc8a..84329b0d567a1caeb55fb197e6f4bfe0e07944f1 100644 (file)
@@ -140,58 +140,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
 
 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
 
-static char *ixgbevf_reg_names[] = {
-       "IXGBE_VFCTRL",
-       "IXGBE_VFSTATUS",
-       "IXGBE_VFLINKS",
-       "IXGBE_VFRXMEMWRAP",
-       "IXGBE_VFFRTIMER",
-       "IXGBE_VTEICR",
-       "IXGBE_VTEICS",
-       "IXGBE_VTEIMS",
-       "IXGBE_VTEIMC",
-       "IXGBE_VTEIAC",
-       "IXGBE_VTEIAM",
-       "IXGBE_VTEITR",
-       "IXGBE_VTIVAR",
-       "IXGBE_VTIVAR_MISC",
-       "IXGBE_VFRDBAL0",
-       "IXGBE_VFRDBAL1",
-       "IXGBE_VFRDBAH0",
-       "IXGBE_VFRDBAH1",
-       "IXGBE_VFRDLEN0",
-       "IXGBE_VFRDLEN1",
-       "IXGBE_VFRDH0",
-       "IXGBE_VFRDH1",
-       "IXGBE_VFRDT0",
-       "IXGBE_VFRDT1",
-       "IXGBE_VFRXDCTL0",
-       "IXGBE_VFRXDCTL1",
-       "IXGBE_VFSRRCTL0",
-       "IXGBE_VFSRRCTL1",
-       "IXGBE_VFPSRTYPE",
-       "IXGBE_VFTDBAL0",
-       "IXGBE_VFTDBAL1",
-       "IXGBE_VFTDBAH0",
-       "IXGBE_VFTDBAH1",
-       "IXGBE_VFTDLEN0",
-       "IXGBE_VFTDLEN1",
-       "IXGBE_VFTDH0",
-       "IXGBE_VFTDH1",
-       "IXGBE_VFTDT0",
-       "IXGBE_VFTDT1",
-       "IXGBE_VFTXDCTL0",
-       "IXGBE_VFTXDCTL1",
-       "IXGBE_VFTDWBAL0",
-       "IXGBE_VFTDWBAL1",
-       "IXGBE_VFTDWBAH0",
-       "IXGBE_VFTDWBAH1"
-};
-
-
 static int ixgbevf_get_regs_len(struct net_device *netdev)
 {
-       return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+       return IXGBE_REGS_LEN * sizeof(u32);
 }
 
 static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +216,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
                regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
-       for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
-               hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
 }
 
 static void ixgbevf_get_drvinfo(struct net_device *netdev,
index fff0d98675295182fd15b7bbba36820274286ea2..64a2b912e73c4cf6c09a71571b3d6b321375ec7b 100644 (file)
@@ -281,27 +281,23 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
 
 #ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
        printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
 #else
index 59a62bbfb3714a55683bec8fcb49e9adcb5cb279..275ccde300cd3dfddd3f5fb2473dde35bbcd7790 100644 (file)
@@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
        struct ixgbevf_adapter *adapter = data;
-       struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 msg;
-       bool got_ack = false;
 
        hw->mac.get_link_status = 1;
-       if (!hw->mbx.ops.check_for_ack(hw))
-               got_ack = true;
-
-       if (!hw->mbx.ops.check_for_msg(hw)) {
-               hw->mbx.ops.read(hw, &msg, 1);
-
-               if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
-                       mod_timer(&adapter->watchdog_timer,
-                                 round_jiffies(jiffies + 1));
-                       adapter->link_up = false;
-               }
 
-               if (msg & IXGBE_VT_MSGTYPE_NACK)
-                       dev_info(&pdev->dev,
-                                "Last Request of type %2.2x to PF Nacked\n",
-                                msg & 0xFF);
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-       }
-
-       /* checking for the ack clears the PFACK bit.  Place
-        * it back in the v2p_mailbox cache so that anyone
-        * polling for an ack will not miss it
-        */
-       if (got_ack)
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+               mod_timer(&adapter->watchdog_timer, jiffies);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1327,27 +1302,51 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
        }
 }
 
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-                                               int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+                                        int rxr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
        int j = adapter->rx_ring[rxr].reg_idx;
-       int k;
 
-       for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-               if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-                       break;
-               else
-                       msleep(1);
-       }
-       if (k >= IXGBE_MAX_RX_DESC_POLL) {
-               hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
-                      "not set within the polling period\n", rxr);
-       }
+       do {
+               usleep_range(1000, 2000);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+       } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+                      rxr);
 
-       ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
-                               adapter->rx_ring[rxr].count - 1);
+       ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+                               (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+                                    struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
+
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+       /* write value back with RXDCTL.ENABLE bit cleared */
+       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+       /* the hardware may take up to 100us to really disable the rx queue */
+       do {
+               udelay(10);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+                      reg_idx);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
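
Both the reworked ixgbevf_rx_desc_queue_enable() and the new ixgbevf_disable_rx_queue() use the same bounded-poll idiom: touch RXDCTL, then re-read it a fixed number of times with a short delay until the ENABLE bit reaches the expected state, and log if the loop runs out. A generic sketch of that idiom (the register address and bit are placeholders):

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/types.h>

    #define EXAMPLE_MAX_POLL 10

    /* Sketch: wait for a memory-mapped register bit to clear, with a bounded retry loop. */
    static bool example_wait_bit_clear(void __iomem *reg, u32 bit)
    {
            int wait_loop = EXAMPLE_MAX_POLL;
            u32 val;

            do {
                    udelay(10);             /* the hardware may need up to ~100us */
                    val = readl(reg);
            } while (--wait_loop && (val & bit));

            return wait_loop != 0;          /* false: timed out with the bit still set */
    }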
@@ -1545,8 +1544,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
-       ixgbevf_negotiate_api(adapter);
-
        ixgbevf_reset_queues(adapter);
 
        ixgbevf_configure(adapter);
@@ -1679,7 +1676,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBEVF_DOWN, &adapter->state);
-       /* disable receives */
+
+       /* disable all enabled rx queues */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
 
        netif_tx_disable(netdev);
 
@@ -1733,10 +1733,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
 
-       if (hw->mac.ops.reset_hw(hw))
+       if (hw->mac.ops.reset_hw(hw)) {
                hw_dbg(hw, "PF still resetting\n");
-       else
+       } else {
                hw->mac.ops.init_hw(hw);
+               ixgbevf_negotiate_api(adapter);
+       }
 
        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -2072,6 +2074,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        hw->mac.max_tx_queues = 2;
        hw->mac.max_rx_queues = 2;
 
+       /* lock to protect mailbox accesses */
+       spin_lock_init(&adapter->mbx_lock);
+
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
@@ -2082,6 +2087,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                        pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
+               ixgbevf_negotiate_api(adapter);
                err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2103,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
        }
 
-       /* lock to protect mailbox accesses */
-       spin_lock_init(&adapter->mbx_lock);
-
        /* Enable dynamic interrupt throttling rates */
        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;
@@ -2620,8 +2623,6 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
-       ixgbevf_negotiate_api(adapter);
-
        /* setup queue reg_idx and Rx queue count */
        err = ixgbevf_setup_queues(adapter);
        if (err)
@@ -3216,6 +3217,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
 
+       ixgbevf_reset(adapter);
+
        rtnl_lock();
        err = ixgbevf_init_interrupt_scheme(adapter);
        rtnl_unlock();
@@ -3224,8 +3227,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
                return err;
        }
 
-       ixgbevf_reset(adapter);
-
        if (netif_running(netdev)) {
                err = ixgbevf_open(netdev);
                if (err)
@@ -3326,19 +3327,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                return err;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev, "No usable DMA "
+                               "configuration, aborting\n");
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
index 387b52635bc051259a5e5b99d9740c69bcca687a..4d44d64ae3870c42dd62bb4d1cdfeb96fe9122b9 100644 (file)
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
        if (addr)
-               memcpy(msg_addr, addr, 6);
+               memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 
        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
+       memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
index 23de82a9da82ff408181759505450adfd439e88d..f5685c0d057911c6c2f59dd982f21f869d1b978c 100644 (file)
@@ -309,7 +309,7 @@ static void
 jme_load_macaddr(struct net_device *netdev)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       unsigned char macaddr[6];
+       unsigned char macaddr[ETH_ALEN];
        u32 val;
 
        spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
-       memcpy(netdev->dev_addr, macaddr, 6);
+       memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
        spin_unlock_bh(&jme->macaddr_lock);
 }
 
@@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev,
 err_out_unmap:
        iounmap(jme->regs);
 err_out_free_netdev:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 err_out_release_regions:
        pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(netdev);
        iounmap(jme->regs);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index a36fa80968eb1a0a42e9051b8ca2ea91ece4840c..4a5e3b0f712e82e7253f92c7ef19ebd282f5dd4d 100644 (file)
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
        lp = netdev_priv(dev);
 
        bif->dev = dev;
-       memcpy(dev->dev_addr, bif->mac, 6);
+       memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
 
        lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
        lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
index 7fb5677451f9fb7aee3ebccd31a82b476cfac4df..4cfae6c9a63fb29f9bb8da02713b60835bb0f79c 100644 (file)
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
        p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
        spin_unlock_bh(&mp->mib_counters_lock);
-
-       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
        struct mv643xx_eth_private *mp = (void *)_mp;
-
        mib_counters_update(mp);
+       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
                mp->int_mask |= INT_TX_END_0 << i;
        }
 
+       add_timer(&mp->mib_counters_timer);
        port_start(mp);
 
        wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2514,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
        mac_addr = of_get_mac_address(pnp);
        if (mac_addr)
-               memcpy(ppd.mac_addr, mac_addr, 6);
+               memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
 
        mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
        mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
        if (!ppdev)
                return -ENOMEM;
        ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ppdev->dev.of_node = pnp;
 
        ret = platform_device_add_resources(ppdev, &res, 1);
        if (ret)
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
        struct net_device *dev = mp->dev;
 
        if (is_valid_ether_addr(pd->mac_addr))
-               memcpy(dev->dev_addr, pd->mac_addr, 6);
+               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
        else
                uc_addr_get(mp, dev->dev_addr);
 
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->mib_counters_timer.data = (unsigned long)mp;
        mp->mib_counters_timer.function = mib_counters_timer_wrapper;
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-       add_timer(&mp->mib_counters_timer);
 
        spin_lock_init(&mp->mib_counters_lock);
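
The mv643xx_eth hunks separate the statistics refresh from timer management: mib_counters_update() no longer re-arms the timer, the wrapper re-arms itself after each run, and add_timer() moves from probe time to mv643xx_eth_open(). A rough sketch of that self-rearming layout using the same pre-timer_setup API the driver uses here; names are illustrative:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list example_timer;

    static void example_update_counters(void)
    {
            /* refresh the MIB counters; no timer handling in here any more */
    }

    static void example_timer_fn(unsigned long data)
    {
            example_update_counters();
            mod_timer(&example_timer, jiffies + 30 * HZ);   /* re-arm for the next pass */
    }

    static void example_open(void)
    {
            /* the real driver initialises the timer at probe and only starts it at open */
            init_timer(&example_timer);
            example_timer.function = example_timer_fn;
            example_timer.data = 0;
            example_timer.expires = jiffies + 30 * HZ;
            add_timer(&example_timer);
    }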
 
index 1a9c4f6269ea8a3422781bc14f2f7164b57ff7bf..5978461938699f098f7927c7d439cd9134fbc6a7 100644 (file)
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                                               PCI_DMA_FROMDEVICE);
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
+               struct skge_element ee;
                struct sk_buff *nskb;
 
                nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
                if (!nskb)
                        goto resubmit;
 
-               skb = e->skb;
+               ee = *e;
+
+               skb = ee.skb;
                prefetch(skb->data);
 
                if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                }
 
                pci_unmap_single(skge->hw->pdev,
-                                dma_unmap_addr(e, mapaddr),
-                                dma_unmap_len(e, maplen),
+                                dma_unmap_addr(&ee, mapaddr),
+                                dma_unmap_len(&ee, maplen),
                                 PCI_DMA_FROMDEVICE);
        }
 
@@ -4043,7 +4046,6 @@ err_out_free_regions:
        pci_release_regions(pdev);
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 err_out:
        return err;
 }
@@ -4087,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev)
 
        iounmap(hw->regs);
        kfree(hw);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
index e09a8c6f85366c4c1251f5365549e154f743f08c..a7df981d2123a9d0c73180fbc4fc70834adacd98 100644 (file)
@@ -5081,7 +5081,6 @@ err_out_free_regions:
 err_out_disable:
        pci_disable_device(pdev);
 err_out:
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -5124,8 +5123,6 @@ static void sky2_remove(struct pci_dev *pdev)
 
        iounmap(hw->regs);
        kfree(hw);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int sky2_suspend(struct device *dev)
index ea20182c6969245e53a3a9da563c76461555f7f5..735765c21c9595a8d7b3bacce95de32589a7ee4e 100644 (file)
@@ -2253,7 +2253,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_vport_oper_state *vf_oper;
        struct mlx4_vport_state *vf_admin;
        int slave;
 
@@ -2269,7 +2268,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
                return -EINVAL;
 
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
-       vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
        if ((0 == vlan) && (0 == qos))
                vf_admin->default_vlan = MLX4_VGT;
index fa37b7a612133c739263346a5d6f372af0fd9dda..85d91665d400117f8a0adb489e7ab27aae2f5f0f 100644 (file)
@@ -1733,7 +1733,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
        /* Unregister Mac address for the port */
        mlx4_en_put_qp(priv);
-       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;
 
        /* Free RX Rings */
index dec455c8f6274a9827f93bf038d8e12c58242765..afe2efa69c8683647766a8d5dc7d561f3f76c2df 100644 (file)
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                put_page(page);
                return -ENOMEM;
        }
-       page_alloc->size = PAGE_SIZE << order;
+       page_alloc->page_size = PAGE_SIZE << order;
        page_alloc->page = page;
        page_alloc->dma = dma;
-       page_alloc->offset = frag_info->frag_align;
+       page_alloc->page_offset = frag_info->frag_align;
        /* Not doing get_page() for each frag is a big win
         * on asymetric workloads.
         */
-       atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+       atomic_set(&page->_count,
+                  page_alloc->page_size / frag_info->frag_stride);
        return 0;
 }
 
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
        for (i = 0; i < priv->num_frags; i++) {
                frag_info = &priv->frag_info[i];
                page_alloc[i] = ring_alloc[i];
-               page_alloc[i].offset += frag_info->frag_stride;
-               if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+               page_alloc[i].page_offset += frag_info->frag_stride;
+
+               if (page_alloc[i].page_offset + frag_info->frag_stride <=
+                   ring_alloc[i].page_size)
                        continue;
+
                if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
                        goto out;
        }
 
        for (i = 0; i < priv->num_frags; i++) {
                frags[i] = ring_alloc[i];
-               dma = ring_alloc[i].dma + ring_alloc[i].offset;
+               dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
                ring_alloc[i] = page_alloc[i];
                rx_desc->data[i].addr = cpu_to_be64(dma);
        }
@@ -117,7 +121,7 @@ out:
                frag_info = &priv->frag_info[i];
                if (page_alloc[i].page != ring_alloc[i].page) {
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
-                               page_alloc[i].size, PCI_DMA_FROMDEVICE);
+                               page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
                        atomic_set(&page->_count, 1);
                        put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
                              int i)
 {
        const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+       u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
 
-       if (frags[i].offset + frag_info->frag_stride > frags[i].size)
-               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
-                                        PCI_DMA_FROMDEVICE);
+       if (next_frag_end > frags[i].page_size)
+               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+                              PCI_DMA_FROMDEVICE);
 
        if (frags[i].page)
                put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
 
                page_alloc = &ring->page_alloc[i];
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                              page_alloc->size, PCI_DMA_FROMDEVICE);
+                              page_alloc->page_size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
                atomic_set(&page->_count, 1);
                put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
                       i, page_count(page_alloc->page));
 
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                               page_alloc->size, PCI_DMA_FROMDEVICE);
-               while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
+               while (page_alloc->page_offset + frag_info->frag_stride <
+                      page_alloc->page_size) {
                        put_page(page_alloc->page);
-                       page_alloc->offset += frag_info->frag_stride;
+                       page_alloc->page_offset += frag_info->frag_stride;
                }
                page_alloc->page = NULL;
        }
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                /* Save page reference in skb */
                __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
                skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
-               skb_frags_rx[nr].page_offset = frags[nr].offset;
+               skb_frags_rx[nr].page_offset = frags[nr].page_offset;
                skb->truesize += frag_info->frag_stride;
                frags[nr].page = NULL;
        }
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
        /* Get pointer to first fragment so we could copy the headers into the
         * (linear part of the) skb */
-       va = page_address(frags[0].page) + frags[0].offset;
+       va = page_address(frags[0].page) + frags[0].page_offset;
 
        if (length <= SMALL_PACKET_SIZE) {
                /* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
                                                DMA_FROM_DEVICE);
                        ethh = (struct ethhdr *)(page_address(frags[0].page) +
-                                                frags[0].offset);
+                                                frags[0].page_offset);
 
                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
index 0d63daa2f422e082d85fd627a892c9fb5f5c1945..c151e7a6710a4b11d7efaa2816f65b4ba2732e81 100644 (file)
@@ -652,7 +652,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
        if (field & 1<<6)
-               dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
        MLX4_GET(dev_cap->max_icm_sz, outbox,
                 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
        if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1713,7 +1713,6 @@ void mlx4_opreq_action(struct work_struct *work)
        u32 *outbox;
        u32 modifier;
        u16 token;
-       u16 type_m;
        u16 type;
        int err;
        u32 num_qps;
@@ -1746,7 +1745,6 @@ void mlx4_opreq_action(struct work_struct *work)
                MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
                MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
                MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
-               type_m = type >> 12;
                type &= 0xfff;
 
                switch (type) {
index 60c9f4f103fce1a2c7d815ccc303b7b94805068c..179d26709c94d93bcc1c720b16d6bbdcf59a2eb7 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
+#include <linux/kmod.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -650,6 +651,27 @@ err_mem:
        return err;
 }
 
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+       int port;
+       int has_ib_port = false;
+       int has_eth_port = false;
+#define EN_DRV_NAME    "mlx4_en"
+#define IB_DRV_NAME    "mlx4_ib"
+
+       for (port = 1; port <= dev->caps.num_ports; port++) {
+               if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+                       has_ib_port = true;
+               else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+                       has_eth_port = true;
+       }
+
+       if (has_ib_port)
+               request_module_nowait(IB_DRV_NAME);
+       if (has_eth_port)
+               request_module_nowait(EN_DRV_NAME);
+}
+
 /*
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
@@ -681,6 +703,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
                }
                mlx4_set_port_mask(dev);
                err = mlx4_register_device(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to register device\n");
+                       goto out;
+               }
+               mlx4_request_modules(dev);
        }
 
 out:
@@ -2305,6 +2332,8 @@ slave_start:
        if (err)
                goto err_port;
 
+       mlx4_request_modules(dev);
+
        mlx4_sense_init(dev);
        mlx4_start_sense(dev);
 
index 55f6245efb6cd250f2efda4e6941bdb81b1fd9f3..70f0213d68c42cd93a5a7c1b5969214ed83e0797 100644 (file)
@@ -645,7 +645,7 @@ static const u8 __promisc_mode[] = {
 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
                                    enum mlx4_net_trans_promisc_mode flow_type)
 {
-       if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+       if (flow_type >= MLX4_FS_MODE_NUM) {
                mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
                return -EINVAL;
        }
@@ -681,7 +681,7 @@ const u16 __sw_id_hw[] = {
 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
                                  enum mlx4_net_trans_rule_id id)
 {
-       if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+       if (id >= MLX4_NET_TRANS_RULE_NUM) {
                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
                return -EINVAL;
        }
@@ -706,7 +706,7 @@ static const int __rule_hw_sz[] = {
 int mlx4_hw_rule_sz(struct mlx4_dev *dev,
               enum mlx4_net_trans_rule_id id)
 {
-       if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+       if (id >= MLX4_NET_TRANS_RULE_NUM) {
                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
                return -EINVAL;
        }
index 5e0aa569306aab2f6c8185cd1574cdd4abeb4aab..bf06e3610d27787ced2f43b9f9aa9fef105aa3df 100644 (file)
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
 struct mlx4_en_rx_alloc {
        struct page     *page;
        dma_addr_t      dma;
-       u32             offset;
-       u32             size;
+       u32             page_offset;
+       u32             page_size;
 };
 
 struct mlx4_en_tx_ring {
index 79fd269e2c54b3e0a043ab072a4e2b0f888e0088..9e08e35ce351c30046da5d59ce64daa65c48b380 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 
index 5472cbd34028d9038539824c4167a0c3010a3a01..6ca30739625f7ac568d693b5d4b9d3d9e47b46de 100644 (file)
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
        return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+                          int csum)
 {
        block->token = token;
-       block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
-       block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+       if (csum) {
+               block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+                                           sizeof(block->data) - 2);
+               block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+       }
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
 {
        struct mlx5_cmd_mailbox *next = msg->next;
 
        while (next) {
-               calc_block_sig(next->buf, token);
+               calc_block_sig(next->buf, token, csum);
                next = next->next;
        }
 }
 
-static void set_signature(struct mlx5_cmd_work_ent *ent)
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
        ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-       calc_chain_sig(ent->in, ent->token);
-       calc_chain_sig(ent->out, ent->token);
+       calc_chain_sig(ent->in, ent->token, csum);
+       calc_chain_sig(ent->out, ent->token, csum);
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
-       if (!cmd->checksum_disabled)
-               set_signature(ent);
+       set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ktime_get_ts(&ent->ts1);
 
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
 
                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
-               if (xor8_buf(block, sizeof(*block)) != 0xff)
-                       return -EINVAL;
 
                memcpy(to, block->data, copy);
                to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                goto err_map;
        }
 
+       cmd->checksum_disabled = 1;
        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
        cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
 
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
-       case MLX5_CMD_STAT_LIM_ERR:                     return -EINVAL;
+       case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
index 443cc4d7b024c02d2cc77861868e1c1b17ee2524..2231d93cc7ad116e55c77e0269fa27878f6c6a4d 100644 (file)
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                goto err_in;
        }
 
+       snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+                name, pci_name(dev->pdev));
        eq->eqn = out.eq_number;
        err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-                         name, eq);
+                         eq->name, eq);
        if (err)
                goto err_eq;
 
index b47739b0b5f6dfb34139ad74e02ac10db1d4fbbd..bc0f5fb66e249dc2e652b4126f9b907707b87c6c 100644 (file)
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
        struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
        struct mlx5_cmd_set_hca_cap_mbox_out set_out;
-       struct mlx5_profile *prof = dev->profile;
        u64 flags;
-       int csum = 1;
        int err;
 
        memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
               sizeof(set_ctx->hca_cap));
 
-       if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
-               csum = !!prof->cmdif_csum;
-               flags = be64_to_cpu(set_ctx->hca_cap.flags);
-               if (csum)
-                       flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-               else
-                       flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
-               set_ctx->hca_cap.flags = cpu_to_be64(flags);
-       }
-
        if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
                set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
+       flags = be64_to_cpu(query_out->hca_cap.flags);
+       /* disable checksum */
+       flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+       set_ctx->hca_cap.flags = cpu_to_be64(flags);
        memset(&set_out, 0, sizeof(set_out));
        set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
        set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        if (err)
                goto query_ex;
 
-       if (!csum)
-               dev->cmd.checksum_disabled = 1;
-
 query_ex:
        kfree(query_out);
        kfree(set_ctx);
index 3a2408d448203623754d0aa87e35c16e809da43f..7b12acf210f81cd408a4b0e8a965fd4441165c9f 100644 (file)
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
        __be64                  pas[0];
 };
 
+enum {
+       MAX_RECLAIM_TIME_MSECS  = 5000,
+};
+
 static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
 {
        struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        int err;
        int i;
 
+       if (nclaimed)
+               *nclaimed = 0;
+
        memset(&in, 0, sizeof(in));
        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
        out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
 
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 {
-       unsigned long end = jiffies + msecs_to_jiffies(5000);
+       unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct fw_page *fwp;
        struct rb_node *p;
+       int nclaimed = 0;
        int err;
 
        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+                       err = reclaim_pages(dev, fwp->func_id,
+                                           optimal_reclaimed_pages(),
+                                           &nclaimed);
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
                                return err;
                        }
+                       if (nclaimed)
+                               end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
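Note: the mlx5 page-reclaim hunk above turns mlx5_reclaim_startup_pages() from a fixed five-second budget into a sliding one: every call to reclaim_pages() that actually recovers pages pushes the deadline out by another MAX_RECLAIM_TIME_MSECS, so the driver only gives up after the firmware has made no progress for a full window. A hedged sketch of that sliding-deadline pattern in isolation follows; the callbacks and the -ETIMEDOUT return are invented for illustration and are not part of the patch.

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: have_pages()/do_reclaim() stand in for the
 * page_root rb-tree walk and reclaim_pages() used above.
 */
static int reclaim_with_sliding_deadline(bool (*have_pages)(void),
					 int (*do_reclaim)(int *nclaimed),
					 unsigned int window_msecs)
{
	unsigned long end = jiffies + msecs_to_jiffies(window_msecs);
	int nclaimed = 0;
	int err;

	while (have_pages()) {
		err = do_reclaim(&nclaimed);
		if (err)
			return err;
		/* Any progress buys the firmware another full window. */
		if (nclaimed)
			end = jiffies + msecs_to_jiffies(window_msecs);
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
	}

	return 0;
}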
index 075f4e21d33df6f4f2b749dcd4aeb9fee1405077..c83d16dc7cd56034de4bfbc7deafcad6ddb42439 100644 (file)
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
        w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
        ks_wrreg16(ks, KS_MARL, w);
 
-       memcpy(ks->mac_addr, data, 6);
+       memcpy(ks->mac_addr, data, ETH_ALEN);
 
        if (ks->enabled)
                ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
        }
        netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
 
-       memcpy(netdev->dev_addr, ks->mac_addr, 6);
+       memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
 
        ks_set_mac(ks, netdev->dev_addr);
 
index 8ebc352bcbe6b817912e9d055202c4c077779924..ddd252a3da9c023794e0bde418d689d87cb4b8c4 100644 (file)
@@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev)
        struct platform_info *info = pci_get_drvdata(pdev);
        struct dev_info *hw_priv = &info->dev_info;
 
-       pci_set_drvdata(pdev, NULL);
-
        release_mem_region(pci_resource_start(pdev, 0),
                pci_resource_len(pdev, 0));
        for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
 
 static char pcidev_name[] = "ksz884xp";
 
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
        { PCI_VENDOR_ID_MICREL_KS, 0x8841,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_MICREL_KS, 0x8842,
index 83c2091c9c234bfe9ecbb4067d1396aee19f49f6..cbd013379252a158322aa5c4f508d0a9d1413020 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
 
 #include "moxart_ether.h"
 
@@ -448,7 +447,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                netdev_err(ndev, "irq_of_parse_and_map failed\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto irq_map_fail;
        }
 
        priv = netdev_priv(ndev);
@@ -472,24 +472,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
        priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
                                                GFP_DMA | GFP_KERNEL);
-       if (priv->tx_desc_base == NULL)
+       if (priv->tx_desc_base == NULL) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
                                                RX_DESC_NUM, &priv->rx_base,
                                                GFP_DMA | GFP_KERNEL);
-       if (priv->rx_desc_base == NULL)
+       if (priv->rx_desc_base == NULL) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
                                    GFP_ATOMIC);
-       if (!priv->tx_buf_base)
+       if (!priv->tx_buf_base) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
                                    GFP_ATOMIC);
-       if (!priv->rx_buf_base)
+       if (!priv->rx_buf_base) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        platform_set_drvdata(pdev, ndev);
 
@@ -522,7 +530,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
 init_fail:
        netdev_err(ndev, "init failed\n");
        moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+       free_netdev(ndev);
        return ret;
 }
 
@@ -543,7 +552,7 @@ static const struct of_device_id moxart_mac_match[] = {
        { }
 };
 
-struct __initdata platform_driver moxart_mac_driver = {
+static struct platform_driver moxart_mac_driver = {
        .probe  = moxart_mac_probe,
        .remove = moxart_remove,
        .driver = {
index 149355b52ad0c8f2d4aac4f6cfe83041bc59f45b..68026f7e8ba308d62c66570fb5ac7c19eb9f8994 100644 (file)
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
 {
-       int rc = true;
+       bool rc = true;
        spin_lock(&ss->lock);
        if ((ss->state & SLICE_LOCKED)) {
                WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
 {
-       int rc = true;
+       bool rc = true;
        spin_lock_bh(&ss->lock);
        if ((ss->state & SLICE_LOCKED)) {
                ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 
        /* Walk the multicast list, and add each address */
        netdev_for_each_mc_addr(ha, dev) {
-               memcpy(data, &ha->addr, 6);
+               memcpy(data, &ha->addr, ETH_ALEN);
                cmd.data0 = ntohl(data[0]);
                cmd.data1 = ntohl(data[1]);
                err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
        }
 
        /* change the dev structure */
-       memcpy(dev->dev_addr, sa->sa_data, 6);
+       memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
        return 0;
 }
 
@@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
        set_fw_name(mgp, NULL, false);
        free_netdev(netdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E     0x0008
index 7a5e295588b0740f38c288364de1a51be6898fd9..64ec2a437f46a3280e9377c55d8226a19f82d089 100644 (file)
@@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
  err_ioremap:
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
 
  err_pci_request_regions:
        free_netdev(dev);
@@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
        pci_release_regions (pdev);
        iounmap(ioaddr);
        free_netdev (dev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index 51b00941302c8d7e7eea3dcad6daa17c7702c65e..9eeddbd0b2c7c749de1c863e4b142c25f35c4fb9 100644 (file)
@@ -8185,7 +8185,6 @@ mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
        return ret;
@@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev)
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        pci_disable_device(pdev);
 }
index 5a20eaf903dd8b8833262166e4be4e65abd6f674..8614eeb7de8140114d256ae05c62ce95d5844a18 100644 (file)
@@ -4739,7 +4739,6 @@ _exit6:
 _exit5:
        vxge_device_unregister(hldev);
 _exit4:
-       pci_set_drvdata(pdev, NULL);
        vxge_hw_device_terminate(hldev);
        pci_disable_sriov(pdev);
 _exit3:
@@ -4782,7 +4781,6 @@ static void vxge_remove(struct pci_dev *pdev)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
 
        vxge_device_unregister(hldev);
-       pci_set_drvdata(pdev, NULL);
        /* Do not call pci_disable_sriov here, as it will break child devices */
        vxge_hw_device_terminate(hldev);
        iounmap(vdev->bar0);
index a061b93efe66a29fd663a49bf16d1a510ff0879f..ba3ca18611f7905d559eaec225b6a4ae837c3dda 100644 (file)
@@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        }
 
        if (pldat->dma_buff_base_v == 0) {
-               pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
-               pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
+               ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (ret)
+                       goto err_out_free_irq;
+
                pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
 
                /* Allocate a chunk of memory for the DMA ethernet buffers
index 622aa75904c4ee07c1b666466925e08ce0df25d3..2006a07004829dcc7f1743cf343480c6db3c5c95 100644 (file)
@@ -1552,8 +1552,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
 
        p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (result)
+               goto err;
 
        netif_carrier_off(netdev);
        result = register_netdev(netdev);
index 6797b1075874ae26cca6756bf450559ed7c7e6be..2a9003071d51a985be9b133fea76a585dabdf2d4 100644 (file)
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
 extern const char pch_driver_version[];
 
 /* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 
 /* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
 
 /* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
 
 /* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
-                                 u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+                         u16 data);
 #endif /* _PCH_GBE_H_ */
index cac33e5f9bc2635cd2a4fd9b38c1af6f155164d8..b6bdeb3c19711ac960646dfdbeef2207b86f366d 100644 (file)
@@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev)
                iounmap(hmp->base);
                free_netdev(dev);
                pci_release_regions(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index d28593b1fc3e8664b1f6aad8e6d53dba4fdd1a5c..07a890eb72ad8677a47782f84f79c6f02ee1140e 100644 (file)
@@ -513,7 +513,6 @@ err_out_unmap_rx:
 err_out_unmap_tx:
         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
        pci_release_regions (pdev);
 
        free_netdev (dev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 
index 5b65356e7568b28e0d2030c7436863db10a44b32..dbaa49e58b0cd8610c71a83c8170c7f10255453e 100644 (file)
@@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev)
        pasemi_dma_free_chan(&mac->tx->chan);
        pasemi_dma_free_chan(&mac->rx->chan);
 
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 }
 
index 32675e16021e8aafd2a5598473cc2ad54dae7668..9adcdbb49476e53face68f353dd7261ffea356b3 100644 (file)
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.82"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
 
 int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
 int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
-                               int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
 
 extern const struct ethtool_ops netxen_nic_ethtool_ops;
 
index 32c790659f9c1ba0410614c30818eb9cf20a46ee..0c64c82b9acffdcfd58b9471f971d0eebe95a9cc 100644 (file)
@@ -958,6 +958,7 @@ enum {
 #define NETXEN_PEG_HALT_STATUS2        (NETXEN_CAM_RAM(0xac))
 #define NX_CRB_DEV_REF_COUNT           (NETXEN_CAM_RAM(0x138))
 #define NX_CRB_DEV_STATE               (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY                 (NETXEN_CAM_RAM(0x178))
 
 /* MiniDIMM related macros */
 #define NETXEN_DIMM_CAPABILITY         (NETXEN_CAM_RAM(0x258))
index 8375cbde996976047475b9c5e6c16f33fd365df4..67efe754367de10d78e6bb1cbacd65caa6f1524c 100644 (file)
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
 
        mac_req = (nx_mac_req_t *)&req.words[0];
        mac_req->op = op;
-       memcpy(mac_req->mac_addr, addr, 6);
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
        return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 }
index cbd75f97ffb3c6d32624e15b81adcb05d5d4b1d9..3bec8cfebf99299ef9c3733b47033a26fe8ba7b1 100644 (file)
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
        return 0;
 }
 
+#define NETXEN_ULA_ADAPTER_KEY         (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY     (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+       u32 temp;
+
+       /* Print ULA info only once for an adapter */
+       if (adapter->portnum != 0)
+               return;
+
+       temp = NXRD32(adapter, NETXEN_ULA_KEY);
+       switch (temp) {
+       case NETXEN_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "ULA adapter");
+               break;
+       case NETXEN_NON_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "non ULA adapter");
+               break;
+       default:
+               break;
+       }
+
+       return;
+}
+
 #ifdef CONFIG_PCIEAER
 static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
 {
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_disable_msi;
        }
 
+       netxen_read_ula_info(adapter);
+
        err = netxen_setup_netdev(adapter, netdev);
        if (err)
                goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res:
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        return err;
 }
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        free_netdev(netdev);
 }
index 91a8fcd6c24654e52d94aa6a96f68bf02e32a4ee..0758b9435358b9f4012e703a2605ad64ef4aff7c 100644 (file)
@@ -3916,7 +3916,6 @@ err_out_free_regions:
        pci_release_regions(pdev);
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 err_out:
        return err;
 }
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
 
        iounmap(qdev->mem_map_registers);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(ndev);
 }
 
index 81bf83604c4fb1c04aa0a5dfe361d1ffaefd160d..0c2405dbc970eed07e69dc411bf73c1f1c65bfe0 100644 (file)
@@ -38,8 +38,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 50
-#define QLCNIC_LINUX_VERSIONID  "5.3.50"
+#define _QLCNIC_LINUX_SUBVERSION 51
+#define QLCNIC_LINUX_VERSIONID  "5.3.51"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -961,8 +961,6 @@ struct qlcnic_ipaddr {
 #define __QLCNIC_SRIOV_CAPABLE         11
 #define __QLCNIC_MBX_POLL_ENABLE       12
 #define __QLCNIC_DIAG_MODE             13
-#define __QLCNIC_DCB_STATE             14
-#define __QLCNIC_DCB_IN_AEN            15
 
 #define QLCNIC_INTERRUPT_TEST          1
 #define QLCNIC_LOOPBACK_TEST           2
@@ -1199,6 +1197,7 @@ struct qlcnic_npar_info {
        u8      promisc_mode;
        u8      offload_flags;
        u8      pci_func;
+       u8      mac[ETH_ALEN];
 };
 
 struct qlcnic_eswitch {
@@ -2115,98 +2114,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
 
        return status;
 }
-
-static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->get_hw_capability)
-               return dcb->ops->get_hw_capability(adapter);
-
-       return 0;
-}
-
-static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->free)
-               dcb->ops->free(adapter);
-}
-
-static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->attach)
-               return dcb->ops->attach(adapter);
-
-       return 0;
-}
-
-static inline int
-qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->query_hw_capability)
-               return dcb->ops->query_hw_capability(adapter, buf);
-
-       return 0;
-}
-
-static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->get_info)
-               dcb->ops->get_info(adapter);
-}
-
-static inline int
-qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->query_cee_param)
-               return dcb->ops->query_cee_param(adapter, buf, type);
-
-       return 0;
-}
-
-static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->get_cee_cfg)
-               return dcb->ops->get_cee_cfg(adapter);
-
-       return 0;
-}
-
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->register_aen)
-               dcb->ops->register_aen(adapter, flag);
-}
-
-static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
-                                        void *msg)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->handle_aen)
-               dcb->ops->handle_aen(adapter, msg);
-}
-
-static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->init_dcbnl_ops)
-               dcb->ops->init_dcbnl_ops(adapter);
-}
 #endif                         /* __QLCNIC_H_ */
index 3ca00e05f23d2ade287d6c1d9047410eeac43114..a126bdf2795248a1fdb888a18aef1ce3b85f80db 100644 (file)
@@ -902,7 +902,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
                         QLCNIC_MBX_RSP(event[0]));
                break;
        case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
-               qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+               qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
                break;
        default:
                dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -2321,19 +2321,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
                        i++;
                        memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
                        i = i + 3;
-                       if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-                               dev_info(dev, "id = %d active = %d type = %d\n"
-                                        "\tport = %d min bw = %d max bw = %d\n"
-                                        "\tmac_addr =  %pM\n", pci_info->id,
-                                        pci_info->active, pci_info->type,
-                                        pci_info->default_port,
-                                        pci_info->tx_min_bw,
-                                        pci_info->tx_max_bw, pci_info->mac);
                }
-               if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-                       dev_info(dev, "Max functions = %d, active functions = %d\n",
-                                ahw->max_pci_func, ahw->act_pci_func);
-
        } else {
                dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
                err = -EIO;
@@ -3279,12 +3267,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
 {
        return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
-               sizeof(adapter->ahw->ext_reg_tbl)) +
-               (ARRAY_SIZE(qlcnic_83xx_reg_tbl) +
-               sizeof(adapter->ahw->reg_tbl));
+               sizeof(*adapter->ahw->ext_reg_tbl)) +
+               (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+               sizeof(*adapter->ahw->reg_tbl));
 }
 
 int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
@@ -3381,10 +3369,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
        }
        config = ahw->port_config;
        if (config & QLC_83XX_CFG_STD_PAUSE) {
-               if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+               switch (MSW(config)) {
+               case QLC_83XX_TX_PAUSE:
                        pause->tx_pause = 1;
-               if (config & QLC_83XX_CFG_STD_RX_PAUSE)
+                       break;
+               case QLC_83XX_RX_PAUSE:
                        pause->rx_pause = 1;
+                       break;
+               case QLC_83XX_TX_RX_PAUSE:
+               default:
+                       /* Backward compatibility for existing
+                        * flash definitions
+                        */
+                       pause->tx_pause = 1;
+                       pause->rx_pause = 1;
+               }
        }
 
        if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3426,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
                ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
                ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
        } else if (!pause->rx_pause && !pause->tx_pause) {
-               ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+               ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+                                     QLC_83XX_CFG_STD_PAUSE);
        }
        status = qlcnic_83xx_set_port_config(adapter);
        if (status) {
index 533e150503afc06fb5db107d1fa4bdbcb894abb8..9f4e4c4ab5214e2215b6cf061347f429c68ff5e6 100644 (file)
@@ -363,6 +363,9 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_LINK_EEE(data)                ((data) & BIT_13)
 #define QLC_83XX_DCBX(data)                    (((data) >> 28) & 7)
 #define QLC_83XX_AUTONEG(data)                 ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE                      0x10
+#define QLC_83XX_RX_PAUSE                      0x20
+#define QLC_83XX_TX_RX_PAUSE                   0x30
 #define QLC_83XX_CFG_STD_PAUSE                 (1 << 5)
 #define QLC_83XX_CFG_STD_TX_PAUSE              (1 << 20)
 #define QLC_83XX_CFG_STD_RX_PAUSE              (2 << 20)
@@ -626,7 +629,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
 int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
                                    struct qlcnic_info *, u8);
 int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
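Note: the new QLC_83XX_TX_PAUSE/QLC_83XX_RX_PAUSE/QLC_83XX_TX_RX_PAUSE values above look arbitrary until read against the existing per-direction flags: QLC_83XX_CFG_STD_TX_PAUSE is 1 << 20 and QLC_83XX_CFG_STD_RX_PAUSE is 2 << 20, so once MSW() drops the low 16 bits of port_config they show up as 0x10, 0x20 and 0x30, which is exactly what the new switch in qlcnic_83xx_get_pauseparam() matches on. A short sketch of that relationship follows; MSW() is assumed here to be the driver's usual upper-16-bit helper, and the SKETCH_* names are invented.

/* Illustrative only, not part of the patch. */
#define SKETCH_CFG_STD_TX_PAUSE	(1 << 20)	/* QLC_83XX_CFG_STD_TX_PAUSE */
#define SKETCH_CFG_STD_RX_PAUSE	(2 << 20)	/* QLC_83XX_CFG_STD_RX_PAUSE */
#define SKETCH_MSW(x)		((unsigned short)((unsigned int)(x) >> 16))

/* SKETCH_MSW(SKETCH_CFG_STD_TX_PAUSE)                           == 0x10 (TX_PAUSE)
 * SKETCH_MSW(SKETCH_CFG_STD_RX_PAUSE)                           == 0x20 (RX_PAUSE)
 * SKETCH_MSW(SKETCH_CFG_STD_TX_PAUSE | SKETCH_CFG_STD_RX_PAUSE) == 0x30 (TX_RX_PAUSE)
 */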
index f09e787af0b25ae9d523f531b47b67805c7bc8e0..e2cd48417041c1e749058d89f235a02822eaf159 100644 (file)
@@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
        if (adapter->portnum == 0)
                qlcnic_set_drv_version(adapter);
 
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(adapter->dcb);
        qlcnic_83xx_idc_attach_driver(adapter);
 
        return 0;
@@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_mailbox *mbx = ahw->mailbox;
        int ret = 0;
+       u32 owner;
        u32 val;
 
        /* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
                        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+               }  else {
+                       owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+                       if (ahw->pci_func == owner)
+                               qlcnic_dump_fw(adapter);
                }
                return -EIO;
        }
@@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
        adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
        qlcnic_83xx_periodic_tasks(adapter);
 
+       /* Do not reschedule if firmware is in a hung state and auto
+        * recovery is disabled
+        */
+       if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
+               return;
+
        /* Re-schedule the function */
        if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
                qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2163,6 +2174,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_dcb *dcb;
        int err = 0;
 
        ahw->msix_supported = !!qlcnic_use_msi_x;
@@ -2220,8 +2232,10 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
        if (err)
                goto disable_mbx_intr;
 
-       if (adapter->dcb && qlcnic_dcb_attach(adapter))
-               qlcnic_clear_dcb_ops(adapter);
+       dcb = adapter->dcb;
+
+       if (dcb && qlcnic_dcb_attach(dcb))
+               qlcnic_clear_dcb_ops(dcb);
 
        /* Periodically monitor device status */
        qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
index 0248a4c2f5dd15fb1f12827b90bf1af0b1e73ba1..734d28602ac3f335214ba9cc5f1c9d4408d6d554 100644 (file)
@@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
  **/
 static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
 {
-       int err = -EIO;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_npar_info *npar;
+       int i, err = -EIO;
 
        qlcnic_83xx_get_minidump_template(adapter);
+
        if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
                if (qlcnic_init_pci_info(adapter))
                        return err;
 
+               npar = adapter->npars;
+
+               for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+                       dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+                                npar->pci_func, npar->active, npar->type,
+                                npar->phy_port, npar->min_bw, npar->max_bw,
+                                npar->mac);
+               }
+
+               dev_info(dev, "Max functions = %d, active functions = %d\n",
+                        ahw->max_pci_func, ahw->act_pci_func);
+
                if (qlcnic_83xx_set_vnic_opmode(adapter))
                        return err;
 
@@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
                return err;
 
        qlcnic_83xx_config_vnic_buff_descriptors(adapter);
-       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+       ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
        adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
        qlcnic_83xx_enable_vnic_mode(adapter, 1);
 
-       dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
-                adapter->ahw->fw_hal_version);
+       dev_info(dev, "HAL Version: %d, Management function\n",
+                ahw->fw_hal_version);
 
        return 0;
 }
@@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
-                                            int func, int *port_id)
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+                                       int func, int *port_id)
 {
        struct qlcnic_info nic_info;
        int err = 0;
@@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
        else
                err = -EIO;
 
-       return err;
-}
-
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
-{
-       int id, err = 0;
-
-       err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
-       if (err)
-               return err;
-
-       if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
-               if (!qlcnic_enable_eswitch(adapter, id, 1))
-                       adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
-               else
-                       err = -EIO;
-       }
+       if (!err)
+               adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
 
        return err;
 }
index d62d5ce432ec7525ca3f504b790d050797f343d2..86bca7c14f99245a07e65a7385e8b67796afb3a9 100644 (file)
@@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
 static void qlcnic_dcb_aen_work(struct work_struct *);
 static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
 
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
-static void __qlcnic_dcb_free(struct qlcnic_adapter *);
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
-
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
-
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
 
 struct qlcnic_dcb_capability {
        bool    tsa_capability;
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
        .query_cee_param        = qlcnic_83xx_dcb_query_cee_param,
        .get_cee_cfg            = qlcnic_83xx_dcb_get_cee_cfg,
        .register_aen           = qlcnic_83xx_dcb_register_aen,
-       .handle_aen             = qlcnic_83xx_dcb_handle_aen,
+       .aen_handler            = qlcnic_83xx_dcb_aen_handler,
 };
 
 static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
        .get_hw_capability      = qlcnic_82xx_dcb_get_hw_capability,
        .query_cee_param        = qlcnic_82xx_dcb_query_cee_param,
        .get_cee_cfg            = qlcnic_82xx_dcb_get_cee_cfg,
-       .handle_aen             = qlcnic_82xx_dcb_handle_aen,
+       .aen_handler            = qlcnic_82xx_dcb_aen_handler,
 };
 
 static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map)
        return j;
 }
 
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
 {
-       if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
-               adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+       if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+               dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
 }
 
 static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
@@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
                adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
 }
 
-int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_dcb *dcb;
 
@@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
        adapter->dcb = dcb;
        dcb->adapter = adapter;
        qlcnic_set_dcb_ops(adapter);
+       dcb->state = 0;
 
        return 0;
 }
 
-static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
+       struct qlcnic_adapter *adapter;
 
        if (!dcb)
                return;
 
-       qlcnic_dcb_register_aen(adapter, 0);
+       adapter = dcb->adapter;
+       qlcnic_dcb_register_aen(dcb, 0);
 
-       while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+       while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
                usleep_range(10000, 11000);
 
        cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
        adapter->dcb = NULL;
 }
 
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
 {
-       qlcnic_dcb_get_hw_capability(adapter);
-       qlcnic_dcb_get_cee_cfg(adapter);
-       qlcnic_dcb_register_aen(adapter, 1);
+       qlcnic_dcb_get_hw_capability(dcb);
+       qlcnic_dcb_get_cee_cfg(dcb);
+       qlcnic_dcb_register_aen(dcb, 1);
 }
 
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
        int err = 0;
 
        INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
 
        dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
        if (!dcb->wq) {
-               dev_err(&adapter->pdev->dev,
+               dev_err(&dcb->adapter->pdev->dev,
                        "DCB workqueue allocation failed. DCB will be disabled\n");
                return -1;
        }
@@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
                goto out_free_cfg;
        }
 
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(dcb);
 
        return 0;
 out_free_cfg:
@@ -345,9 +346,9 @@ out_free_wq:
        return err;
 }
 
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
-                                           char *buf)
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
 {
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_cmd_args cmd;
        u32 mbx_out;
        int err;
@@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
        return err;
 }
 
-static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
 {
-       struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+       struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
        u32 mbx_out;
        int err;
 
        memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
 
-       err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+       err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
        if (err)
                return err;
 
@@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
        if (cap->max_num_tc > QLC_DCB_MAX_TC ||
            cap->max_ets_tc > cap->max_num_tc ||
            cap->max_pfc_tc > cap->max_num_tc) {
-               dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+               dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
                return -EINVAL;
        }
 
        return err;
 }
 
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+       struct qlcnic_dcb_cfg *cfg = dcb->cfg;
        struct qlcnic_dcb_capability *cap;
        u32 mbx_out;
        int err;
 
-       err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+       err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
        if (err)
                return err;
 
@@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
        cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
 
        if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
-               set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
 
        return err;
 }
 
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
                                           char *buf, u8 type)
 {
        u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
        struct device *dev = &adapter->pdev->dev;
        dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
                return -EINVAL;
        }
 
-       addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
-                                 GFP_KERNEL);
+       addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
        if (addr == NULL)
                return -ENOMEM;
 
@@ -488,72 +489,67 @@ out:
        qlcnic_free_mbx_args(&cmd);
 
 out_free_rsp:
-       dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+       dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
 
        return err;
 }
 
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 {
        struct qlcnic_dcb_mbx_params *mbx;
        int err;
 
-       mbx = adapter->dcb->param;
+       mbx = dcb->param;
        if (!mbx)
                return 0;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
                                         QLC_DCB_LOCAL_PARAM_FWID);
        if (err)
                return err;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
                                         QLC_DCB_OPER_PARAM_FWID);
        if (err)
                return err;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
                                         QLC_DCB_PEER_PARAM_FWID);
        if (err)
                return err;
 
        mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
 
-       qlcnic_dcb_data_cee_param_map(adapter);
+       qlcnic_dcb_data_cee_param_map(dcb->adapter);
 
        return err;
 }
 
 static void qlcnic_dcb_aen_work(struct work_struct *work)
 {
-       struct qlcnic_adapter *adapter;
        struct qlcnic_dcb *dcb;
 
        dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
-       adapter = dcb->adapter;
 
-       qlcnic_dcb_get_cee_cfg(adapter);
-       clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+       qlcnic_dcb_get_cee_cfg(dcb);
+       clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
 }
 
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
-                                      void *data)
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+       if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
                return;
 
        queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
 }
 
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+       struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
        u32 mbx_out;
        int err;
 
-       err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+       err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
        if (err)
                return err;
 
@@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
                cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
 
        if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
-               set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
 
        return err;
 }
 
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
                                           char *buf, u8 idx)
 {
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_dcb_mbx_params mbx_out;
        int err, i, j, k, max_app, size;
        struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@ out:
        return err;
 }
 
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
        int err;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
        if (err)
                return err;
 
-       qlcnic_dcb_data_cee_param_map(adapter);
+       qlcnic_dcb_data_cee_param_map(dcb->adapter);
 
        return err;
 }
 
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
-                                       bool flag)
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
 {
        u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_cmd_args cmd;
        int err;
 
@@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
        return err;
 }
 
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
-                                      void *data)
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
        u32 *val = data;
 
-       if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+       if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
                return;
 
        if (*val & BIT_8)
-               set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
        else
-               clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               clear_bit(QLCNIC_DCB_STATE, &dcb->state);
 
        queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
 }
@@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-       return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+       return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
 }
 
 static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
 {
-       memcpy(addr, netdev->dev_addr, netdev->addr_len);
+       memcpy(addr, netdev->perm_addr, netdev->addr_len);
 }
 
 static void
@@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
        *prio = *pgid = *bw_per = *up_tc_map = 0;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
            !type->tc_param_valid)
                return;
 
@@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
        *bw_pct = 0;
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
            !type->tc_param_valid)
                return;
 
@@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
        *setting = 0;
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
            !type->pfc_mode_enable)
                return;
 
@@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        switch (capid) {
@@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return -EINVAL;
 
        switch (attr) {
@@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
                                .protocol = id,
                             };
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb *dcb = adapter->dcb;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
                return 0;
 
        return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb_cee *type;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 1;
 
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
 
        *app_count = 0;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
        struct qlcnic_dcb_app *app;
        int i, j;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
        struct qlcnic_dcb_cee *peer;
        u8 i, j, k, map;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
 
        pfc->pfc_en = 0;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &cfg->type[QLC_DCB_PEER_IDX];
index b87ce9fb503e48298ea9e96e0490047fd3bb488b..c04ae0cdc108c98eb57d8a860d772535379178af 100644 (file)
@@ -8,26 +8,29 @@
 #ifndef __QLCNIC_DCBX_H
 #define __QLCNIC_DCBX_H
 
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+#define QLCNIC_DCB_STATE       0
+#define QLCNIC_DCB_AEN_MODE    1
 
 #ifdef CONFIG_QLCNIC_DCB
-int __qlcnic_register_dcb(struct qlcnic_adapter *);
+int qlcnic_register_dcb(struct qlcnic_adapter *);
 #else
-static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 { return 0; }
 #endif
 
+struct qlcnic_dcb;
+
 struct qlcnic_dcb_ops {
-       void (*init_dcbnl_ops) (struct qlcnic_adapter *);
-       void (*free) (struct qlcnic_adapter *);
-       int (*attach) (struct qlcnic_adapter *);
-       int (*query_hw_capability) (struct qlcnic_adapter *, char *);
-       int (*get_hw_capability) (struct qlcnic_adapter *);
-       void (*get_info) (struct qlcnic_adapter *);
-       int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
-       int (*get_cee_cfg) (struct qlcnic_adapter *);
-       int (*register_aen) (struct qlcnic_adapter *, bool);
-       void (*handle_aen) (struct qlcnic_adapter *, void *);
+       int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+       int (*get_hw_capability) (struct qlcnic_dcb *);
+       int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+       void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+       int (*register_aen) (struct qlcnic_dcb *, bool);
+       void (*aen_handler) (struct qlcnic_dcb *, void *);
+       int (*get_cee_cfg) (struct qlcnic_dcb *);
+       void (*get_info) (struct qlcnic_dcb *);
+       int (*attach) (struct qlcnic_dcb *);
+       void (*free) (struct qlcnic_dcb *);
 };
 
 struct qlcnic_dcb {
@@ -37,5 +40,85 @@ struct qlcnic_dcb {
        struct workqueue_struct         *wq;
        struct qlcnic_dcb_ops           *ops;
        struct qlcnic_dcb_cfg           *cfg;
+       unsigned long                   state;
 };
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+       kfree(dcb);
+       dcb = NULL;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_hw_capability)
+               return dcb->ops->get_hw_capability(dcb);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->free)
+               dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->attach)
+               return dcb->ops->attach(dcb);
+
+       return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+       if (dcb && dcb->ops->query_hw_capability)
+               return dcb->ops->query_hw_capability(dcb, buf);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_info)
+               dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+       if (dcb && dcb->ops->query_cee_param)
+               return dcb->ops->query_cee_param(dcb, buf, type);
+
+       return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_cee_cfg)
+               return dcb->ops->get_cee_cfg(dcb);
+
+       return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
+{
+       if (dcb && dcb->ops->register_aen)
+               dcb->ops->register_aen(dcb, flag);
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+       if (dcb && dcb->ops->aen_handler)
+               dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->init_dcbnl_ops)
+               dcb->ops->init_dcbnl_ops(dcb);
+}
 #endif
index 4d7ad0074d1c0c1eb1ef1026ab8954bd050f4071..b2a8805997ca98ccde591cd05dc0b2951470ef19 100644 (file)
@@ -187,8 +187,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
                return -1;
 }
 
-#define QLCNIC_RING_REGS_COUNT 20
-#define QLCNIC_RING_REGS_LEN   (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define        QLCNIC_TX_INTR_NOT_CONFIGURED   0X78563412
+
 #define QLCNIC_MAX_EEPROM_LEN   1024
 
 static const u32 diag_registers[] = {
@@ -219,7 +219,15 @@ static const u32 ext_diag_registers[] = {
 };
 
 #define QLCNIC_MGMT_API_VERSION        2
-#define QLCNIC_ETHTOOL_REGS_VER        3
+#define QLCNIC_ETHTOOL_REGS_VER        4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+       int ring_regs_cnt = (adapter->max_drv_tx_rings * 5) +
+                           (adapter->max_rds_rings * 2) +
+                           (adapter->max_sds_rings * 3) + 5;
+       return ring_regs_cnt * sizeof(u32);
+}
 
 static int qlcnic_get_regs_len(struct net_device *dev)
 {
@@ -231,7 +239,9 @@ static int qlcnic_get_regs_len(struct net_device *dev)
        else
                len = sizeof(ext_diag_registers) + sizeof(diag_registers);
 
-       return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
+       len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+       len += qlcnic_get_ring_regs_len(adapter);
+       return len;
 }
 
 static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +503,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_rds_ring *rds_rings;
+       struct qlcnic_host_tx_ring *tx_ring;
        u32 *regs_buff = p;
        int ring, i = 0;
 
@@ -512,21 +524,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
                return;
 
-       regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
-
-       regs_buff[i++] = 1; /* No. of tx ring */
-       regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
-       regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
-
-       regs_buff[i++] = 2; /* No. of rx ring */
-       regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
-       regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+       /* Marker between regs and TX ring count */
+       regs_buff[i++] = 0xFFEFCDAB;
+
+       regs_buff[i++] = adapter->max_drv_tx_rings; /* No. of TX ring */
+       for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+               regs_buff[i++] = tx_ring->sw_consumer;
+               regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+               regs_buff[i++] = tx_ring->producer;
+               if (tx_ring->crb_intr_mask)
+                       regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+               else
+                       regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+       }
 
-       regs_buff[i++] = adapter->max_sds_rings;
+       regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_rings = &recv_ctx->rds_rings[ring];
+               regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+               regs_buff[i++] = rds_rings->producer;
+       }
 
+       regs_buff[i++] = adapter->max_sds_rings; /* No. of SDS ring */
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &(recv_ctx->sds_rings[ring]);
                regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+               regs_buff[i++] = sds_ring->consumer;
+               regs_buff[i++] = readl(sds_ring->crb_intr_mask);
        }
 }
 
@@ -665,7 +691,7 @@ static int qlcnic_set_channels(struct net_device *dev,
                        return err;
        }
 
-       if (channel->tx_count) {
+       if (qlcnic_82xx_check(adapter) && channel->tx_count) {
                err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
                if (err)
                        return err;
@@ -1794,3 +1820,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
        .set_msglevel           = qlcnic_set_msglevel,
        .get_msglevel           = qlcnic_get_msglevel,
 };
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+       .get_settings           = qlcnic_get_settings,
+       .get_drvinfo            = qlcnic_get_drvinfo,
+       .set_msglevel           = qlcnic_set_msglevel,
+       .get_msglevel           = qlcnic_get_msglevel,
+       .set_dump               = qlcnic_set_dump,
+};
index f8adc7b01f1f5ef9e62899a9c68c5eea9d2e2cba..73e72eb83bdfb346fd034140b5df7b654d7db193 100644 (file)
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 
        mac_req = (struct qlcnic_mac_req *)&req.words[0];
        mac_req->op = op;
-       memcpy(mac_req->mac_addr, addr, 6);
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
        vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
        vlan_req->vlan_id = cpu_to_le16(vlan_id);
index 11b4bb83b9308b2c4a9331ea737ee7766c074794..897627dd1d04e7f6c3cd60ad25ede51fad376327 100644 (file)
@@ -1011,7 +1011,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
                }
                break;
        case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
-               qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+               qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
                break;
        default:
                break;
index c4c5023e1fdf64aed12efce219e2acb04922b142..dcf4a4e7ce23d49073b949b2275f8192b09a0c97 100644 (file)
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
        while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
                usleep_range(10000, 11000);
 
+       if (!adapter->fw_work.work.func)
+               return;
+
        cancel_delayed_work_sync(&adapter->fw_work);
 }
 
@@ -816,7 +819,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
 int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_pci_info *pci_info;
-       int i, ret = 0, j = 0;
+       int i, id = 0, ret = 0, j = 0;
        u16 act_pci_func;
        u8 pfn;
 
@@ -857,7 +860,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
                        continue;
 
                if (qlcnic_port_eswitch_cfg_capability(adapter)) {
-                       if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+                       if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+                                                                &id))
                                adapter->npars[j].eswitch_status = true;
                        else
                                continue;
@@ -872,15 +876,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
                adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
                adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
 
+               memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
                j++;
        }
 
-       if (qlcnic_82xx_check(adapter)) {
+       /* Update eSwitch status for adapters without per port eSwitch
+        * configuration capability
+        */
+       if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
                for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
                        adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-       } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
-               for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
-                       qlcnic_enable_eswitch(adapter, i, 1);
        }
 
        kfree(pci_info);
@@ -2066,7 +2071,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                return err;
        }
 
-       qlcnic_dcb_init_dcbnl_ops(adapter);
+       qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
 
        return 0;
 }
@@ -2161,17 +2166,6 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
                qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
-static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
-{
-       return __qlcnic_register_dcb(adapter);
-}
-
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
-{
-       kfree(adapter->dcb);
-       adapter->dcb = NULL;
-}
-
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2180,6 +2174,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct qlcnic_hardware_context *ahw;
        int err, pci_using_dac = -1;
        char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+       struct qlcnic_dcb *dcb;
 
        if (pdev->is_virtfn)
                return -ENODEV;
@@ -2254,7 +2249,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = qlcnic_alloc_adapter_resources(adapter);
        if (err)
-               goto err_out_free_netdev;
+               goto err_out_free_wq;
 
        adapter->dev_rst_time = jiffies;
        adapter->ahw->revision_id = pdev->revision;
@@ -2275,8 +2270,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                adapter->portnum = adapter->ahw->pci_func;
                err = qlcnic_start_firmware(adapter);
                if (err) {
-                       dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
-                       goto err_out_free_hw;
+                       dev_err(&pdev->dev, "Loading fw failed. Please reboot\n"
+                               "\t\tIf reboot doesn't help, try flashing the card\n");
+                       goto err_out_maintenance_mode;
                }
 
                qlcnic_get_multiq_capability(adapter);
@@ -2299,8 +2295,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
                adapter->flags |= QLCNIC_NEED_FLR;
 
-               if (adapter->dcb && qlcnic_dcb_attach(adapter))
-                       qlcnic_clear_dcb_ops(adapter);
+               dcb = adapter->dcb;
+
+               if (dcb && qlcnic_dcb_attach(dcb))
+                       qlcnic_clear_dcb_ops(dcb);
 
        } else if (qlcnic_83xx_check(adapter)) {
                adapter->max_drv_tx_rings = 1;
@@ -2392,6 +2390,9 @@ err_out_disable_msi:
 err_out_free_hw:
        qlcnic_free_adapter_resources(adapter);
 
+err_out_free_wq:
+       destroy_workqueue(adapter->qlcnic_wq);
+
 err_out_free_netdev:
        free_netdev(netdev);
 
@@ -2405,9 +2406,24 @@ err_out_free_res:
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        return err;
+
+err_out_maintenance_mode:
+       netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+       SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+       err = register_netdev(netdev);
+
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register net device\n");
+               qlcnic_clr_all_drv_state(adapter, 0);
+               goto err_out_free_hw;
+       }
+
+       pci_set_drvdata(pdev, adapter);
+       qlcnic_add_sysfs(adapter);
+
+       return 0;
 }
 
 static void qlcnic_remove(struct pci_dev *pdev)
@@ -2426,7 +2442,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
        qlcnic_cancel_idc_work(adapter);
        ahw = adapter->ahw;
 
-       qlcnic_dcb_free(adapter);
+       qlcnic_dcb_free(adapter->dcb);
 
        unregister_netdev(netdev);
        qlcnic_sriov_cleanup(adapter);
@@ -2465,7 +2481,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
        pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        if (adapter->qlcnic_wq) {
                destroy_workqueue(adapter->qlcnic_wq);
@@ -2518,8 +2533,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
 static int qlcnic_open(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       u32 state;
        int err;
 
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+               netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+
+               return -EIO;
+       }
+
        netif_carrier_off(netdev);
 
        err = qlcnic_attach(adapter);
@@ -3228,6 +3251,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
                return;
 
        state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+               netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+                          __func__);
+               qlcnic_api_unlock(adapter);
+
+               return;
+       }
 
        if (state == QLCNIC_DEV_READY) {
                QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
@@ -3289,7 +3319,7 @@ qlcnic_attach_work(struct work_struct *work)
                return;
        }
 attach:
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(adapter->dcb);
 
        if (netif_running(netdev)) {
                if (qlcnic_up(adapter, netdev))
@@ -3314,6 +3344,8 @@ done:
 static int
 qlcnic_check_health(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
        u32 state = 0, heartbeat;
        u32 peg_status;
        int err = 0;
@@ -3338,7 +3370,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
                if (adapter->need_fw_reset)
                        goto detach;
 
-               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
+               if (ahw->reset_context && qlcnic_auto_fw_reset)
                        qlcnic_reset_hw_context(adapter);
 
                return 0;
@@ -3381,6 +3413,9 @@ detach:
 
                qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
                QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+       } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+                  adapter->flags & QLCNIC_FW_RESET_OWNER) {
+               qlcnic_dump_fw(adapter);
        }
 
        return 1;
@@ -3613,11 +3648,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
        u8 max_hw = QLCNIC_MAX_TX_RINGS;
        u32 max_allowed;
 
-       if (!qlcnic_82xx_check(adapter)) {
-               netdev_err(netdev, "No Multi TX-Q support\n");
-               return -EINVAL;
-       }
-
        if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
                return -EINVAL;
@@ -3657,8 +3687,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
        u8 max_hw = adapter->ahw->max_rx_ques;
        u32 max_allowed;
 
-       if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
-           !qlcnic_use_msi) {
+       if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_err(netdev, "No RSS support in INT-x mode\n");
                return -EINVAL;
        }
index 15513608d4808851917f1b5294e7733a5c4ea5cd..7763962e2ec4e9128ae22cf7b8ee694248d8058d 100644 (file)
@@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                }
 
                if (ops_index == ops_cnt) {
-                       dev_info(&adapter->pdev->dev,
-                                "Invalid entry type %d, exiting dump\n",
+                       dev_info(dev, "Skipping unknown entry opcode %d\n",
                                 entry->hdr.type);
-                       goto error;
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
                }
 
                /* Collect dump for this entry */
                dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
-               if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
+               if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
                        entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
+               }
+
                buf_offset += entry->hdr.cap_size;
                entry_offset += entry->hdr.offset;
                buffer = fw_dump->data + buf_offset;
        }
-       if (dump_size != buf_offset) {
-               dev_info(&adapter->pdev->dev,
-                        "Captured(%d) and expected size(%d) do not match\n",
-                        buf_offset, dump_size);
-               goto error;
-       } else {
-               fw_dump->clr = 1;
-               snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
-                        adapter->netdev->name);
-               dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
-                        adapter->netdev->name, fw_dump->size);
-               /* Send a udev event to notify availability of FW dump */
-               kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
-               return 0;
-       }
-error:
+
+       fw_dump->clr = 1;
+       snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+       dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
+                adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+       /* Send a udev event to notify availability of FW dump */
+       kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
        if (fw_dump->use_pex_dma)
                dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
                                  fw_dump->dma_buffer, fw_dump->phys_addr);
-       vfree(fw_dump->data);
-       return -EINVAL;
+
+       return 0;
 }
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
index 392b9bd12b4fb67585501f817defd1fb0194b9ca..8b96e29df30fbd2b26da283bca65aa3d88d2cb41 100644 (file)
@@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
                                 int pci_using_dac)
 {
+       struct qlcnic_dcb *dcb;
        int err;
 
        INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -533,8 +534,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
        if (err)
                goto err_out_send_channel_term;
 
-       if (adapter->dcb && qlcnic_dcb_attach(adapter))
-               qlcnic_clear_dcb_ops(adapter);
+       dcb = adapter->dcb;
+
+       if (dcb && qlcnic_dcb_attach(dcb))
+               qlcnic_clear_dcb_ops(dcb);
 
        err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
        if (err)
@@ -1577,7 +1580,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
        if (err)
                goto err_out_term_channel;
 
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(adapter->dcb);
 
        return 0;
 
index 330d9a8774ad69a1d6d8e4d4f38aa87e243bf699..686f460b15022b4b2b7759ad29493a64a1f84592 100644 (file)
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
+       rtnl_lock();
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
        /* After disabling SRIOV re-init the driver in default mode
           configure opmode based on op_mode of function
         */
-       if (qlcnic_83xx_configure_opmode(adapter))
+       if (qlcnic_83xx_configure_opmode(adapter)) {
+               rtnl_unlock();
                return -EIO;
+       }
 
        if (netif_running(netdev))
                __qlcnic_up(adapter, netdev);
 
+       rtnl_unlock();
        return 0;
 }
 
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
                return -EIO;
        }
 
+       rtnl_lock();
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
                __qlcnic_up(adapter, netdev);
 
 error:
+       rtnl_unlock();
        return err;
 }
 
index c6165d05cc13c3806cf60d9ac7513ffdf5832eed..019f4377307f025df8d7f93d3e473e7811f4101a 100644 (file)
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
 void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 state;
 
        if (device_create_bin_file(dev, &bin_attr_port_stats))
                dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
        if (device_create_bin_file(dev, &bin_attr_mem))
                dev_info(dev, "failed to create mem sysfs entry\n");
 
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+               return;
+
        if (device_create_bin_file(dev, &bin_attr_pci_config))
                dev_info(dev, "failed to create pci config sysfs entry");
+
        if (device_create_file(dev, &dev_attr_beacon))
                dev_info(dev, "failed to create beacon sysfs entry");
 
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 state;
 
        device_remove_bin_file(dev, &bin_attr_port_stats);
 
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
        device_remove_file(dev, &dev_attr_diag_mode);
        device_remove_bin_file(dev, &bin_attr_crb);
        device_remove_bin_file(dev, &bin_attr_mem);
+
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+               return;
+
        device_remove_bin_file(dev, &bin_attr_pci_config);
        device_remove_file(dev, &dev_attr_beacon);
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
index 89943377846699e1e2719be8b2da16b07b144d09..0c9c4e89559524d78aa789dfb65e5e32211b8bc1 100644 (file)
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.32"
+#define DRV_VERSION    "1.00.00.33"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
 
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
-                              u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
-                       u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+                       u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+                u16 q_id);
 void ql_queue_fw_error(struct ql_adapter *qdev);
 void ql_mpi_work(struct work_struct *work);
 void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
-               u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
-               struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+                         int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
 void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
-                       struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
 /* #define QL_OB_DUMP */
 
 #ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
 #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
 #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
 #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
 #endif
 
 #ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
 #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
 #else
 #define QL_DUMP_STAT(qdev)
 #endif
 
 #ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
 #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
 #else
 #define QL_DUMP_QDEV(qdev)
 #endif
 
 #ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
 #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
 #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #endif
 
 #ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
 #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
 #else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #endif
 
 #ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
 #else
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
 #endif
 
 #ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
 #define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
 #else
 #define QL_DUMP_ALL(qdev)
index 10093f0c4c0f3d507514da946df9047b3d68d925..6bc5db7039201a1af0a835867e900ed59ab0af13 100644 (file)
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
        int i;
 
        if (!mpi_coredump) {
-               netif_err(qdev, drv, qdev->ndev, "No memory available\n");
-               return -ENOMEM;
+               netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+               return -EINVAL;
        }
 
        /* Try to get the spinlock, but dont worry if
index 2553cf4503b9f83996bb72bd291fb756558644f9..a245dc18d769241bcf23d607458e33538c7ee99c 100644 (file)
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
 
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
        }
 }
 
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+                                 struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                 void *page, size_t *len)
+{
+       u16 *tags;
+
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+               return;
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+               tags = (u16 *)page;
+               /* Look for stacked vlan tags in ethertype field */
+               if (tags[6] == ETH_P_8021Q &&
+                   tags[8] == ETH_P_8021Q)
+                       *len += 2 * VLAN_HLEN;
+               else
+                       *len += VLAN_HLEN;
+       }
+}
+
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                                        struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
        void *addr;
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct napi_struct *napi = &rx_ring->napi;
+       size_t hlen = ETH_HLEN;
 
        skb = netdev_alloc_skb(ndev, length);
        if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                goto err_out;
        }
 
+       /* Update the MAC header length */
+       ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
-       if (skb->len > ndev->mtu + ETH_HLEN) {
+       if (skb->len > ndev->mtu + hlen) {
                netif_err(qdev, drv, qdev->ndev,
                          "Segment too small, dropping.\n");
                rx_ring->rx_dropped++;
                goto err_out;
        }
-       memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+       memcpy(skb_put(skb, hlen), addr, hlen);
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
                     length);
        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-                               lbq_desc->p.pg_chunk.offset+ETH_HLEN,
-                               length-ETH_HLEN);
-       skb->len += length-ETH_HLEN;
-       skb->data_len += length-ETH_HLEN;
-       skb->truesize += length-ETH_HLEN;
+                               lbq_desc->p.pg_chunk.offset + hlen,
+                               length - hlen);
+       skb->len += length - hlen;
+       skb->data_len += length - hlen;
+       skb->truesize += length - hlen;
 
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
                        /* Unfragmented ipv4 UDP frame. */
                        struct iphdr *iph =
-                               (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+                               (struct iphdr *)((u8 *)addr + hlen);
                        if (!(iph->frag_off &
                                htons(IP_MF|IP_OFFSET))) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
        struct bq_desc *sbq_desc;
        struct sk_buff *skb = NULL;
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       size_t hlen = ETH_HLEN;
 
        /*
         * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        skb->data_len += length;
                        skb->truesize += length;
                        length -= length;
-                       __pskb_pull_tail(skb,
-                               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                               VLAN_ETH_HLEN : ETH_HLEN);
+                       ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+                                             lbq_desc->p.pg_chunk.va,
+                                             &hlen);
+                       __pskb_pull_tail(skb, hlen);
                }
        } else {
                /*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        length -= size;
                        i++;
                }
-               __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                               VLAN_ETH_HLEN : ETH_HLEN);
+               ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+                                     &hlen);
+               __pskb_pull_tail(skb, hlen);
        }
        return skb;
 }
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += skb->len;
        skb_record_rx_queue(skb, rx_ring->cq_id);
-       if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+       if (vlan_id != 0xffff)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+       u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
 
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
        }
 }
 
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+                                       netdev_features_t features)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status = 0;
+
+       status = ql_adapter_down(qdev);
+       if (status) {
+               netif_err(qdev, link, qdev->ndev,
+                         "Failed to bring down the adapter\n");
+               return status;
+       }
+
+       /* Update the features with the recent change */
+       ndev->features = features;
+
+       status = ql_adapter_up(qdev);
+       if (status) {
+               netif_err(qdev, link, qdev->ndev,
+                         "Failed to bring up the adapter\n");
+               return status;
+       }
+       return status;
+}
+
 static netdev_features_t qlge_fix_features(struct net_device *ndev,
        netdev_features_t features)
 {
+       int err;
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
+       /* Update the behavior of vlan accel in the adapter */
+       err = qlge_update_hw_vlan_features(ndev, features);
+       if (err)
+               return err;
+
        return features;
 }
 
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
        ql_write32(qdev, SYS, mask | value);
 
        /* Set the default queue, and VLAN behavior. */
-       value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-       mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+       value = NIC_RCV_CFG_DFQ;
+       mask = NIC_RCV_CFG_DFQ_MASK;
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               value |= NIC_RCV_CFG_RV;
+               mask |= (NIC_RCV_CFG_RV << 16);
+       }
        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
        /* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@ static void ql_release_all(struct pci_dev *pdev)
                iounmap(qdev->doorbell_area);
        vfree(qdev->mpi_coredump);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@ static int qlge_probe(struct pci_dev *pdev,
 
        qdev = netdev_priv(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
-       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO_ECN |
-               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
-       ndev->features = ndev->hw_features |
-               NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+       ndev->hw_features = NETIF_F_SG |
+                           NETIF_F_IP_CSUM |
+                           NETIF_F_TSO |
+                           NETIF_F_TSO_ECN |
+                           NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_RXCSUM;
+       ndev->features = ndev->hw_features;
        ndev->vlan_features = ndev->hw_features;
 
        if (test_bit(QL_DMA64, &qdev->flags))
index ff2bf8a4e24773429f02e1b66777e76bdd079134..7ad146080c3649585e5c4611c6ba79883bda6293 100644 (file)
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
                return;
        }
 
-       if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+       if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
                netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
                qdev->core_is_dumped = 1;
                queue_delayed_work(qdev->workqueue,
index e9dc84943cfcb5fa39a8031e5a28051f5963da90..1e49ec5b22324815005bcefcfcbd9eef6e46facc 100644 (file)
@@ -1231,7 +1231,6 @@ err_out_mdio:
        mdiobus_free(lp->mii_bus);
 err_out_unmap:
        netif_napi_del(&lp->napi);
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 
index d2e591955bdde3fd03b40dac1c9c22e3d013c23d..f2a2128165dd98b4c41598fb481c4e4f40417614 100644 (file)
@@ -2052,7 +2052,6 @@ static void cp_remove_one (struct pci_dev *pdev)
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index 3ccedeb8aba03703bcdff318dff2ff888eec4bfa..50a92104dd0a65399f118b6c6505bc04e04329af 100644 (file)
@@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
        pci_release_regions (pdev);
 
        free_netdev(dev);
-       pci_set_drvdata (pdev, NULL);
 }
 
 
index 3397cee89777ba4be6c888fe9711e928ef209e06..799387570766b6642ad4fdbc13f917acd358887a 100644 (file)
@@ -6811,7 +6811,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
        rtl_disable_msi(pdev, tp);
        rtl8169_release_board(pdev, dev, tp->mmio_addr);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static const struct net_device_ops rtl_netdev_ops = {
index 5cd831ebfa83b0a95c472926a0105c521f7c352a..eaf11e47334fbcbca50cc8b6b27084f46f4b47c4 100644 (file)
@@ -483,7 +483,7 @@ static struct sh_eth_cpu_data sh7757_data = {
        .register_type  = SH_ETH_REG_FAST_SH4,
 
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
-       .rmcr_value     = 0x00000001,
+       .rmcr_value     = RMCR_RNC,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -561,7 +561,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000072f,
-       .rmcr_value     = 0x00000001,
+       .rmcr_value     = RMCR_RNC,
 
        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
+       .fdr_value      = 0x0000070f,
+       .rmcr_value     = RMCR_RNC,
 
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
+       .rpadir         = 1,
+       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
@@ -868,7 +872,7 @@ static void update_mac_address(struct net_device *ndev)
 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 {
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
-               memcpy(ndev->dev_addr, mac, 6);
+               memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
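read_mac_address() above reassembles the station address from the MAHR/MALR registers when no MAC address was supplied; only the first two bytes are visible in this hunk, and the sketch below assumes the remaining bytes follow the same pattern (low MAHR bytes, then MALR). Standalone illustration with made-up register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values; the driver reads these via sh_eth_read(ndev, MAHR)
	 * and sh_eth_read(ndev, MALR). Assumed layout: MAHR = bytes 0-3,
	 * MALR = bytes 4-5. */
	uint32_t mahr = 0x02004c6f;
	uint32_t malr = 0x00003b21;
	uint8_t mac[6] = {
		mahr >> 24, (mahr >> 16) & 0xff, (mahr >> 8) & 0xff, mahr & 0xff,
		(malr >> 8) & 0xff, malr & 0xff,
	};

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}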
index a0db02c63b1157930ca0fca070dd733aa39b7c54..f32c1692d31017186e0e33aaf408eddf7d16a3fa 100644 (file)
@@ -321,6 +321,9 @@ enum TD_STS_BIT {
 #define TD_TFP (TD_TFP1|TD_TFP0)
 
 /* RMCR */
+enum RMCR_BIT {
+       RMCR_RNC = 0x00000001,
+};
 #define DEFAULT_RMCR_VALUE     0x00000000
 
 /* ECMR */
index 9f18ae984f9ed38386b16e5284d4d10864687c4b..676c3c057bfba69e69b6116e212e2d03fdff330c 100644 (file)
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
        return rc;
 }
 
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+       unsigned int i;
+       int rc;
+
+       BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+       for (i = 0; i < nic_data->n_piobufs; i++) {
+               MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+                              nic_data->piobuf_handle[i]);
+               rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+                                 NULL, 0, NULL);
+               WARN_ON(rc);
+       }
+
+       nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+       unsigned int i;
+       size_t outlen;
+       int rc = 0;
+
+       BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+       for (i = 0; i < n; i++) {
+               rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+                                 outbuf, sizeof(outbuf), &outlen);
+               if (rc)
+                       break;
+               if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+                       rc = -EIO;
+                       break;
+               }
+               nic_data->piobuf_handle[i] =
+                       MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+               netif_dbg(efx, probe, efx->net_dev,
+                         "allocated PIO buffer %u handle %x\n", i,
+                         nic_data->piobuf_handle[i]);
+       }
+
+       nic_data->n_piobufs = i;
+       if (rc)
+               efx_ef10_free_piobufs(efx);
+       return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(inbuf,
+                        max(MC_CMD_LINK_PIOBUF_IN_LEN,
+                            MC_CMD_UNLINK_PIOBUF_IN_LEN));
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       unsigned int offset, index;
+       int rc;
+
+       BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+       BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+       /* Link a buffer to each VI in the write-combining mapping */
+       for (index = 0; index < nic_data->n_piobufs; ++index) {
+               MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+                              nic_data->piobuf_handle[index]);
+               MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+                              nic_data->pio_write_vi_base + index);
+               rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+                                 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+                                 NULL, 0, NULL);
+               if (rc) {
+                       netif_err(efx, drv, efx->net_dev,
+                                 "failed to link VI %u to PIO buffer %u (%d)\n",
+                                 nic_data->pio_write_vi_base + index, index,
+                                 rc);
+                       goto fail;
+               }
+               netif_dbg(efx, probe, efx->net_dev,
+                         "linked VI %u to PIO buffer %u\n",
+                         nic_data->pio_write_vi_base + index, index);
+       }
+
+       /* Link a buffer to each TX queue */
+       efx_for_each_channel(channel, efx) {
+               efx_for_each_channel_tx_queue(tx_queue, channel) {
+                       /* We assign the PIO buffers to queues in
+                        * reverse order to allow for the following
+                        * special case.
+                        */
+                       offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+                                  tx_queue->channel->channel - 1) *
+                                 efx_piobuf_size);
+                       index = offset / ER_DZ_TX_PIOBUF_SIZE;
+                       offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+                       /* When the host page size is 4K, the first
+                        * host page in the WC mapping may be within
+                        * the same VI page as the last TX queue.  We
+                        * can only link one buffer to each VI.
+                        */
+                       if (tx_queue->queue == nic_data->pio_write_vi_base) {
+                               BUG_ON(index != 0);
+                               rc = 0;
+                       } else {
+                               MCDI_SET_DWORD(inbuf,
+                                              LINK_PIOBUF_IN_PIOBUF_HANDLE,
+                                              nic_data->piobuf_handle[index]);
+                               MCDI_SET_DWORD(inbuf,
+                                              LINK_PIOBUF_IN_TXQ_INSTANCE,
+                                              tx_queue->queue);
+                               rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+                                                 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+                                                 NULL, 0, NULL);
+                       }
+
+                       if (rc) {
+                               /* This is non-fatal; the TX path just
+                                * won't use PIO for this queue
+                                */
+                               netif_err(efx, drv, efx->net_dev,
+                                         "failed to link VI %u to PIO buffer %u (%d)\n",
+                                         tx_queue->queue, index, rc);
+                               tx_queue->piobuf = NULL;
+                       } else {
+                               tx_queue->piobuf =
+                                       nic_data->pio_write_base +
+                                       index * EFX_VI_PAGE_SIZE + offset;
+                               tx_queue->piobuf_offset = offset;
+                               netif_dbg(efx, probe, efx->net_dev,
+                                         "linked VI %u to PIO buffer %u offset %x addr %p\n",
+                                         tx_queue->queue, index,
+                                         tx_queue->piobuf_offset,
+                                         tx_queue->piobuf);
+                       }
+               }
+       }
+
+       return 0;
+
+fail:
+       while (index--) {
+               MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+                              nic_data->pio_write_vi_base + index);
+               efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+                            inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+                            NULL, 0, NULL);
+       }
+       return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+       return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+       return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
 static void efx_ef10_remove(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
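efx_ef10_link_piobufs() above derives, for each TX queue, a (PIO buffer index, byte offset) pair from a byte offset computed in reverse channel order. A worked example of just that arithmetic (the MCDI calls are omitted) with made-up sizes: four TX channels, 256-byte copy buffers standing in for efx_piobuf_size, and 512-byte PIO buffers standing in for ER_DZ_TX_PIOBUF_SIZE so that two buffers get used:

#include <stdio.h>

int main(void)
{
	unsigned int n_tx_channels = 4, tx_channel_offset = 0;
	unsigned int piobuf_size = 256;		/* stand-in for efx_piobuf_size */
	unsigned int piobuf_total = 512;	/* stand-in for ER_DZ_TX_PIOBUF_SIZE */
	unsigned int channel;

	for (channel = 0; channel < n_tx_channels; channel++) {
		unsigned int offset = (tx_channel_offset + n_tx_channels -
				       channel - 1) * piobuf_size;
		unsigned int index = offset / piobuf_total;

		offset %= piobuf_total;
		printf("channel %u -> PIO buffer %u, offset %#x\n",
		       channel, index, offset);
	}
	return 0;
}

With these numbers channel 0 lands at the end of buffer 1 and channel 3 at the start of buffer 0, which is the reverse ordering the comment in the function refers to.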
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
        /* This needs to be after efx_ptp_remove_channel() with no filters */
        efx_ef10_rx_free_indir_table(efx);
 
+       if (nic_data->wc_membase)
+               iounmap(nic_data->wc_membase);
+
        rc = efx_ef10_free_vis(efx);
        WARN_ON(rc != 0);
 
+       if (!nic_data->must_restore_piobufs)
+               efx_ef10_free_piobufs(efx);
+
        efx_mcdi_fini(efx);
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
        kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
        return 0;
 }
 
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
-       unsigned int n_vis =
-               max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       unsigned int uc_mem_map_size, wc_mem_map_size;
+       unsigned int min_vis, pio_write_vi_base, max_vis;
+       void __iomem *membase;
+       int rc;
+
+       min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+       /* Try to allocate PIO buffers if wanted and if the full
+        * number of PIO buffers would be sufficient to allocate one
+        * copy-buffer per TX channel.  Failure is non-fatal, as there
+        * are only a small number of PIO buffers shared between all
+        * functions of the controller.
+        */
+       if (efx_piobuf_size != 0 &&
+           ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+           efx->n_tx_channels) {
+               unsigned int n_piobufs =
+                       DIV_ROUND_UP(efx->n_tx_channels,
+                                    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+               rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+               if (rc)
+                       netif_err(efx, probe, efx->net_dev,
+                                 "failed to allocate PIO buffers (%d)\n", rc);
+               else
+                       netif_dbg(efx, probe, efx->net_dev,
+                                 "allocated %u PIO buffers\n", n_piobufs);
+       }
+#else
+       nic_data->n_piobufs = 0;
+#endif
+
+       /* PIO buffers should be mapped with write-combining enabled,
+        * and we want to make single UC and WC mappings rather than
+        * several of each (in fact that's the only option if host
+        * page size is >4K).  So we may allocate some extra VIs just
+        * for writing PIO buffers through.
+        */
+       uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+                                    ER_DZ_TX_PIOBUF);
+       if (nic_data->n_piobufs) {
+               pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+               wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+                                              nic_data->n_piobufs) *
+                                             EFX_VI_PAGE_SIZE) -
+                                  uc_mem_map_size);
+               max_vis = pio_write_vi_base + nic_data->n_piobufs;
+       } else {
+               pio_write_vi_base = 0;
+               wc_mem_map_size = 0;
+               max_vis = min_vis;
+       }
+
+       /* In case the last attached driver failed to free VIs, do it now */
+       rc = efx_ef10_free_vis(efx);
+       if (rc != 0)
+               return rc;
+
+       rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+       if (rc != 0)
+               return rc;
+
+       /* If we didn't get enough VIs to map all the PIO buffers, free the
+        * PIO buffers
+        */
+       if (nic_data->n_piobufs &&
+           nic_data->n_allocated_vis <
+           pio_write_vi_base + nic_data->n_piobufs) {
+               netif_dbg(efx, probe, efx->net_dev,
+                         "%u VIs are not sufficient to map %u PIO buffers\n",
+                         nic_data->n_allocated_vis, nic_data->n_piobufs);
+               efx_ef10_free_piobufs(efx);
+       }
+
+       /* Shrink the original UC mapping of the memory BAR */
+       membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+       if (!membase) {
+               netif_err(efx, probe, efx->net_dev,
+                         "could not shrink memory BAR to %x\n",
+                         uc_mem_map_size);
+               return -ENOMEM;
+       }
+       iounmap(efx->membase);
+       efx->membase = membase;
+
+       /* Set up the WC mapping if needed */
+       if (wc_mem_map_size) {
+               nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+                                                 uc_mem_map_size,
+                                                 wc_mem_map_size);
+               if (!nic_data->wc_membase) {
+                       netif_err(efx, probe, efx->net_dev,
+                                 "could not allocate WC mapping of size %x\n",
+                                 wc_mem_map_size);
+                       return -ENOMEM;
+               }
+               nic_data->pio_write_vi_base = pio_write_vi_base;
+               nic_data->pio_write_base =
+                       nic_data->wc_membase +
+                       (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+                        uc_mem_map_size);
 
-       return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+               rc = efx_ef10_link_piobufs(efx);
+               if (rc)
+                       efx_ef10_free_piobufs(efx);
+       }
+
+       netif_dbg(efx, probe, efx->net_dev,
+                 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+                 &efx->membase_phys, efx->membase, uc_mem_map_size,
+                 nic_data->wc_membase, wc_mem_map_size);
+
+       return 0;
 }
 
 static int efx_ef10_init_nic(struct efx_nic *efx)
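efx_ef10_dimension_resources() above splits the memory BAR into a page-aligned uncached (UC) mapping covering the ordinary VIs and a write-combined (WC) mapping covering the PIO apertures. A worked example of the size calculation with made-up geometry (4 KB host pages, 8 KB per VI standing in for EFX_VI_PAGE_SIZE, a PIO aperture 0x800 bytes into the VI page standing in for ER_DZ_TX_PIOBUF, 6 VIs, 2 PIO buffers):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int vi_page_size = 8192;	/* stand-in for EFX_VI_PAGE_SIZE */
	unsigned int tx_piobuf_ofst = 0x800;	/* stand-in for ER_DZ_TX_PIOBUF */
	unsigned int min_vis = 6, n_piobufs = 2;

	unsigned int uc = PAGE_ALIGN((min_vis - 1) * vi_page_size + tx_piobuf_ofst);
	unsigned int pio_write_vi_base = uc / vi_page_size;
	unsigned int wc = PAGE_ALIGN((pio_write_vi_base + n_piobufs) *
				     vi_page_size) - uc;

	printf("UC map %#x bytes, PIO VIs from %u, WC map %#x bytes, %u VIs needed\n",
	       uc, pio_write_vi_base, wc, pio_write_vi_base + n_piobufs);
	return 0;
}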
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                nic_data->must_realloc_vis = false;
        }
 
+       if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+               rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+               if (rc == 0) {
+                       rc = efx_ef10_link_piobufs(efx);
+                       if (rc)
+                               efx_ef10_free_piobufs(efx);
+               }
+
+               /* Log an error on failure, but this is non-fatal */
+               if (rc)
+                       netif_err(efx, drv, efx->net_dev,
+                                 "failed to restore PIO buffers (%d)\n", rc);
+               nic_data->must_restore_piobufs = false;
+       }
+
        efx_ef10_rx_push_indir_table(efx);
        return 0;
 }
@@ -444,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
        EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+       EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+       EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+       EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+       EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+       EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+       EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+       EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+       EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+       EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+       EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+       EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+       EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |          \
@@ -498,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
                                  (1ULL << EF10_STAT_rx_length_error))
 
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
-       static const unsigned long hunt_40g_stat_mask[] = {
-               STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-                                HUNT_40G_EXTRA_STAT_MASK)
-       };
-       static const unsigned long hunt_10g_only_stat_mask[] = {
-               STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-                                HUNT_10G_ONLY_STAT_MASK)
-       };
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK (                                   \
+       (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |                   \
+       (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |                 \
+       (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |                    \
+       (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |                  \
+       (1ULL << EF10_STAT_rx_pm_trunc_qbb) |                           \
+       (1ULL << EF10_STAT_rx_pm_discard_qbb) |                         \
+       (1ULL << EF10_STAT_rx_pm_discard_mapping) |                     \
+       (1ULL << EF10_STAT_rx_dp_q_disabled_packets) |                  \
+       (1ULL << EF10_STAT_rx_dp_di_dropped_packets) |                  \
+       (1ULL << EF10_STAT_rx_dp_streaming_packets) |                   \
+       (1ULL << EF10_STAT_rx_dp_emerg_fetch) |                         \
+       (1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+       u64 raw_mask = HUNT_COMMON_STAT_MASK;
        u32 port_caps = efx_mcdi_phy_get_caps(efx);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
        if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-               return hunt_40g_stat_mask;
+               raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
        else
-               return hunt_10g_only_stat_mask;
+               raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+       if (nic_data->datapath_caps &
+           (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+               raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+       return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+       u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+       mask[0] = raw_mask;
+#else
+       mask[0] = raw_mask & 0xffffffff;
+       mask[1] = raw_mask >> 32;
+#endif
 }
 
 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 {
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+       efx_ef10_get_stat_mask(efx, mask);
        return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
-                                     efx_ef10_stat_mask(efx), names);
+                                     mask, names);
 }
 
 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        __le64 generation_start, generation_end;
        u64 *stats = nic_data->stats;
        __le64 *dma_stats;
 
+       efx_ef10_get_stat_mask(efx, mask);
+
        dma_stats = efx->stats_buffer.addr;
        nic_data = efx->nic_data;
 
@@ -543,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
-       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
                             stats, efx->stats_buffer.addr, false);
+       rmb();
        generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
        if (generation_end != generation_start)
                return -EAGAIN;
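efx_ef10_get_stat_mask() above turns the 64-bit raw mask into an unsigned-long bitmap so the generic efx_nic_describe_stats()/efx_nic_update_stats() helpers work the same way on 32-bit and 64-bit hosts. A standalone sketch of the split, with the DECLARE_BITMAP machinery simplified to fixed-width types:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw_mask = (1ULL << 3) | (1ULL << 40);	/* two arbitrary stat bits */
	uint32_t mask[2];	/* the bitmap as a 32-bit host would store it */

	mask[0] = (uint32_t)(raw_mask & 0xffffffff);
	mask[1] = (uint32_t)(raw_mask >> 32);

	/* test_bit()-style lookup: word = bit / 32, position = bit % 32 */
	unsigned int bit = 40;
	int set = (mask[bit / 32] >> (bit % 32)) & 1;

	printf("mask words %08x %08x, bit %u is %s\n",
	       mask[1], mask[0], bit, set ? "set" : "clear");
	return 0;
}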
@@ -563,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
                                    struct rtnl_link_stats64 *core_stats)
 {
-       const unsigned long *mask = efx_ef10_stat_mask(efx);
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        u64 *stats = nic_data->stats;
        size_t stats_count = 0, index;
        int retry;
 
+       efx_ef10_get_stat_mask(efx, mask);
+
        /* If we're unlucky enough to read statistics during the DMA, wait
         * up to 10ms for it to finish (typically takes <500us)
         */
@@ -716,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
        /* All our allocations have been reset */
        nic_data->must_realloc_vis = true;
        nic_data->must_restore_filters = true;
+       nic_data->must_restore_piobufs = true;
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
        /* The datapath firmware might have been changed */
@@ -2137,7 +2491,7 @@ out_unlock:
        return rc;
 }
 
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
        /* no need to do anything here on EF10 */
 }
index b3f4e3755fd97cd7c38130597d54e22c20b591d6..207ac9a1e3de989d0f5e4d588c27b6035eec31ec 100644 (file)
 #define        ESF_DZ_TX_PIO_TYPE_WIDTH 1
 #define        ESF_DZ_TX_PIO_OPT_LBN 60
 #define        ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define        ESE_DZ_TX_OPTION_DESC_PIO 1
 #define        ESF_DZ_TX_PIO_CONT_LBN 59
 #define        ESF_DZ_TX_PIO_CONT_WIDTH 1
 #define        ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
index 07c9bc4c61bc0d15484e79798b36720eff8ff741..2e27837ce6a289dc033c806e613f8dbca42496b9 100644 (file)
@@ -1121,7 +1121,7 @@ static int efx_init_io(struct efx_nic *efx)
         */
        while (dma_mask > 0x7fffffffUL) {
                if (dma_supported(&pci_dev->dev, dma_mask)) {
-                       rc = dma_set_mask(&pci_dev->dev, dma_mask);
+                       rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
                        if (rc == 0)
                                break;
                }
@@ -1134,16 +1134,6 @@ static int efx_init_io(struct efx_nic *efx)
        }
        netif_dbg(efx, probe, efx->net_dev,
                  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-       rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
-       if (rc) {
-               /* dma_set_coherent_mask() is not *allowed* to
-                * fail with a mask that dma_set_mask() accepted,
-                * but just in case...
-                */
-               netif_err(efx, probe, efx->net_dev,
-                         "failed to set consistent DMA mask\n");
-               goto fail2;
-       }
 
        efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
        rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
index 34d00f5771fe4c521177f17061acb9abac846c50..b8235ee5d7d739ae879aaba810620740951af8ff 100644 (file)
 #define EFX_MEM_BAR 2
 
 /* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+                               struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
 
 /* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
-                         unsigned int index, unsigned int n_frags,
-                         unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+                  unsigned int n_frags, unsigned int len, u16 flags);
 static inline void efx_rx_flush_packet(struct efx_channel *channel)
 {
        if (channel->rx_pkt_n_frags)
                __efx_rx_packet(channel);
 }
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 #define EFX_MAX_DMAQ_SIZE 4096UL
 #define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
 }
 #ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                         u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
 static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 {
        if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 #define efx_filter_rfs_enabled() 0
 #endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
 /* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
 
 /* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
 
 /* Ethtool support */
 extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
 
 /* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
-                                  unsigned int rx_usecs, bool rx_adaptive,
-                                  bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
-                                  unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+                           unsigned int rx_usecs, bool rx_adaptive,
+                           bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+                           unsigned int *rx_usecs, bool *rx_adaptive);
 
 /* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
 
 /* MTD */
 #ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
-                      size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+               size_t n_parts, size_t sizeof_part);
 static inline int efx_mtd_probe(struct efx_nic *efx)
 {
        return efx->type->mtd_probe(efx);
 }
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
 #else
 static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
        efx_schedule_channel(channel);
 }
 
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
 {
index 5b471cf5c323a2aabae6ae3c55404194e6f90c42..1f529fa2edb10008a5c03944bea2fb0d995e1e61 100644 (file)
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
        EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
        EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+       EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
        EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
        return 0;
 }
 
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
-                           struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+                                  struct ethtool_ts_info *ts_info)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
index 96ce507d8602eb4a8f40a7cfa034f5cf3dede3c3..4d3f119b67b38ec35719ebac40d59f49ea9b844f 100644 (file)
 #define EFX_USE_QWORD_IO 1
 #endif
 
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
 #ifdef EFX_USE_QWORD_IO
 static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
                                  unsigned int reg)
index 128d7cdf9eb207583afc36dc8706ef3503b3f5a5..366c8e3e37844c8e2d8840a4662467067e937b3b 100644 (file)
 
 /* A reboot/assertion causes the MCDI status word to be set after the
  * command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 20ms for the status word to be set.
+ * via these mechanisms then wait 250ms for the status word to be set.
  */
 #define MCDI_STATUS_DELAY_US           100
-#define MCDI_STATUS_DELAY_COUNT                200
+#define MCDI_STATUS_DELAY_COUNT                2500
 #define MCDI_STATUS_SLEEP_MS                                           \
        (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
 
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
        } else {
                int count;
 
-               /* Nobody was waiting for an MCDI request, so trigger a reset */
-               efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
-
                /* Consume the status word since efx_mcdi_rpc_finish() won't */
                for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
                        if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
                        udelay(MCDI_STATUS_DELAY_US);
                }
                mcdi->new_epoch = true;
+
+               /* Nobody was waiting for an MCDI request, so trigger a reset */
+               efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
        }
 
        spin_unlock(&mcdi->iface_lock);
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                               bool *was_attached)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
        size_t outlen;
        int rc;
 
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                goto fail;
        }
 
+       /* We currently assume we have control of the external link
+        * and are completely trusted by firmware.  Abort probing
+        * if that's not true for this function.
+        */
+       if (driver_operating &&
+           outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+           (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+            (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+             1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+           (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+            1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+               netif_err(efx, probe, efx->net_dev,
+                         "This driver version only supports one function per port\n");
+               return -ENODEV;
+       }
+
        if (was_attached != NULL)
                *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
        return 0;
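The attach check added above insists that the firmware granted both the LINKCTRL and TRUSTED function flags; if either is missing, probing aborts with -ENODEV. A small sketch of the same both-bits-required test, using stand-in bit positions rather than the real MCDI protocol values:

#include <stdio.h>

int main(void)
{
	unsigned int LINKCTRL_LBN = 0, TRUSTED_LBN = 1;	/* stand-in positions */
	unsigned int required = (1u << LINKCTRL_LBN) | (1u << TRUSTED_LBN);
	unsigned int func_flags = 1u << LINKCTRL_LBN;	/* TRUSTED not granted */

	if ((func_flags & required) != required)
		printf("refusing to attach: flags %#x, need %#x\n",
		       func_flags, required);
	return 0;
}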
index c34d0d4e10ee2e0d453b3f192acaed86c5d75301..656a3277c2b210e69ffd028d059ce10b809e8db8 100644 (file)
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
 }
 #endif
 
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
 
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-                       const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+                size_t inlen, efx_dword_t *outbuf, size_t outlen,
+                size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                      const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                        efx_dword_t *outbuf, size_t outlen,
                        size_t *outlen_actual);
 
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-                             const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-                              efx_dword_t *outbuf, size_t outlen,
-                              size_t *outlen_actual);
-
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
                                      unsigned long cookie, int rc,
                                      efx_dword_t *outbuf,
                                      size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
-                             const efx_dword_t *inbuf, size_t inlen,
-                             size_t outlen,
-                             efx_mcdi_async_completer *complete,
-                             unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+                      const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+                      efx_mcdi_async_completer *complete,
+                      unsigned long cookie);
 
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
 
-extern void efx_mcdi_process_event(struct efx_channel *channel,
-                                  efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 
 /* We expect that 16- and 32-bit fields in MCDI requests and responses
  * are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
-                                 u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
-                            u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
-                              size_t *size_out, size_t *erase_size_out,
-                              bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
-                                        const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+                          u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+                       size_t *size_out, size_t *erase_size_out,
+                       bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+                                 int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
 #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
 
 #ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
 #else
 static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
 #endif
 
 #ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
-                            size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
-                             size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+                     size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+                      size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
 #endif
 
 #endif /* EFX_MCDI_H */
index b5cf62492f8e77ff5bf2a8f0504ae7df4cf8c2ef..e0a63ddb7a6ceb1246dc9a7a7349ce7efffa0d9e 100644 (file)
 #define          MC_CMD_MAC_RX_LANES01_DISP_ERR  0x39 /* enum */
 #define          MC_CMD_MAC_RX_LANES23_DISP_ERR  0x3a /* enum */
 #define          MC_CMD_MAC_RX_MATCH_FAULT  0x3b /* enum */
-#define          MC_CMD_GMAC_DMABUF_START  0x40 /* enum */
-#define          MC_CMD_GMAC_DMABUF_END    0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW  0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW  0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_VFIFO_FULL  0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_VFIFO_FULL  0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_QBB  0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_QBB  0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_MAPPING  0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_Q_DISABLED_PKTS  0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_DI_DROPPED_PKTS  0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS  0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS  0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_START  0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_END    0x5f
 #define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
 
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
index 16824fecc5ee5c652a34179aeba4130c93fbc5fb..4a2dc4c281b730fd3b415c4714657c8ead4bdad7 100644 (file)
@@ -20,7 +20,7 @@
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
 
 static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
 {
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
        return sync;
 }
 
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
 
 /*
  * Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
  *
  * This function will sleep
  */
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
-                             int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
 int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Generic transmit disable support through PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
 
 /* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
 
 /* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
-                                    int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+                             unsigned int mmd_mask);
 
 /* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
 
 /* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
 
 /* Get pause parameters from AN if available (otherwise return
  * requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
 u8 efx_mdio_get_pause(struct efx_nic *efx);
 
 /* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
-                                   unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Set or clear flag, debouncing */
 static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
 }
 
 /* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
 
 #endif /* EFX_MDIO_10G_H */
index b172ed13305554cf6009fc445ac4c09123b97550..aac22a1e85b8e081f5c83723721c637161ba1e46 100644 (file)
@@ -182,6 +182,9 @@ struct efx_tx_buffer {
  * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ *     Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
  * @initialised: Has hardware queue been initialised?
  * @read_count: Current read pointer.
  *     This is the number of buffers that have been removed from both rings.
@@ -209,6 +212,7 @@ struct efx_tx_buffer {
  *     blocks
  * @tso_packets: Number of packets via the TSO xmit path
  * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
  * @empty_read_count: If the completion path has seen the queue as empty
  *     and the transmission path has not yet checked this, the value of
  *     @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +227,8 @@ struct efx_tx_queue {
        struct efx_buffer *tsoh_page;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
+       void __iomem *piobuf;
+       unsigned int piobuf_offset;
        bool initialised;
 
        /* Members used mainly on the completion path */
@@ -238,6 +244,7 @@ struct efx_tx_queue {
        unsigned int tso_long_headers;
        unsigned int tso_packets;
        unsigned int pushes;
+       unsigned int pio_packets;
 
        /* Members shared between paths and sometimes updated */
        unsigned int empty_read_count ____cacheline_aligned_in_smp;
index e7dbd2dd202e8bb7332b83560f1fa93d56071674..9c90bf56090f604b4b4df77fb12b0353c922b6e2 100644 (file)
@@ -19,6 +19,7 @@
 #include "bitfield.h"
 #include "efx.h"
 #include "nic.h"
+#include "ef10_regs.h"
 #include "farch_regs.h"
 #include "io.h"
 #include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 
 /* Register dump */
 
-#define REGISTER_REVISION_A    1
-#define REGISTER_REVISION_B    2
-#define REGISTER_REVISION_C    3
-#define REGISTER_REVISION_Z    3       /* latest revision */
+#define REGISTER_REVISION_FA   1
+#define REGISTER_REVISION_FB   2
+#define REGISTER_REVISION_FC   3
+#define REGISTER_REVISION_FZ   3       /* last Falcon arch revision */
+#define REGISTER_REVISION_ED   4
+#define REGISTER_REVISION_EZ   4       /* latest EF10 revision */
 
 struct efx_nic_reg {
        u32 offset:24;
-       u32 min_revision:2, max_revision:2;
+       u32 min_revision:3, max_revision:3;
 };
 
-#define REGISTER(name, min_rev, max_rev) {                             \
-       FR_ ## min_rev ## max_rev ## _ ## name,                         \
-       REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
+#define REGISTER(name, arch, min_rev, max_rev) {                       \
+       arch ## R_ ## min_rev ## max_rev ## _ ## name,                  \
+       REGISTER_REVISION_ ## arch ## min_rev,                          \
+       REGISTER_REVISION_ ## arch ## max_rev                           \
 }
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
 
 static const struct efx_nic_reg efx_nic_regs[] = {
        REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = {
        REGISTER_AB(XX_TXDRV_CTL),
        /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
        /* XX_CORE_STAT is partly RC */
+       REGISTER_DZ(BIU_HW_REV_ID),
+       REGISTER_DZ(MC_DB_LWRD),
+       REGISTER_DZ(MC_DB_HWRD),
 };
 
 struct efx_nic_reg_table {
        u32 offset:24;
-       u32 min_revision:2, max_revision:2;
+       u32 min_revision:3, max_revision:3;
        u32 step:6, rows:21;
 };
 
-#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
        offset,                                                         \
-       REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,   \
+       REGISTER_REVISION_ ## arch ## min_rev,                          \
+       REGISTER_REVISION_ ## arch ## max_rev,                          \
        step, rows                                                      \
 }
-#define REGISTER_TABLE(name, min_rev, max_rev)                         \
+#define REGISTER_TABLE(name, arch, min_rev, max_rev)                   \
        REGISTER_TABLE_DIMENSIONS(                                      \
-               name, FR_ ## min_rev ## max_rev ## _ ## name,           \
-               min_rev, max_rev,                                       \
-               FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,        \
-               FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+               name, arch ## R_ ## min_rev ## max_rev ## _ ## name,    \
+               arch, min_rev, max_rev,                                 \
+               arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+               arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
 #define REGISTER_TABLE_BB_CZ(name)                                     \
-       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,           \
+       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,        \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_BB_ ## name ## _ROWS),             \
-       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,           \
+       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,        \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
 
 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        /* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
         * 1K entries allows for some expansion of queue count and
         * size before we need to change the version. */
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
-                                 A, A, 8, 1024),
+                                 F, A, A, 8, 1024),
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
-                                 B, Z, 8, 1024),
+                                 F, B, Z, 8, 1024),
        REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
        REGISTER_TABLE_BB_CZ(TIMER_TBL),
        REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        /* MSIX_PBA_TABLE is not mapped */
        /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
        REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+       REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
 };
 
 size_t efx_nic_get_regs_len(struct efx_nic *efx)
@@ -469,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
  * @count: Length of the @desc array
  * @mask: Bitmask of which elements of @desc are enabled
  * @stats: Buffer to update with the converted statistics.  The length
- *     of this array must be at least the number of set bits in the
- *     first @count bits of @mask.
+ *     of this array must be at least @count.
  * @dma_buf: DMA buffer containing hardware statistics
  * @accumulate: If set, the converted values will be added rather than
  *     directly stored to the corresponding elements of @stats
@@ -503,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
                        }
 
                        if (accumulate)
-                               *stats += val;
+                               stats[index] += val;
                        else
-                               *stats = val;
+                               stats[index] = val;
                }
-
-               ++stats;
        }
 }
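/* Illustrative sketch, not part of this patch: with the conversion above,
 * @stats is indexed per statistic, so callers size it to @count.  The
 * descriptor array parameter name below is a placeholder for illustration
 * only, not necessarily the driver's real symbol.
 */
static void efx_example_collect_stats(const struct efx_hw_stat_desc *desc,
				      const void *dma_buf, u64 *stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	bitmap_fill(mask, EF10_STAT_COUNT);
	/* stats[] must have at least EF10_STAT_COUNT elements */
	efx_nic_update_stats(desc, EF10_STAT_COUNT, mask,
			     stats, dma_buf, false);
}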
index fda29d39032f422d2c395a4fe68a4c5091654006..11b6112d9249a734701eebe66414bd039702bcdd 100644 (file)
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
        return efx->type->revision;
 }
 
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
 
 /* NIC has two interlinked PCI functions for the same port. */
 static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
        return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
 }
 
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push.  May return a false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+                                        unsigned int write_count)
+{
+       unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+       if (empty_read_count == 0)
+               return false;
+
+       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+       return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
 /* Decide whether to push a TX descriptor to the NIC vs merely writing
  * the doorbell.  This can reduce latency when we are adding a single
  * descriptor to an empty queue, but is otherwise pointless.  Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
                                            unsigned int write_count)
 {
-       unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
-       if (empty_read_count == 0)
-               return false;
+       bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
 
        tx_queue->empty_read_count = 0;
-       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
-               && tx_queue->write_count - write_count == 1;
+       return was_empty && tx_queue->write_count - write_count == 1;
 }
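/* Illustrative sketch, not part of this patch: typical use of the push
 * heuristic above.  efx_example_push_tx_desc()/efx_example_notify_tx_desc()
 * are placeholder names for the hardware-specific descriptor-write and
 * doorbell routines.
 */
static inline void efx_example_tx_flush(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;

	/* ... descriptors filled in, tx_queue->write_count advanced ... */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		/* one descriptor onto an empty queue: push it together
		 * with the doorbell to avoid a separate hardware fetch,
		 * e.g. efx_example_push_tx_desc(tx_queue);
		 */
	} else {
		/* otherwise just ring the doorbell,
		 * e.g. efx_example_notify_tx_desc(tx_queue);
		 */
	}
}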
 
 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
@@ -386,9 +402,27 @@ enum {
        EF10_STAT_rx_align_error,
        EF10_STAT_rx_length_error,
        EF10_STAT_rx_nodesc_drops,
+       EF10_STAT_rx_pm_trunc_bb_overflow,
+       EF10_STAT_rx_pm_discard_bb_overflow,
+       EF10_STAT_rx_pm_trunc_vfifo_full,
+       EF10_STAT_rx_pm_discard_vfifo_full,
+       EF10_STAT_rx_pm_trunc_qbb,
+       EF10_STAT_rx_pm_discard_qbb,
+       EF10_STAT_rx_pm_discard_mapping,
+       EF10_STAT_rx_dp_q_disabled_packets,
+       EF10_STAT_rx_dp_di_dropped_packets,
+       EF10_STAT_rx_dp_streaming_packets,
+       EF10_STAT_rx_dp_emerg_fetch,
+       EF10_STAT_rx_dp_emerg_wait,
        EF10_STAT_COUNT
 };
 
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
 /**
  * struct efx_ef10_nic_data - EF10 architecture NIC state
  * @mcdi_buf: DMA buffer for MCDI
@@ -397,6 +431,13 @@ enum {
  * @n_allocated_vis: Number of VIs allocated to this function
  * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
  * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ *     reboot
  * @rx_rss_context: Firmware handle for our RSS context
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -412,6 +453,11 @@ struct efx_ef10_nic_data {
        unsigned int n_allocated_vis;
        bool must_realloc_vis;
        bool must_restore_filters;
+       unsigned int n_piobufs;
+       void __iomem *wc_membase, *pio_write_base;
+       unsigned int pio_write_vi_base;
+       unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+       bool must_restore_piobufs;
        u32 rx_rss_context;
        u64 stats[EF10_STAT_COUNT];
        bool workaround_35388;
@@ -463,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
        return 1 << efx->vi_scale;
 }
 
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
 
 #else
 
@@ -500,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
 
 #endif
 
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
-                                u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
-                                  struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-                                    bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+                           struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+                             bool spoofchk);
 
 struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
-                               struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
@@ -529,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
  **************************************************************************
  */
 
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
 /* TX data path */
 static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -597,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
 {
        channel->efx->type->ev_read_ack(channel);
 }
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
 
 /* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
 
 /* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
-                                  struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
-                                       enum efx_filter_priority priority,
-                                       u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
-                                    enum efx_filter_priority priority,
-                                    u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
-                                     enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
-                                         enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
-                                      enum efx_filter_priority priority,
-                                      u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+                           bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+                                enum efx_filter_priority priority,
+                                u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+                             enum efx_filter_priority priority, u32 filter_id,
+                             struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+                              enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+                                  enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+                               enum efx_filter_priority priority, u32 *buf,
+                               u32 size);
 #ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
-                                      struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
-                                           unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+                               struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+                                    unsigned int index);
 #endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
 
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
 
 /* Some statistics are computed as A - B where A and B each increase
  * linearly with some hardware counter(s) and the counters are read
@@ -669,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
 }
 
 /* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
 
 /* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
@@ -691,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 }
 
 /* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
 static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 {
        efx->type->rx_push_indir_table(efx);
 }
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
                         unsigned int len, gfp_t gfp_flags);
@@ -716,24 +760,22 @@ struct efx_farch_register_test {
        unsigned address;
        efx_oword_t mask;
 };
-extern int efx_farch_test_registers(struct efx_nic *efx,
-                                   const struct efx_farch_register_test *regs,
-                                   size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+                            const struct efx_farch_register_test *regs,
+                            size_t n_regs);
 
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
 
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                      const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                    const unsigned long *mask,
-                    u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                             const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                         const unsigned long *mask, u64 *stats,
+                         const void *dma_buf, bool accumulate);
 
 #define EFX_MAX_FLUSH_TIME 5000
 
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
-                                    efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+                             efx_qword_t *event);
 
 #endif /* EFX_NIC_H */
index 45eeb70751562651b3a361e7b61f4ea4469b381b..803bf445c08e22df119c8274816cb26cf0b51a2b 100644 (file)
@@ -15,7 +15,7 @@
  */
 extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
 
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
 
 /****************************************************************************
  * AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
 #define QUAKE_LED_TXLINK       (0)
 #define QUAKE_LED_RXLINK       (8)
 
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
 
 /****************************************************************************
 * Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
 #define TXC_GPIO_DIR_INPUT     0
 #define TXC_GPIO_DIR_OUTPUT    1
 
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
 
 #endif
index 4a596725023f07284ef66b3bb2cc337e92a0d69e..8f09e686fc2392a80f56610c78a61c6374b4a410 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
-       const struct iphdr *ip;
        const __be16 *ports;
+       __be16 ether_type;
        int nhoff;
        int rc;
 
-       nhoff = skb_network_offset(skb);
+       /* The core RPS/RFS code has already parsed and validated
+        * VLAN, IP and transport headers.  We assume they are in the
+        * header area.
+        */
 
        if (skb->protocol == htons(ETH_P_8021Q)) {
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-                                   nhoff + sizeof(struct vlan_hdr));
-               if (((const struct vlan_hdr *)skb->data + nhoff)->
-                   h_vlan_encapsulated_proto != htons(ETH_P_IP))
-                       return -EPROTONOSUPPORT;
+               const struct vlan_hdr *vh =
+                       (const struct vlan_hdr *)skb->data;
 
-               /* This is IP over 802.1q VLAN.  We can't filter on the
-                * IP 5-tuple and the vlan together, so just strip the
-                * vlan header and filter on the IP part.
+               /* We can't filter on the IP 5-tuple and the vlan
+                * together, so just strip the vlan header and filter
+                * on the IP part.
                 */
-               nhoff += sizeof(struct vlan_hdr);
-       } else if (skb->protocol != htons(ETH_P_IP)) {
-               return -EPROTONOSUPPORT;
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+               ether_type = vh->h_vlan_encapsulated_proto;
+               nhoff = sizeof(struct vlan_hdr);
+       } else {
+               ether_type = skb->protocol;
+               nhoff = 0;
        }
 
-       /* RFS must validate the IP header length before calling us */
-       EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-       ip = (const struct iphdr *)(skb->data + nhoff);
-       if (ip_is_fragment(ip))
+       if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
-       EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-       ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
-       rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
-                                     ip->daddr, ports[1], ip->saddr, ports[0]);
-       if (rc)
-               return rc;
+       spec.match_flags =
+               EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+               EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+       spec.ether_type = ether_type;
+
+       if (ether_type == htons(ETH_P_IP)) {
+               const struct iphdr *ip =
+                       (const struct iphdr *)(skb->data + nhoff);
+
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+               if (ip_is_fragment(ip))
+                       return -EPROTONOSUPPORT;
+               spec.ip_proto = ip->protocol;
+               spec.rem_host[0] = ip->saddr;
+               spec.loc_host[0] = ip->daddr;
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+               ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+       } else {
+               const struct ipv6hdr *ip6 =
+                       (const struct ipv6hdr *)(skb->data + nhoff);
+
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+                                   nhoff + sizeof(*ip6) + 4);
+               spec.ip_proto = ip6->nexthdr;
+               memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+               memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+               ports = (const __be16 *)(ip6 + 1);
+       }
+
+       spec.rem_port = ports[0];
+       spec.loc_port = ports[1];
 
        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        channel = efx_get_channel(efx, skb_get_rx_queue(skb));
        ++channel->rfs_filters_added;
 
-       netif_info(efx, rx_status, efx->net_dev,
-                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
-                  (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
-                  &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
-                  rxq_index, flow_id, rc);
+       if (ether_type == htons(ETH_P_IP))
+               netif_info(efx, rx_status, efx->net_dev,
+                          "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+                          (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
+                          ntohs(ports[1]), rxq_index, flow_id, rc);
+       else
+               netif_info(efx, rx_status, efx->net_dev,
+                          "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+                          (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
+                          ntohs(ports[1]), rxq_index, flow_id, rc);
 
        return rc;
 }
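/* Illustrative sketch, not part of this patch: efx_filter_rfs() serves as
 * the driver's accelerated-RFS callback and is plugged into net_device_ops
 * under CONFIG_RFS_ACCEL roughly as below (structure shown for context
 * only; other callbacks omitted).
 */
#ifdef CONFIG_RFS_ACCEL
static const struct net_device_ops efx_example_netdev_ops = {
	.ndo_rx_flow_steer	= efx_filter_rfs,
};
#endif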
index 87698ae0bf75f7dab78ca093ea2c3f3b3f1774d5..a2f4a06ffa4e4d8a6cb6829b45b8397ea10d4283 100644 (file)
@@ -43,13 +43,12 @@ struct efx_self_tests {
        struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
 };
 
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
-                                  const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
-                       struct efx_self_tests *tests,
-                       unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+                           int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+                unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
 
 #endif /* EFX_SELFTEST_H */
index 2ac91c5b5eeae4e7a210e5b3693c9f01a04c9f9c..282692c48e6b6de94949e814ecdadf848186edc6 100644 (file)
 #include <net/ipv6.h>
 #include <linux/if_ether.h>
 #include <linux/highmem.h>
+#include <linux/cache.h>
 #include "net_driver.h"
 #include "efx.h"
+#include "io.h"
 #include "nic.h"
 #include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+       return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+       return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+       struct efx_tx_buffer *buffer =
+               __efx_tx_queue_get_insert_buffer(tx_queue);
+
+       EFX_BUG_ON_PARANOID(buffer->len);
+       EFX_BUG_ON_PARANOID(buffer->flags);
+       EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+       return buffer;
+}
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
@@ -83,8 +119,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
 
-       /* Possibly one more per segment for the alignment workaround */
-       if (EFX_WORKAROUND_5391(efx))
+       /* Possibly one more per segment for the alignment workaround,
+        * or for option descriptors
+        */
+       if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;
 
        /* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +183,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
        }
 }
 
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+       int used;
+       u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+                                   u8 *data, int len,
+                                   struct efx_short_copy_buffer *copy_buf)
+{
+       int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+       memcpy_toio(*piobuf, data, block_len);
+       *piobuf += block_len;
+       len -= block_len;
+
+       if (len) {
+               data += block_len;
+               BUG_ON(copy_buf->used);
+               BUG_ON(len > sizeof(copy_buf->buf));
+               memcpy(copy_buf->buf, data, len);
+               copy_buf->used = len;
+       }
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+                                      u8 *data, int len,
+                                      struct efx_short_copy_buffer *copy_buf)
+{
+       if (copy_buf->used) {
+               /* if the copy buffer is partially full, fill it up and write */
+               int copy_to_buf =
+                       min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+               memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+               copy_buf->used += copy_to_buf;
+
+               /* if we didn't fill it up then we're done for now */
+               if (copy_buf->used < sizeof(copy_buf->buf))
+                       return;
+
+               memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+               *piobuf += sizeof(copy_buf->buf);
+               data += copy_to_buf;
+               len -= copy_to_buf;
+               copy_buf->used = 0;
+       }
+
+       efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+                                 struct efx_short_copy_buffer *copy_buf)
+{
+       /* if there's anything in it, write the whole buffer, including junk */
+       if (copy_buf->used)
+               memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+}
+
+/* Traverse skb structure and copy fragments into the PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+                                    u8 __iomem **piobuf,
+                                    struct efx_short_copy_buffer *copy_buf)
+{
+       int i;
+
+       efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+                               copy_buf);
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+               u8 *vaddr;
+
+               vaddr = kmap_atomic(skb_frag_page(f));
+
+               efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+                                          skb_frag_size(f), copy_buf);
+               kunmap_atomic(vaddr);
+       }
+
+       EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+       struct efx_tx_buffer *buffer =
+               efx_tx_queue_get_insert_buffer(tx_queue);
+       u8 __iomem *piobuf = tx_queue->piobuf;
+
+       /* Copy to PIO buffer. Ensure the writes are padded to the end
+        * of a cache line, as this is required for write-combining to be
+        * effective on at least x86.
+        */
+
+       if (skb_shinfo(skb)->nr_frags) {
+               /* The size of the copy buffer will ensure all writes
+                * are the size of a cache line.
+                */
+               struct efx_short_copy_buffer copy_buf;
+
+               copy_buf.used = 0;
+
+               efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+                                        &piobuf, &copy_buf);
+               efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+       } else {
+               /* Pad the write to the size of a cache line.
+                * We can do this because we know the skb_shared_info struct is
+                * after the source, and the destination buffer is big enough.
+                */
+               BUILD_BUG_ON(L1_CACHE_BYTES >
+                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+               memcpy_toio(tx_queue->piobuf, skb->data,
+                           ALIGN(skb->len, L1_CACHE_BYTES));
+       }
+
+       EFX_POPULATE_QWORD_5(buffer->option,
+                            ESF_DZ_TX_DESC_IS_OPT, 1,
+                            ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+                            ESF_DZ_TX_PIO_CONT, 0,
+                            ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+                            ESF_DZ_TX_PIO_BUF_ADDR,
+                            tx_queue->piobuf_offset);
+       ++tx_queue->pio_packets;
+       ++tx_queue->insert_count;
+       return buffer;
+}
+#endif /* EFX_USE_PIO */
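/* Worked example (editorial note, not part of this patch): with 64-byte
 * cache lines, a linear 60-byte skb takes the else branch above and is
 * written as memcpy_toio(piobuf, skb->data, ALIGN(60, 64)), i.e. 64 bytes.
 * The four bytes read beyond the packet still lie inside the skb's own
 * allocation (tailroom and/or the skb_shared_info area), which is what the
 * BUILD_BUG_ON in efx_enqueue_skb_pio() guarantees.
 */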
+
 /*
  * Add a socket buffer to a TX queue
  *
@@ -167,7 +344,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
-       unsigned int len, unmap_len = 0, insert_ptr;
+       unsigned int len, unmap_len = 0;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        unsigned short dma_flags;
@@ -189,6 +366,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        return NETDEV_TX_OK;
        }
 
+       /* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+       if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
+           efx_nic_tx_is_empty(tx_queue) &&
+           efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+               buffer = efx_enqueue_skb_pio(tx_queue, skb);
+               dma_flags = EFX_TX_BUF_OPTION;
+               goto finish_packet;
+       }
+#endif
+
        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
@@ -208,11 +396,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
                /* Add to TX queue, splitting across DMA boundaries */
                do {
-                       insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-                       buffer = &tx_queue->buffer[insert_ptr];
-                       EFX_BUG_ON_PARANOID(buffer->flags);
-                       EFX_BUG_ON_PARANOID(buffer->len);
-                       EFX_BUG_ON_PARANOID(buffer->unmap_len);
+                       buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 
                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        }
 
        /* Transfer ownership of the skb to the final buffer */
+finish_packet:
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
-               insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-               buffer = &tx_queue->buffer[insert_ptr];
+               buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
 
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
  * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ *     descriptors
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
-       unsigned ipv4_id;
+       u16 ipv4_id;
        unsigned packet_space;
 
        /* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
        unsigned int tcp_off;
        unsigned header_len;
        unsigned int ip_base_len;
+       dma_addr_t header_dma_addr;
+       unsigned int header_unmap_len;
 };
 
 
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 {
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
-       unsigned dma_len, insert_ptr;
+       unsigned dma_len;
 
        EFX_BUG_ON_PARANOID(len <= 0);
 
        while (1) {
-               insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-               buffer = &tx_queue->buffer[insert_ptr];
+               buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                ++tx_queue->insert_count;
 
                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);
 
-               EFX_BUG_ON_PARANOID(buffer->len);
-               EFX_BUG_ON_PARANOID(buffer->unmap_len);
-               EFX_BUG_ON_PARANOID(buffer->flags);
-
                buffer->dma_addr = dma_addr;
 
                dma_len = efx_max_tx_len(efx, dma_addr);
@@ -814,19 +998,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
-               buffer = &tx_queue->buffer[tx_queue->insert_count &
-                                          tx_queue->ptr_mask];
+               buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
 }
 
 
 /* Parse the SKB header and initialise state. */
-static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+                    const struct sk_buff *skb)
 {
+       bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+       struct device *dma_dev = &efx->pci_dev->dev;
+       unsigned int header_len, in_len;
+       dma_addr_t dma_addr;
+
        st->ip_off = skb_network_header(skb) - skb->data;
        st->tcp_off = skb_transport_header(skb) - skb->data;
-       st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       in_len = skb_headlen(skb) - header_len;
+       st->header_len = header_len;
+       st->in_len = in_len;
        if (st->protocol == htons(ETH_P_IP)) {
                st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1032,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
 
-       st->out_len = skb->len - st->header_len;
-       st->unmap_len = 0;
-       st->dma_flags = 0;
+       st->out_len = skb->len - header_len;
+
+       if (!use_options) {
+               st->header_unmap_len = 0;
+
+               if (likely(in_len == 0)) {
+                       st->dma_flags = 0;
+                       st->unmap_len = 0;
+                       return 0;
+               }
+
+               dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+                                         in_len, DMA_TO_DEVICE);
+               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+               st->dma_addr = dma_addr;
+               st->unmap_addr = dma_addr;
+               st->unmap_len = in_len;
+       } else {
+               dma_addr = dma_map_single(dma_dev, skb->data,
+                                         skb_headlen(skb), DMA_TO_DEVICE);
+               st->header_dma_addr = dma_addr;
+               st->header_unmap_len = skb_headlen(skb);
+               st->dma_flags = 0;
+               st->dma_addr = dma_addr + header_len;
+               st->unmap_len = 0;
+       }
+
+       return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
 }
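/* Editorial note, not part of this patch: tso_start() now takes one of two
 * mapping strategies.  On Falcon-architecture NICs (!use_options) only the
 * payload portion of the linear area is DMA-mapped, because each segment
 * gets a freshly built header from efx_tsoh_get_buffer().  On EF10
 * (use_options) the whole linear area, headers included, is mapped once;
 * the original headers are then sent in place behind a TSO option
 * descriptor and unmapped when the last segment completes.
 */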
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1077,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
        return -ENOMEM;
 }
 
-static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
-                                const struct sk_buff *skb)
-{
-       int hl = st->header_len;
-       int len = skb_headlen(skb) - hl;
-
-       st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
-                                       len, DMA_TO_DEVICE);
-       if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
-               st->unmap_len = len;
-               st->in_len = len;
-               st->dma_addr = st->unmap_addr;
-               return 0;
-       }
-       return -ENOMEM;
-}
-
 
 /**
  * tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -944,55 +1143,97 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                struct tso_state *st)
 {
        struct efx_tx_buffer *buffer =
-               &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-       struct tcphdr *tsoh_th;
-       unsigned ip_length;
-       u8 *header;
-       int rc;
+               efx_tx_queue_get_insert_buffer(tx_queue);
+       bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+       u8 tcp_flags_clear;
 
-       /* Allocate and insert a DMA-mapped header buffer. */
-       header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
-       if (!header)
-               return -ENOMEM;
-
-       tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
-       /* Copy and update the headers. */
-       memcpy(header, skb->data, st->header_len);
-
-       tsoh_th->seq = htonl(st->seqnum);
-       st->seqnum += skb_shinfo(skb)->gso_size;
-       if (st->out_len > skb_shinfo(skb)->gso_size) {
-               /* This packet will not finish the TSO burst. */
+       if (!is_last) {
                st->packet_space = skb_shinfo(skb)->gso_size;
-               tsoh_th->fin = 0;
-               tsoh_th->psh = 0;
+               tcp_flags_clear = 0x09; /* mask out FIN and PSH */
        } else {
-               /* This packet will be the last in the TSO burst. */
                st->packet_space = st->out_len;
-               tsoh_th->fin = tcp_hdr(skb)->fin;
-               tsoh_th->psh = tcp_hdr(skb)->psh;
+               tcp_flags_clear = 0x00;
        }
-       ip_length = st->ip_base_len + st->packet_space;
 
-       if (st->protocol == htons(ETH_P_IP)) {
-               struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
+       if (!st->header_unmap_len) {
+               /* Allocate and insert a DMA-mapped header buffer. */
+               struct tcphdr *tsoh_th;
+               unsigned ip_length;
+               u8 *header;
+               int rc;
+
+               header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+               if (!header)
+                       return -ENOMEM;
 
-               tsoh_iph->tot_len = htons(ip_length);
+               tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+               /* Copy and update the headers. */
+               memcpy(header, skb->data, st->header_len);
+
+               tsoh_th->seq = htonl(st->seqnum);
+               ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+               ip_length = st->ip_base_len + st->packet_space;
+
+               if (st->protocol == htons(ETH_P_IP)) {
+                       struct iphdr *tsoh_iph =
+                               (struct iphdr *)(header + st->ip_off);
+
+                       tsoh_iph->tot_len = htons(ip_length);
+                       tsoh_iph->id = htons(st->ipv4_id);
+               } else {
+                       struct ipv6hdr *tsoh_iph =
+                               (struct ipv6hdr *)(header + st->ip_off);
+
+                       tsoh_iph->payload_len = htons(ip_length);
+               }
 
-               /* Linux leaves suitable gaps in the IP ID space for us to fill. */
-               tsoh_iph->id = htons(st->ipv4_id);
-               st->ipv4_id++;
+               rc = efx_tso_put_header(tx_queue, buffer, header);
+               if (unlikely(rc))
+                       return rc;
        } else {
-               struct ipv6hdr *tsoh_iph =
-                       (struct ipv6hdr *)(header + st->ip_off);
+               /* Send the original headers with a TSO option descriptor
+                * in front
+                */
+               u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
 
-               tsoh_iph->payload_len = htons(ip_length);
+               buffer->flags = EFX_TX_BUF_OPTION;
+               buffer->len = 0;
+               buffer->unmap_len = 0;
+               EFX_POPULATE_QWORD_5(buffer->option,
+                                    ESF_DZ_TX_DESC_IS_OPT, 1,
+                                    ESF_DZ_TX_OPTION_TYPE,
+                                    ESE_DZ_TX_OPTION_DESC_TSO,
+                                    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+                                    ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+                                    ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+               ++tx_queue->insert_count;
+
+               /* We mapped the headers in tso_start().  Unmap them
+                * when the last segment is completed.
+                */
+               buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+               buffer->dma_addr = st->header_dma_addr;
+               buffer->len = st->header_len;
+               if (is_last) {
+                       buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+                       buffer->unmap_len = st->header_unmap_len;
+                       /* Ensure we only unmap them once in case of a
+                        * later DMA mapping error and rollback
+                        */
+                       st->header_unmap_len = 0;
+               } else {
+                       buffer->flags = EFX_TX_BUF_CONT;
+                       buffer->unmap_len = 0;
+               }
+               ++tx_queue->insert_count;
        }
 
-       rc = efx_tso_put_header(tx_queue, buffer, header);
-       if (unlikely(rc))
-               return rc;
+       st->seqnum += skb_shinfo(skb)->gso_size;
+
+       /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+       ++st->ipv4_id;
 
        ++tx_queue->tso_packets;
 
@@ -1023,12 +1264,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
-       tso_start(&state, skb);
+       rc = tso_start(&state, efx, skb);
+       if (rc)
+               goto mem_err;
 
-       /* Assume that skb header area contains exactly the headers, and
-        * all payload is in the frag list.
-        */
-       if (skb_headlen(skb) == state.header_len) {
+       if (likely(state.in_len == 0)) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
@@ -1037,9 +1277,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                if (rc)
                        goto mem_err;
        } else {
-               rc = tso_get_head_fragment(&state, efx, skb);
-               if (rc)
-                       goto mem_err;
+               /* Payload starts in the header area. */
                frag_i = -1;
        }
 
@@ -1091,6 +1329,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                                       state.unmap_len, DMA_TO_DEVICE);
        }
 
+       /* Free the header DMA mapping, if using option descriptors */
+       if (state.header_unmap_len)
+               dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+                                state.header_unmap_len, DMA_TO_DEVICE);
+
        efx_enqueue_unwind(tx_queue);
        return NETDEV_TX_OK;
 }
index 770036bc2d87c9fde8335241a43b52a5df581a6f..513ed8b1ba582add37602ab4c93faaf462c7f3a3 100644 (file)
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
        dev->watchdog_timeo     = timeout;
        dev->irq                = MACE_ETHERNET_IRQ;
        dev->base_addr          = (unsigned long)&mace->eth;
-       memcpy(dev->dev_addr, o2meth_eaddr, 6);
+       memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
 
        priv = netdev_priv(dev);
        spin_lock_init(&priv->meth_lock);
index ee18e6f7b4fe19d9b8e3bbec69fd5e2cd098cb81..acbbe48a519c0c673ff8de55c31b1690c3976cb5 100644 (file)
@@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev)
        cancel_work_sync(&tp->phy_task);
        unregister_netdev(dev);
        sis190_release_board(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver sis190_pci_driver = {
index 03b256af7ed5f8c68a75a47cea63cf8f6bc7bb2e..8c5c24a16f8a22e2cd7e78ca22ee9c9e253d38b9 100644 (file)
@@ -1535,7 +1535,6 @@ static void epic_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        /* pci_power_off(pdev, -1); */
 }
 
index e85c2e7e82468e8d53d4e5a14f692d9ac6a34c5c..afd9873e9bdb4a527666681e2b55da0504e43ea5 100644 (file)
@@ -95,14 +95,6 @@ static const char version[] =
 #define USE_32_BIT 1
 #endif
 
-#if defined(__H8300H__) || defined(__H8300S__)
-#define NO_AUTOPROBE
-#undef insl
-#undef outsl
-#define insl(a,b,l)  io_insl_noswap(a,b,l)
-#define outsl(a,b,l) io_outsl_noswap(a,b,l)
-#endif
-
 /*
  .the SMC9194 can be at any of the following port addresses.  To change,
  .for a slightly different card, you can add it to the array.  Keep in
@@ -114,12 +106,6 @@ struct devlist {
        unsigned int irq;
 };
 
-#if defined(CONFIG_H8S_EDOSK2674)
-static struct devlist smc_devlist[] __initdata = {
-       {.port = 0xf80000, .irq = 16},
-       {.port = 0,        .irq = 0 },
-};
-#else
 static struct devlist smc_devlist[] __initdata = {
        {.port = 0x200, .irq = 0},
        {.port = 0x220, .irq = 0},
@@ -139,7 +125,6 @@ static struct devlist smc_devlist[] __initdata = {
        {.port = 0x3E0, .irq = 0},
        {.port = 0,     .irq = 0},
 };
-#endif
 /*
  . Wait time for memory to be free.  This probably shouldn't be
  . tuned that much, as waiting for this means nothing else happens
@@ -651,11 +636,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
 #ifdef USE_32_BIT
        if ( length & 0x2  ) {
                outsl(ioaddr + DATA_1, buf,  length >> 2 );
-#if !defined(__H8300H__) && !defined(__H8300S__)
                outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#else
-               ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#endif
        }
        else
                outsl(ioaddr + DATA_1, buf,  length >> 2 );
@@ -899,7 +880,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
                retval = -ENODEV;
                goto err_out;
        }
-#if !defined(CONFIG_H8S_EDOSK2674)
        /* well, we've already written once, so hopefully another time won't
           hurt.  This time, I need to switch the bank register to bank 1,
           so I can access the base address register */
@@ -914,10 +894,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
                retval = -ENODEV;
                goto err_out;
        }
-#else
-       (void)base_address_register; /* Warning suppression */
-#endif
-
 
        /*  check if the revision register is something that I recognize.
            These might need to be added to later, as future revisions
index 5730fe2445a6c1acf5dfa5cd8767d1b1bb749734..98eedb90cdc3c0220c6f8f4d26d2b7f945cf9624 100644 (file)
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] =  {
                        void __iomem *__ioaddr = ioaddr;                \
                        if (__len >= 2 && (unsigned long)__ptr & 2) {   \
                                __len -= 2;                             \
-                               SMC_outw(*(u16 *)__ptr, ioaddr,         \
-                                       DATA_REG(lp));          \
+                               SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                                __ptr += 2;                             \
                        }                                               \
                        if (SMC_CAN_USE_DATACS && lp->datacs)           \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] =  {
                        SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
                        if (__len & 2) {                                \
                                __ptr += (__len & ~3);                  \
-                               SMC_outw(*((u16 *)__ptr), ioaddr,       \
-                                        DATA_REG(lp));         \
+                               SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                        }                                               \
                } else if (SMC_16BIT(lp))                               \
                        SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1);   \
index 5fdbc2686eb3a2f6dda0a9f1f4339e641011ffb4..01f8459c321393342def894dbce875322eee3402 100644 (file)
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
                SMSC_TRACE(pdata, probe,
                           "MAC Address is specified by configuration");
        } else if (is_valid_ether_addr(pdata->config.mac)) {
-               memcpy(dev->dev_addr, pdata->config.mac, 6);
+               memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
                SMSC_TRACE(pdata, probe,
                           "MAC Address specified by platform data");
        } else {
index 5f9e79f7f2df52f8b6a3c1955bd7e99ba4bbb122..e55e3365a30658c793f751c11b05094256bbdf50 100644 (file)
@@ -1707,8 +1707,6 @@ static void smsc9420_remove(struct pci_dev *pdev)
        if (!dev)
                return;
 
-       pci_set_drvdata(pdev, NULL);
-
        pd = netdev_priv(dev);
        unregister_netdev(dev);
 
index 7eb8babed2cbe38f822aab596f2451d75112c9a4..fc94f202a43e40f24247019fb6c5906da8ac20be 100644 (file)
@@ -451,14 +451,14 @@ struct mac_device_info {
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
 struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
 
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
-                               unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
-                               unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+                        unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+                        unsigned int high, unsigned int low);
 
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
 extern const struct stmmac_chain_mode_ops chain_mode_ops;
 
index 8e5662ce488bd80d2a0a7457c25fbf49ca523bb1..def266da55dbe617e8be83f7a3d630f8230b9c5c 100644 (file)
 #define DMA_STATUS_TI  0x00000001      /* Transmit Interrupt */
 #define DMA_CONTROL_FTF                0x00100000      /* Flush transmit FIFO */
 
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
-                              struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
 
 #endif /* __DWMAC_DMA_H__ */
index 48ec001566b5540421c5e5a25fe0b18db3c25e58..8607488cbcfcfaea7bc70510f0a3f601c5dade7d 100644 (file)
@@ -128,8 +128,8 @@ struct stmmac_counters {
        unsigned int mmc_rx_icmp_err_octets;
 };
 
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
 
 #endif /* __MMC_H__ */
index f16a9bdf45bb6738c7fca4063cf96307d6303462..22f89ffdfd95fc3e8ca530808e4d6d0dfdcfeaf2 100644 (file)
@@ -110,14 +110,14 @@ struct stmmac_priv {
 
 extern int phyaddr;
 
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
 extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
index 023b7c29cb2fc3408ff3e3a00c03b9656ddd2ab4..644d80ece067717f3dd296d6ed5f54d9c90a7134 100644 (file)
@@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
 
        stmmac_dvr_remove(ndev);
 
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, priv->ioaddr);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index 759441b29e535b4c7b1c0f94e6f7520fc628fbdd..b4d50d74ba183b29156bce538af3c5c03369d9ea 100644 (file)
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
 #if defined(CONFIG_SPARC)
        addr = of_get_property(cp->of_node, "local-mac-address", NULL);
        if (addr != NULL) {
-               memcpy(dev_addr, addr, 6);
+               memcpy(dev_addr, addr, ETH_ALEN);
                goto done;
        }
 #endif
@@ -5168,7 +5168,6 @@ err_out_free_netdev:
 
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return -ENODEV;
 }
 
@@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev)
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index f28460ce24a7134a371dd148887cc5466e934f8b..388540fcb9773dab8973094bb4ec76f74fe4f73d 100644 (file)
@@ -9875,7 +9875,6 @@ err_out_free_res:
 
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        return err;
 }
@@ -9900,7 +9899,6 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index e62df2b81302bd32881ca22daef127e04985679e..b5655b79bd3b25bcd2e90376fd18e67fb78e1f93 100644 (file)
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
                return -1;
 #endif
        }
-       memcpy(dev->dev_addr, addr, 6);
+       memcpy(dev->dev_addr, addr, ETH_ALEN);
 #else
        get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
 #endif
@@ -2806,8 +2806,6 @@ static void gem_remove_one(struct pci_dev *pdev)
                iounmap(gp->regs);
                pci_release_regions(pdev);
                free_netdev(dev);
-
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index e37b587b386048dd299ac6ad0af55dd47d3e199f..0dbf46f08ed56a479a647a18f4c73692f2ec6160 100644 (file)
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
 
                addr = of_get_property(dp, "local-mac-address", &len);
 
-               if (qfe_slot != -1 && addr && len == 6)
-                       memcpy(dev->dev_addr, addr, 6);
+               if (qfe_slot != -1 && addr && len == ETH_ALEN)
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
                else
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
        }
 
        hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
                    (addr = of_get_property(dp, "local-mac-address", &len))
                        != NULL &&
                    len == 6) {
-                       memcpy(dev->dev_addr, addr, 6);
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
                } else {
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
                }
 #else
                get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
        pci_release_regions(hp->happy_dev);
 
        free_netdev(net_dev);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
index b072f4dba033c1661bf4a341747b1d82d4874fc7..5695ae2411dea0f74718d3a87254aab4cf97ac42 100644 (file)
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
        if (!dev)
                return -ENOMEM;
 
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 
        qe = netdev_priv(dev);
 
index 571452e786d5103460180e54481cd11ae88e4927..dd0dd6279b4eec8168c006457643568032ee358f 100644 (file)
@@ -2447,7 +2447,6 @@ static void bdx_remove(struct pci_dev *pdev)
        iounmap(nic->regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        vfree(nic);
 
        RET();
index de71b1ec4625e8f21a9ad4d56db5fa8592c89e6e..53150c25a96bd455f58dc30ca3c592b6a598461e 100644 (file)
@@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA
          To compile this driver as a module, choose M here: the module
          will be called davinci_cpdma.  This is recommended.
 
+config TI_CPSW_PHY_SEL
+       boolean "TI CPSW Switch Phy sel Support"
+       depends on TI_CPSW
+       ---help---
+         This driver supports configuring the mode of the PHY connected
+         to the CPSW.
+
 config TI_CPSW
        tristate "TI CPSW Switch Support"
        depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
        select TI_DAVINCI_CPDMA
        select TI_DAVINCI_MDIO
+       select TI_CPSW_PHY_SEL
        ---help---
          This driver supports TI's CPSW Ethernet Switch.
 
index c65148e8aa1d4810e00cbe6d7448fa72572847d7..9cfaab8152be08aa16bc37c91f622ba8e287989b 100644 (file)
@@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o
 obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644 (file)
index 0000000..148da9a
--- /dev/null
@@ -0,0 +1,161 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII       0
+#define AM33XX_GMII_SEL_MODE_RMII      1
+#define AM33XX_GMII_SEL_MODE_RGMII     2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN        BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN        BIT(6)
+
+struct cpsw_phy_sel_priv {
+       struct device   *dev;
+       u32 __iomem     *gmii_sel;
+       bool            rmii_clock_external;
+       void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+                            phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+                                phy_interface_t phy_mode, int slave)
+{
+       u32 reg;
+       u32 mask;
+       u32 mode = 0;
+
+       reg = readl(priv->gmii_sel);
+
+       switch (phy_mode) {
+       case PHY_INTERFACE_MODE_RMII:
+               mode = AM33XX_GMII_SEL_MODE_RMII;
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               mode = AM33XX_GMII_SEL_MODE_RGMII;
+               break;
+
+       case PHY_INTERFACE_MODE_MII:
+       default:
+               mode = AM33XX_GMII_SEL_MODE_MII;
+               break;
+       };
+
+       mask = 0x3 << (slave * 2) | BIT(slave + 6);
+       mode <<= slave * 2;
+
+       if (priv->rmii_clock_external) {
+               if (slave == 0)
+                       mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+               else
+                       mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+       }
+
+       reg &= ~mask;
+       reg |= mode;
+
+       writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+       struct device_node *node = (struct device_node *)data;
+       return dev->of_node == node &&
+               dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+       struct device_node *node;
+       struct cpsw_phy_sel_priv *priv;
+
+       node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+       if (!node) {
+               dev_err(dev, "Phy mode driver DT not found\n");
+               return;
+       }
+
+       dev = bus_find_device(&platform_bus_type, NULL, node, match);
+       priv = dev_get_drvdata(dev);
+
+       priv->cpsw_phy_sel(priv, phy_mode, slave);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+       {
+               .compatible     = "ti,am3352-cpsw-phy-sel",
+               .data           = &cpsw_gmii_sel_am3352,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       const struct of_device_id *of_id;
+       struct cpsw_phy_sel_priv *priv;
+
+       of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+       if (!of_id)
+               return -EINVAL;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+               return -ENOMEM;
+       }
+
+       priv->cpsw_phy_sel = of_id->data;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+       priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->gmii_sel))
+               return PTR_ERR(priv->gmii_sel);
+
+       if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+               priv->rmii_clock_external = true;
+
+       dev_set_drvdata(&pdev->dev, priv);
+
+       return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+       .probe          = cpsw_phy_sel_probe,
+       .driver         = {
+               .name   = "cpsw-phy-sel",
+               .owner  = THIS_MODULE,
+               .of_match_table = cpsw_phy_sel_id_table,
+       },
+};
+
+module_platform_driver(cpsw_phy_sel_driver);
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_LICENSE("GPL v2");
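For reference, cpsw_gmii_sel_am3352() above packs each slave's interface mode into a two-bit field of the GMII_SEL register and, when the RMII reference clock comes from the pad, also sets that slave's IO clock enable bit. A small illustrative helper (not part of the driver) that computes the same per-slave field:

        /* Illustrative only: per-slave GMII_SEL value, matching the mask/mode
         * arithmetic above (mode is one of the AM33XX_GMII_SEL_MODE_* values). */
        static u32 gmii_sel_field(int slave, u32 mode, bool rmii_clock_external)
        {
                u32 val = mode << (slave * 2);

                if (rmii_clock_external)
                        val |= BIT(slave + 6);  /* RMII1/RMII2 IO clock enable */
                return val;
        }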
index 79974e31187ac19af63452a3a0c421a5ff2b7cf7..90d41d26ec6d8c37f04682aa05b8731a4d4c3002 100644 (file)
@@ -367,8 +367,6 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
-       struct resource                 *cpsw_res;
-       struct resource                 *cpsw_wr_res;
        struct napi_struct              napi;
        struct device                   *dev;
        struct cpsw_platform_data       data;
@@ -639,13 +637,6 @@ void cpsw_rx_handler(void *token, int len, int status)
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
-       u32 rx, tx, rx_thresh;
-
-       rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
-       rx = __raw_readl(&priv->wr_regs->rx_stat);
-       tx = __raw_readl(&priv->wr_regs->tx_stat);
-       if (!rx_thresh && !rx && !tx)
-               return IRQ_NONE;
 
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
@@ -1023,6 +1014,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                dev_info(priv->dev, "phy found : id is : 0x%x\n",
                         slave->phy->phy_id);
                phy_start(slave->phy);
+
+               /* Configure GMII_SEL register */
+               cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
+                            slave->slave_num);
        }
 }
 
@@ -1169,9 +1164,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
                }
        }
 
+       napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
-       napi_enable(&priv->napi);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
@@ -1712,67 +1707,60 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
        if (of_property_read_u32(node, "active_slave", &prop)) {
                pr_err("Missing active_slave property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->active_slave = prop;
 
        if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
                pr_err("Missing cpts_clock_mult property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->cpts_clock_mult = prop;
 
        if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
                pr_err("Missing cpts_clock_shift property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->cpts_clock_shift = prop;
 
-       data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
-                                  GFP_KERNEL);
+       data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
+                                       * sizeof(struct cpsw_slave_data),
+                                       GFP_KERNEL);
        if (!data->slave_data)
-               return -EINVAL;
+               return -ENOMEM;
 
        if (of_property_read_u32(node, "cpdma_channels", &prop)) {
                pr_err("Missing cpdma_channels property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->channels = prop;
 
        if (of_property_read_u32(node, "ale_entries", &prop)) {
                pr_err("Missing ale_entries property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->ale_entries = prop;
 
        if (of_property_read_u32(node, "bd_ram_size", &prop)) {
                pr_err("Missing bd_ram_size property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->bd_ram_size = prop;
 
        if (of_property_read_u32(node, "rx_descs", &prop)) {
                pr_err("Missing rx_descs property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->rx_descs = prop;
 
        if (of_property_read_u32(node, "mac_control", &prop)) {
                pr_err("Missing mac_control property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->mac_control = prop;
 
-       if (!of_property_read_u32(node, "dual_emac", &prop))
-               data->dual_emac = prop;
+       if (of_property_read_bool(node, "dual_emac"))
+               data->dual_emac = 1;
 
        /*
         * Populate all the child nodes here...
@@ -1782,7 +1770,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        if (ret)
                pr_warn("Doesn't have any child node\n");
 
-       for_each_node_by_name(slave_node, "slave") {
+       for_each_child_of_node(node, slave_node) {
                struct cpsw_slave_data *slave_data = data->slave_data + i;
                const void *mac_addr = NULL;
                u32 phyid;
@@ -1791,11 +1779,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                struct device_node *mdio_node;
                struct platform_device *mdio;
 
+               /* This is no slave child node, continue */
+               if (strcmp(slave_node->name, "slave"))
+                       continue;
+
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        pr_err("Missing slave[%d] phy_id property\n", i);
-                       ret = -EINVAL;
-                       goto error_ret;
+                       return -EINVAL;
                }
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
@@ -1825,10 +1816,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        }
 
        return 0;
-
-error_ret:
-       kfree(data->slave_data);
-       return ret;
 }
 
 static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1870,7 +1857,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        priv_sl2->coal_intvl = 0;
        priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
 
-       priv_sl2->cpsw_res = priv->cpsw_res;
        priv_sl2->regs = priv->regs;
        priv_sl2->host_port = priv->host_port;
        priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1914,8 +1900,8 @@ static int cpsw_probe(struct platform_device *pdev)
        struct cpsw_priv                *priv;
        struct cpdma_params             dma_params;
        struct cpsw_ale_params          ale_params;
-       void __iomem                    *ss_regs, *wr_regs;
-       struct resource                 *res;
+       void __iomem                    *ss_regs;
+       struct resource                 *res, *ss_res;
        u32 slave_offset, sliver_offset, slave_size;
        int ret = 0, i, k = 0;
 
@@ -1951,7 +1937,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (cpsw_probe_dt(&priv->data, pdev)) {
                pr_err("cpsw: platform data missing\n");
                ret = -ENODEV;
-               goto clean_ndev_ret;
+               goto clean_runtime_disable_ret;
        }
        data = &priv->data;
 
@@ -1965,11 +1951,12 @@ static int cpsw_probe(struct platform_device *pdev)
 
        memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
 
-       priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
-                              GFP_KERNEL);
+       priv->slaves = devm_kzalloc(&pdev->dev,
+                                   sizeof(struct cpsw_slave) * data->slaves,
+                                   GFP_KERNEL);
        if (!priv->slaves) {
-               ret = -EBUSY;
-               goto clean_ndev_ret;
+               ret = -ENOMEM;
+               goto clean_runtime_disable_ret;
        }
        for (i = 0; i < data->slaves; i++)
                priv->slaves[i].slave_num = i;
@@ -1977,55 +1964,31 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->slaves[0].ndev = ndev;
        priv->emac_port = 0;
 
-       priv->clk = clk_get(&pdev->dev, "fck");
+       priv->clk = devm_clk_get(&pdev->dev, "fck");
        if (IS_ERR(priv->clk)) {
-               dev_err(&pdev->dev, "fck is not found\n");
+               dev_err(priv->dev, "fck is not found\n");
                ret = -ENODEV;
-               goto clean_slave_ret;
+               goto clean_runtime_disable_ret;
        }
        priv->coal_intvl = 0;
        priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
 
-       priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!priv->cpsw_res) {
-               dev_err(priv->dev, "error getting i/o resource\n");
-               ret = -ENOENT;
-               goto clean_clk_ret;
-       }
-       if (!request_mem_region(priv->cpsw_res->start,
-                               resource_size(priv->cpsw_res), ndev->name)) {
-               dev_err(priv->dev, "failed request i/o region\n");
-               ret = -ENXIO;
-               goto clean_clk_ret;
-       }
-       ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
-       if (!ss_regs) {
-               dev_err(priv->dev, "unable to map i/o region\n");
-               goto clean_cpsw_iores_ret;
+       ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+       if (IS_ERR(ss_regs)) {
+               ret = PTR_ERR(ss_regs);
+               goto clean_runtime_disable_ret;
        }
        priv->regs = ss_regs;
        priv->version = __raw_readl(&priv->regs->id_ver);
        priv->host_port = HOST_PORT_NUM;
 
-       priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!priv->cpsw_wr_res) {
-               dev_err(priv->dev, "error getting i/o resource\n");
-               ret = -ENOENT;
-               goto clean_iomap_ret;
-       }
-       if (!request_mem_region(priv->cpsw_wr_res->start,
-                       resource_size(priv->cpsw_wr_res), ndev->name)) {
-               dev_err(priv->dev, "failed request i/o region\n");
-               ret = -ENXIO;
-               goto clean_iomap_ret;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->wr_regs)) {
+               ret = PTR_ERR(priv->wr_regs);
+               goto clean_runtime_disable_ret;
        }
-       wr_regs = ioremap(priv->cpsw_wr_res->start,
-                               resource_size(priv->cpsw_wr_res));
-       if (!wr_regs) {
-               dev_err(priv->dev, "unable to map i/o region\n");
-               goto clean_cpsw_wr_iores_ret;
-       }
-       priv->wr_regs = wr_regs;
 
        memset(&dma_params, 0, sizeof(dma_params));
        memset(&ale_params, 0, sizeof(ale_params));
@@ -2056,12 +2019,12 @@ static int cpsw_probe(struct platform_device *pdev)
                slave_size           = CPSW2_SLAVE_SIZE;
                sliver_offset        = CPSW2_SLIVER_OFFSET;
                dma_params.desc_mem_phys =
-                       (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+                       (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
                break;
        default:
                dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
                ret = -ENODEV;
-               goto clean_cpsw_wr_iores_ret;
+               goto clean_runtime_disable_ret;
        }
        for (i = 0; i < priv->data.slaves; i++) {
                struct cpsw_slave *slave = &priv->slaves[i];
@@ -2089,7 +2052,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (!priv->dma) {
                dev_err(priv->dev, "error initializing dma\n");
                ret = -ENOMEM;
-               goto clean_wr_iomap_ret;
+               goto clean_runtime_disable_ret;
        }
 
        priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2124,8 +2087,8 @@ static int cpsw_probe(struct platform_device *pdev)
 
        while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
-                       if (request_irq(i, cpsw_interrupt, 0,
-                                       dev_name(&pdev->dev), priv)) {
+                       if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
+                                            dev_name(priv->dev), priv)) {
                                dev_err(priv->dev, "error attaching irq\n");
                                goto clean_ale_ret;
                        }
@@ -2147,7 +2110,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(priv->dev, "error registering net device\n");
                ret = -ENODEV;
-               goto clean_irq_ret;
+               goto clean_ale_ret;
        }
 
        if (cpts_register(&pdev->dev, priv->cpts,
@@ -2155,44 +2118,27 @@ static int cpsw_probe(struct platform_device *pdev)
                dev_err(priv->dev, "error registering cpts device\n");
 
        cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
-                 priv->cpsw_res->start, ndev->irq);
+                   ss_res->start, ndev->irq);
 
        if (priv->data.dual_emac) {
                ret = cpsw_probe_dual_emac(pdev, priv);
                if (ret) {
                        cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
-                       goto clean_irq_ret;
+                       goto clean_ale_ret;
                }
        }
 
        return 0;
 
-clean_irq_ret:
-       for (i = 0; i < priv->num_irqs; i++)
-               free_irq(priv->irqs_table[i], priv);
 clean_ale_ret:
        cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
        cpdma_chan_destroy(priv->txch);
        cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
-clean_wr_iomap_ret:
-       iounmap(priv->wr_regs);
-clean_cpsw_wr_iores_ret:
-       release_mem_region(priv->cpsw_wr_res->start,
-                          resource_size(priv->cpsw_wr_res));
-clean_iomap_ret:
-       iounmap(priv->regs);
-clean_cpsw_iores_ret:
-       release_mem_region(priv->cpsw_res->start,
-                          resource_size(priv->cpsw_res));
-clean_clk_ret:
-       clk_put(priv->clk);
-clean_slave_ret:
+clean_runtime_disable_ret:
        pm_runtime_disable(&pdev->dev);
-       kfree(priv->slaves);
 clean_ndev_ret:
-       kfree(priv->data.slave_data);
        free_netdev(priv->ndev);
        return ret;
 }
@@ -2201,30 +2147,18 @@ static int cpsw_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct cpsw_priv *priv = netdev_priv(ndev);
-       int i;
 
        if (priv->data.dual_emac)
                unregister_netdev(cpsw_get_slave_ndev(priv, 1));
        unregister_netdev(ndev);
 
        cpts_unregister(priv->cpts);
-       for (i = 0; i < priv->num_irqs; i++)
-               free_irq(priv->irqs_table[i], priv);
 
        cpsw_ale_destroy(priv->ale);
        cpdma_chan_destroy(priv->txch);
        cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
-       iounmap(priv->regs);
-       release_mem_region(priv->cpsw_res->start,
-                          resource_size(priv->cpsw_res));
-       iounmap(priv->wr_regs);
-       release_mem_region(priv->cpsw_wr_res->start,
-                          resource_size(priv->cpsw_wr_res));
        pm_runtime_disable(&pdev->dev);
-       clk_put(priv->clk);
-       kfree(priv->slaves);
-       kfree(priv->data.slave_data);
        if (priv->data.dual_emac)
                free_netdev(cpsw_get_slave_ndev(priv, 1));
        free_netdev(ndev);
@@ -2280,7 +2214,7 @@ static struct platform_driver cpsw_driver = {
                .name    = "cpsw",
                .owner   = THIS_MODULE,
                .pm      = &cpsw_pm_ops,
-               .of_match_table = of_match_ptr(cpsw_of_mtable),
+               .of_match_table = cpsw_of_mtable,
        },
        .probe = cpsw_probe,
        .remove = cpsw_remove,
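The cpsw probe/remove rework above moves to managed resources: devm_kzalloc, devm_clk_get, devm_ioremap_resource and devm_request_irq tie each allocation's lifetime to the device, which is why the long unwind ladder of release_mem_region/iounmap/clk_put/kfree calls collapses into the single clean_runtime_disable_ret label. A minimal sketch of the mapping pattern now used (illustrative helper, not the driver itself):

        static void __iomem *map_mmio(struct platform_device *pdev, int index)
        {
                struct resource *res;

                res = platform_get_resource(pdev, IORESOURCE_MEM, index);
                /* reserves the region, maps it, and undoes both automatically
                 * when the device is unbound, so no explicit cleanup is needed */
                return devm_ioremap_resource(&pdev->dev, res);
        }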
index eb3e101ec04878c87f8a3a6dee6243f9d6970b52..574f49da693f194fe9a32ac731c90487498df9ba 100644 (file)
@@ -39,4 +39,6 @@ struct cpsw_platform_data {
        bool    dual_emac;      /* Enable Dual EMAC mode */
 };
 
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+
 #endif /* __CPSW_H__ */
index fe993cdd7e23b76ad13f09585c215d87c83a0e46..1a581ef7eee8fcbd2ed121d64a747e06e84c6113 100644 (file)
@@ -127,8 +127,8 @@ struct cpts {
 };
 
 #ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
 #else
 static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 }
 #endif
 
-extern int cpts_register(struct device *dev, struct cpts *cpts,
-                        u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
 
 #endif
index 67df09ea9d045da26420de1e9da09af58ec0edb8..41ba974bf37cb9175c74ab40bba1817e890749e7 100644 (file)
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
                    netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
                        emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
-               }
-               if (!netdev_mc_empty(ndev)) {
+               } else if (!netdev_mc_empty(ndev)) {
                        struct netdev_hw_addr *ha;
 
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
@@ -1853,7 +1852,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
        }
 
        /* MAC addr and PHY mask , RMII enable info from platform_data */
-       memcpy(priv->mac_addr, pdata->mac_addr, 6);
+       memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
        priv->phy_id = pdata->phy_id;
        priv->rmii_en = pdata->rmii_en;
        priv->version = pdata->version;
index 591437e59b9019c9fa0597a8342f63ae5357fcaa..62b19be5183d3349433ee5855ad082b1560b5c69 100644 (file)
@@ -319,7 +319,6 @@ static void tlan_remove_one(struct pci_dev *pdev)
 
        free_netdev(dev);
 
-       pci_set_drvdata(pdev, NULL);
        cancel_work_sync(&priv->tlan_tqueue);
 }
 
index 13e6fff8ca23af28e4b1e2229846522082dd60c4..628b736e5ae776fcf00333bed8c355e4b518314e 100644 (file)
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
                nz_addr |= mac[i];
 
        if (nz_addr) {
-               memcpy(dev->dev_addr, mac, 6);
+               memcpy(dev->dev_addr, mac, ETH_ALEN);
                dev->addr_len = 6;
        } else {
                eth_hw_addr_random(dev);
index 309abb472aa2040e978fea46faff0cfd81c0bb43..8505196be9f52bd7a982ec9dcb91f850379cfd07 100644 (file)
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
 #else
 static inline void udbg_shutdown_ps3gelic(void) {}
 #endif
 
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
 /* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
-                                 struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
 
 /* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
-                                 struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+                          struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
 
 #endif /* _GELIC_NET_H */
index f7e51b7d704960a2eb45ae63d4ed19f134d6d105..11f443d8e4ea9042b6eb6831b0c8d62612b8ad52 100644 (file)
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
 #define GELIC_WL_PRIV_SET_PSK          (SIOCIWFIRSTPRIV + 0)
 #define GELIC_WL_PRIV_GET_PSK          (SIOCIWFIRSTPRIV + 1)
 
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
 #endif /* _GELIC_WIRELESS_H */
index 5734480c1ecfa5f91a134720fe478bb68826fc50..3f4a32e39d276f6fcf9c9d9d16c4aec440b9daf6 100644 (file)
@@ -2478,7 +2478,6 @@ out_release_regions:
        pci_release_regions(pdev);
 out_disable_dev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return NULL;
 }
 
index 4ba2135474d1a118a7a86b579aae38b11eb2c56a..9b6af0845a1101deca4b2083412dd1764fe2cc5b 100644 (file)
@@ -29,8 +29,8 @@
 
 #include <linux/sungem_phy.h>
 
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
 
 extern const struct ethtool_ops spider_net_ethtool_ops;
 
index a971b9cca564c910f8e928806ea9ac443ed22cf2..1322546d92ac97287a358cbea644568aedb98168 100644 (file)
@@ -887,7 +887,6 @@ static void tc35815_remove_one(struct pci_dev *pdev)
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
        free_netdev(dev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int
index c8f088ab5fdfdbb6c9c757f5defd9c9b70588163..4a7293ed95e9c124b84ec87cabf1a9c1ef6ab1f9 100644 (file)
@@ -32,7 +32,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define DRV_NAME       "via-rhine"
-#define DRV_VERSION    "1.5.0"
+#define DRV_VERSION    "1.5.1"
 #define DRV_RELDATE    "2010-10-09"
 
 #include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
        if (unlikely(vlan_tx_tag_present(skb))) {
-               rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+               u16 vid_pcp = vlan_tx_tag_get(skb);
+
+               /* drop CFI/DEI bit, register needs VID and PCP */
+               vid_pcp = (vid_pcp & VLAN_VID_MASK) |
+                         ((vid_pcp & VLAN_PRIO_MASK) >> 1);
+               rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
                /* request tagging */
                rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
        }
@@ -2287,7 +2292,6 @@ static void rhine_remove_one(struct pci_dev *pdev)
 
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void rhine_shutdown (struct pci_dev *pdev)
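The via-rhine fix above strips the CFI/DEI bit before the tag is written to the descriptor: an 802.1Q TCI carries PCP in bits 15:13, DEI in bit 12 and the VID in bits 11:0, while the Rhine register wants the PCP packed directly above the VID. A standalone worked check of the masking (illustrative, using the same VLAN_VID_MASK/VLAN_PRIO_MASK values the kernel defines):

        #include <stdio.h>
        #include <stdint.h>

        #define VLAN_VID_MASK   0x0fff  /* VID, bits 11:0 */
        #define VLAN_PRIO_MASK  0xe000  /* PCP, bits 15:13 */

        int main(void)
        {
                uint16_t tag = 0x3064;  /* PCP = 1, DEI = 1, VID = 0x064 */
                uint16_t vid_pcp = (tag & VLAN_VID_MASK) |
                                   ((tag & VLAN_PRIO_MASK) >> 1);

                /* prints 0x1064: the DEI bit is gone, PCP sits just above the VID */
                printf("0x%04x\n", vid_pcp);
                return 0;
        }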
index b88121f240ca609ea26f911508b829c8fbbdbdd1..0029148077a9805a288a42a9d8207a97ce6e8133 100644 (file)
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
                       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 
+       /* Init descriptor indexes */
+       lp->tx_bd_ci = 0;
+       lp->tx_bd_next = 0;
+       lp->tx_bd_tail = 0;
+       lp->rx_bd_ci = 0;
+
        return 0;
 
 out:
index 4c619ea5189fc22308b8fe36851824e72eaad0ab..74234a51c851186c0c9bcfbc140b261724144e20 100644 (file)
@@ -31,7 +31,7 @@
 #define DRIVER_NAME "xilinx_emaclite"
 
 /* Register offsets for the EmacLite Core */
-#define XEL_TXBUFF_OFFSET      0x0             /* Transmit Buffer */
+#define XEL_TXBUFF_OFFSET      0x0             /* Transmit Buffer */
 #define XEL_MDIOADDR_OFFSET    0x07E4          /* MDIO Address Register */
 #define XEL_MDIOWR_OFFSET      0x07E8          /* MDIO Write Data Register */
 #define XEL_MDIORD_OFFSET      0x07EC          /* MDIO Read Data Register */
 #define XEL_MDIOCTRL_MDIOEN_MASK  0x00000008   /* MDIO Enable */
 
 /* Global Interrupt Enable Register (GIER) Bit Masks */
-#define XEL_GIER_GIE_MASK      0x80000000      /* Global Enable */
+#define XEL_GIER_GIE_MASK      0x80000000      /* Global Enable */
 
 /* Transmit Status Register (TSR) Bit Masks */
-#define XEL_TSR_XMIT_BUSY_MASK  0x00000001     /* Tx complete */
-#define XEL_TSR_PROGRAM_MASK    0x00000002     /* Program the MAC address */
-#define XEL_TSR_XMIT_IE_MASK    0x00000008     /* Tx interrupt enable bit */
-#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000    /* Buffer is active, SW bit
+#define XEL_TSR_XMIT_BUSY_MASK  0x00000001     /* Tx complete */
+#define XEL_TSR_PROGRAM_MASK    0x00000002     /* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK    0x00000008     /* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000    /* Buffer is active, SW bit
                                                 * only. This is not documented
                                                 * in the HW spec */
 
 #define XEL_TSR_PROG_MAC_ADDR  (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
 
 /* Receive Status Register (RSR) */
-#define XEL_RSR_RECV_DONE_MASK 0x00000001      /* Rx complete */
-#define XEL_RSR_RECV_IE_MASK   0x00000008      /* Rx interrupt enable bit */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001      /* Rx complete */
+#define XEL_RSR_RECV_IE_MASK   0x00000008      /* Rx interrupt enable bit */
 
 /* Transmit Packet Length Register (TPLR) */
-#define XEL_TPLR_LENGTH_MASK   0x0000FFFF      /* Tx packet length */
+#define XEL_TPLR_LENGTH_MASK   0x0000FFFF      /* Tx packet length */
 
 /* Receive Packet Length Register (RPLR) */
-#define XEL_RPLR_LENGTH_MASK   0x0000FFFF      /* Rx packet length */
+#define XEL_RPLR_LENGTH_MASK   0x0000FFFF      /* Rx packet length */
 
-#define XEL_HEADER_OFFSET      12              /* Offset to length field */
-#define XEL_HEADER_SHIFT       16              /* Shift value for length */
+#define XEL_HEADER_OFFSET      12              /* Offset to length field */
+#define XEL_HEADER_SHIFT       16              /* Shift value for length */
 
 /* General Ethernet Definitions */
-#define XEL_ARP_PACKET_SIZE            28      /* Max ARP packet size */
-#define XEL_HEADER_IP_LENGTH_OFFSET    16      /* IP Length Offset */
+#define XEL_ARP_PACKET_SIZE            28      /* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET    16      /* IP Length Offset */
 
 
 
@@ -1075,14 +1075,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
  * This function unmaps the IO region of the Emaclite device and frees the net
  * device.
  */
-static void xemaclite_remove_ndev(struct net_device *ndev,
-                                 struct platform_device *pdev)
+static void xemaclite_remove_ndev(struct net_device *ndev)
 {
        if (ndev) {
-               struct net_local *lp = netdev_priv(ndev);
-
-               if (lp->base_addr)
-                       devm_iounmap(&pdev->dev, lp->base_addr);
                free_netdev(ndev);
        }
 }
@@ -1177,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
        if (mac_address)
                /* Set the MAC address. */
-               memcpy(ndev->dev_addr, mac_address, 6);
+               memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
        else
                dev_warn(dev, "No MAC address found\n");
 
@@ -1214,7 +1209,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        return 0;
 
 error:
-       xemaclite_remove_ndev(ndev, ofdev);
+       xemaclite_remove_ndev(ndev);
        return rc;
 }
 
@@ -1248,7 +1243,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
 
-       xemaclite_remove_ndev(ndev, of_dev);
+       xemaclite_remove_ndev(ndev);
 
        return 0;
 }
index a20ed1a98099f3d959317f68964bbd88639e3d8b..f8399359017405756f06641fa6c0405c5d5ea0a9 100644 (file)
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
         */
        * (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
        a[1] = 0 ;
-       memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+       memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
 
        CHECK_NPP() ;
         /* set memory address reg for writes */
index 3ca308b282148832b534ae91f17f33991f6f6d16..bd1166bf8f61a55c0e3440d2762f1b1e0765cc7b 100644 (file)
@@ -469,20 +469,20 @@ struct s_smc {
 
 extern const struct fddi_addr fddi_broadcast;
 
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
 
 #endif /* _SCMECM_ */
 
index f5d7305a5784174f9868f045d186e9c90d2bbc20..713d303a06a9e1d2ea06e198a9682beb4bc54fc2 100644 (file)
@@ -436,7 +436,7 @@ static  int skfp_driver_init(struct net_device *dev)
        }
        read_address(smc, NULL);
        pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
-       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
        smt_reset_defaults(smc, 0);
 
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
         *               address.
         */
        read_address(smc, NULL);
-       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
        init_smt(smc, NULL);
        smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
        if ((unsigned short) frame[1 + 10] != 0)
                return;
        SRBit = frame[1 + 6] & 0x01;
-       memcpy(&frame[1 + 6], hw_addr, 6);
+       memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
        frame[8] |= SRBit;
 }                              // CheckSourceAddress
 
index a974727dd9a2bb604746a20b3f4e65d22b9801db..636b65c66d49e6568b1e8535e66fac5b9a8dbd49 100644 (file)
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
        outb(0, FCR(dev->base_addr));  /* disable FIFOs */
        outb(0x0d, MCR(dev->base_addr));
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+       if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
                        "baycom_ser_fdx", dev)) {
                release_region(dev->base_addr, SER12_EXTENT);
                return -EBUSY;
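This hunk, and the matching ones in the other hamradio and IrDA drivers below, remove IRQF_DISABLED from request_irq(). The flag has long been a no-op (interrupt handlers run with local interrupts disabled regardless), so only the meaningful flags remain. An illustrative sketch of the resulting call, with placeholder names:

        static irqreturn_t demo_interrupt(int irq, void *dev_id);

        static int demo_open(struct net_device *dev)
        {
                /* IRQF_DISABLED added nothing, so dropping it changes no
                 * behaviour; the handler still runs with interrupts off */
                return request_irq(dev->irq, demo_interrupt, IRQF_SHARED,
                                   dev->name, dev);
        }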
index e349d867449b2d744ff9550535317c93c069f07b..f9a8976195ba05f0fc07723969e5bb2136140a9b 100644 (file)
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
        outb(0, FCR(dev->base_addr));  /* disable FIFOs */
        outb(0x0d, MCR(dev->base_addr));
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+       if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
                        "baycom_ser12", dev)) {
                release_region(dev->base_addr, SER12_EXTENT);       
                return -EBUSY;
index bc1d5217038985565f05e15fe98b2df4078c4114..4bc6ee8e7987796b04a6f1ef5a1a5ffe044ec8af 100644 (file)
@@ -1734,7 +1734,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (!Ivec[hwcfg.irq].used && hwcfg.irq)
                        {
                                if (request_irq(hwcfg.irq, scc_isr,
-                                               IRQF_DISABLED, "AX.25 SCC",
+                                               0, "AX.25 SCC",
                                                (void *)(long) hwcfg.irq))
                                        printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
                                else
index 0721e72f9299250c6c29f0e1d3f7f51affffe7e8..1971411574db1c7ae2dd6549490ab6975e816cd1 100644 (file)
@@ -888,7 +888,7 @@ static int yam_open(struct net_device *dev)
                goto out_release_base;
        }
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
+       if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
                printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
                ret = -EBUSY;
                goto out_release_base;
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EINVAL;         /* Cannot change this parameter when up */
                if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
                        return -ENOBUFS;
-               ym->bitrate = 9600;
                if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
                        kfree(ym);
                        return -EFAULT;
index 42e6deee6db55ed607170109438960523d6b9b8e..0632d34905c73811456594cf0bf5e27f8710f5c7 100644 (file)
@@ -82,7 +82,6 @@ struct mrf24j40 {
 
        struct mutex buffer_mutex; /* only used to protect buf */
        struct completion tx_complete;
-       struct work_struct irqwork;
        u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
 };
 
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
        if (ret)
                goto err;
 
+       INIT_COMPLETION(devrec->tx_complete);
+
        /* Set TXNTRIG bit of TXNCON to send packet */
        ret = read_short_reg(devrec, REG_TXNCON, &val);
        if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
                val |= 0x4;
        write_short_reg(devrec, REG_TXNCON, val);
 
-       INIT_COMPLETION(devrec->tx_complete);
-
        /* Wait for the device to send the TX complete interrupt. */
        ret = wait_for_completion_interruptible_timeout(
                                                &devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
 static irqreturn_t mrf24j40_isr(int irq, void *data)
 {
        struct mrf24j40 *devrec = data;
-
-       disable_irq_nosync(irq);
-
-       schedule_work(&devrec->irqwork);
-
-       return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
-       struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
        u8 intstat;
        int ret;
 
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
                mrf24j40_handle_rx(devrec);
 
 out:
-       enable_irq(devrec->spi->irq);
+       return IRQ_HANDLED;
 }
 
 static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
 
        mutex_init(&devrec->buffer_mutex);
        init_completion(&devrec->tx_complete);
-       INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
        devrec->spi = spi;
        spi_set_drvdata(spi, devrec);
 
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
        val &= ~0x3; /* Clear RX mode (normal) */
        write_short_reg(devrec, REG_RXMCR, val);
 
-       ret = request_irq(spi->irq,
-                         mrf24j40_isr,
-                         IRQF_TRIGGER_FALLING,
-                         dev_name(&spi->dev),
-                         devrec);
+       ret = request_threaded_irq(spi->irq,
+                                  NULL,
+                                  mrf24j40_isr,
+                                  IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+                                  dev_name(&spi->dev),
+                                  devrec);
 
        if (ret) {
                dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
        dev_dbg(printdev(devrec), "remove\n");
 
        free_irq(spi->irq, devrec);
-       flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
        ieee802154_unregister_device(devrec->dev);
        ieee802154_free_device(devrec->dev);
        /* TODO: Will ieee802154_free_device() wait until ->xmit() is
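
Two things happen in the mrf24j40 hunks: the hard ISR plus workqueue pair is collapsed into a single threaded handler registered with request_threaded_irq() and IRQF_ONESHOT (the line stays masked until the thread returns, and the thread context lets the SPI transfers sleep), and INIT_COMPLETION() moves ahead of the TXNTRIG write so a fast TX-done interrupt cannot fire before the completion is re-armed. A minimal sketch of the same shape; every identifier other than the kernel APIs is a placeholder:

#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_radio {
        struct completion tx_complete;
        int irq;
};

static irqreturn_t demo_thread_isr(int irq, void *data)
{
        struct demo_radio *r = data;

        /* runs in a kernel thread, so sleeping SPI/I2C transfers are fine here */
        complete(&r->tx_complete);
        return IRQ_HANDLED;
}

static int demo_start_tx(struct demo_radio *r)
{
        long ret;

        INIT_COMPLETION(r->tx_complete);        /* re-arm before triggering TX */
        /* ... write the TX trigger register here ... */
        ret = wait_for_completion_interruptible_timeout(&r->tx_complete,
                                                        msecs_to_jiffies(50));
        return ret > 0 ? 0 : -ETIMEDOUT;        /* signals folded into timeout for brevity */
}

static int demo_request_irq(struct demo_radio *r)
{
        init_completion(&r->tx_complete);
        /* NULL hard handler + IRQF_ONESHOT: line stays masked until the thread finishes */
        return request_threaded_irq(r->irq, NULL, demo_thread_isr,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "demo-radio", r);
}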
index c74f384c87d50a0314b84e2808ab735c00ee8684..303c4bd26e17953de1693475b042266eac8dca90 100644 (file)
@@ -411,12 +411,12 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
 
 #else
 
-       if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+       if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
                return -EBUSY;
        }
 
-       if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+       if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
                free_irq(port->irq, dev);
                return -EBUSY;
index 31bcb98ef35609b9f0be24cfc63bcd4fd43ae852..768dfe9a93159e4964d4b9c0bc6986da2e6c09b6 100644 (file)
@@ -1352,7 +1352,7 @@ toshoboe_net_open (struct net_device *dev)
     return 0;
 
   rc = request_irq (self->io.irq, toshoboe_interrupt,
-                    IRQF_SHARED | IRQF_DISABLED, dev->name, self);
+                    IRQF_SHARED, dev->name, self);
   if (rc)
        return rc;
 
@@ -1559,7 +1559,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
   self->io.fir_base = self->base;
   self->io.fir_ext = OBOE_IO_EXTENT;
   self->io.irq = pci_dev->irq;
-  self->io.irqflags = IRQF_SHARED | IRQF_DISABLED;
+  self->io.irqflags = IRQF_SHARED;
 
   self->speed = self->io.speed = 9600;
   self->async = 0;
index 4455425f1c777865676150fa5da63b94f0ffc736..ff45cd0d60e84f88b5e34ec40f61e2e5e0d85eb3 100644 (file)
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
                goto err_mem_4;
 
        platform_set_drvdata(pdev, ndev);
-       err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+       err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
        if (err) {
                dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
                goto err_mem_4;
index 89682b49900ff2cac834c85cc58429ec4fb65ec3..8d9ae5a086d54d8d78ba8ae8ee4fee1c9fd76475 100644 (file)
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
                goto err_mem_4;
 
        platform_set_drvdata(pdev, ndev);
-       err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+       err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
        if (err) {
                dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
                goto err_mem_4;
index 6d5b1e2b12893271adb7fd7d4c373cde4823d151..f50b9c1c0639008b1e314099f4918e3eb0ed57db 100644 (file)
@@ -102,28 +102,29 @@ struct sir_driver {
 
 /* exported */
 
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
 
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+                                   const char *name);
+int sirdev_put_instance(struct sir_dev *self);
 
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
 
 /* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
 
 /* not exported */
 
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
 
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
 
 /* inline helpers */
 
index 9bf46bd19b87bf947fd79e547bdde626c8a12097..cc9845ec91c1b1be56cc960a0768ee71b8db69c9 100644 (file)
@@ -828,22 +828,21 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                eth_hw_addr_inherit(dev, lowerdev);
        }
 
+       port->count += 1;
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto destroy_port;
+
        err = netdev_upper_dev_link(lowerdev, dev);
        if (err)
                goto destroy_port;
 
-       port->count += 1;
-       err = register_netdevice(dev);
-       if (err < 0)
-               goto upper_dev_unlink;
 
        list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);
 
        return 0;
 
-upper_dev_unlink:
-       netdev_upper_dev_unlink(lowerdev, dev);
 destroy_port:
        port->count -= 1;
        if (!port->count)
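
The macvlan hunk reorders setup so register_netdevice() runs before netdev_upper_dev_link(): with the link created last there is nothing left to unlink when registration fails, which is why the upper_dev_unlink label disappears. A minimal sketch of goto-based unwinding where each label undoes exactly the steps completed before it; all names except the two kernel calls are placeholders, not the macvlan code itself:

#include <linux/netdevice.h>

static int demo_newlink(struct net_device *lowerdev, struct net_device *dev)
{
        int err;

        err = register_netdevice(dev);                  /* step 1 */
        if (err < 0)
                goto destroy_port;

        err = netdev_upper_dev_link(lowerdev, dev);     /* step 2, done last */
        if (err)
                goto unregister;                        /* only step 1 needs undoing */

        return 0;

unregister:
        unregister_netdevice(dev);
destroy_port:
        /* release whatever was set up before step 1 */
        return err;
}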
index ac22283aaf23213ab9bf9d931f76df602ccea09d..bc71947b1ec329f2eacd52b915b37ce56a4f16bb 100644 (file)
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
                wol->wolopts |= WAKE_MAGIC;
 }
 
+static int at803x_suspend(struct phy_device *phydev)
+{
+       int value;
+       int wol_enabled;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, AT803X_INTR_ENABLE);
+       wol_enabled = value & AT803X_WOL_ENABLE;
+
+       value = phy_read(phydev, MII_BMCR);
+
+       if (wol_enabled)
+               value |= BMCR_ISOLATE;
+       else
+               value |= BMCR_PDOWN;
+
+       phy_write(phydev, MII_BMCR, value);
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+       int value;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, MII_BMCR);
+       value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+       phy_write(phydev, MII_BMCR, value);
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
        int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
index 2e91477362d4d70b15df3db59ad41dec990a3c57..2e3c778ea9bf6f0437f95759f12c902a76cf9e7c 100644 (file)
@@ -34,9 +34,9 @@
 #include <linux/marvell_phy.h>
 #include <linux/of.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #define MII_MARVELL_PHY_PAGE           22
 
index c31aad0004cb5ed93453089114e9f7dc31894ab2..3ae28f420868fc35af3b3ed4e652f96e43a8f35b 100644 (file)
@@ -287,6 +287,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = ks8737_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = ksz9021_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = ksz9021_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 } };
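
Every Micrel entry gains .suspend/.resume callbacks pointing at the generic helpers, which park the PHY by setting BMCR_PDOWN and wake it by clearing the bit again. For a PHY with no special wake-on-LAN needs this is usually all a driver has to wire up; a minimal sketch of such an entry, where the ID and name are placeholders but the genphy helpers are the real ones:

#include <linux/phy.h>
#include <linux/module.h>

static struct phy_driver demo_phy_driver = {
        .phy_id         = 0x00221234,           /* placeholder ID */
        .phy_id_mask    = 0x00fffff0,
        .name           = "Demo 10/100 PHY",
        .features       = PHY_BASIC_FEATURES,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .suspend        = genphy_suspend,       /* sets BMCR_PDOWN */
        .resume         = genphy_resume,        /* clears BMCR_PDOWN */
        .driver         = { .owner = THIS_MODULE, },
};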
 
index 1f7bef90b46757a6ce2485c9aefd5d197464b7b1..7b4ff35c8bf7dcb28b455fb3a578eacddb298900 100644 (file)
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
                /* Any address will do - we take the first */
                const struct in_ifaddr *ifa = in_dev->ifa_list;
                if (ifa) {
-                       memcpy(eth->h_source, dev->dev_addr, 6);
+                       memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
                        memset(eth->h_dest, 0xfc, 2);
                        memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
                }
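
plip here, and catc further down, replace the literal 6 with ETH_ALEN when copying or filling MAC addresses, tying the length to the definition in <linux/if_ether.h> instead of a magic number. A trivial, hypothetical illustration:

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_fill_source(struct ethhdr *eth, const u8 *addr)
{
        memcpy(eth->h_source, addr, ETH_ALEN);  /* ETH_ALEN == 6 */
}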
index a34d6bf5e43b5b29325215e358cca9ee3fc734ee..cc70ecfc70626789183e462c8b51d13f0c7fc8aa 100644 (file)
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
                return;
 
+       spin_lock(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               spin_unlock(&sl->lock);
                sl_unlock(sl);
                return;
        }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
+       spin_unlock(&sl->lock);
 }
 
 static void sl_tx_timeout(struct net_device *dev)
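
slip_write_wakeup() now takes sl->lock around its inspection and update of xleft/xhead, so the tty wakeup path no longer races with the transmit path, which updates the same fields under that lock. A minimal sketch of the shape; the struct and field names are placeholders, the locking pattern is the point:

#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/bitops.h>

struct demo_chan {
        spinlock_t lock;
        unsigned char *xhead;   /* next byte to send */
        int xleft;              /* bytes left in the buffer */
};

static void demo_write_wakeup(struct tty_struct *tty, struct demo_chan *ch)
{
        int actual;

        spin_lock(&ch->lock);
        if (ch->xleft <= 0) {
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                spin_unlock(&ch->lock);
                return;
        }
        actual = tty->ops->write(tty, ch->xhead, ch->xleft);
        ch->xleft -= actual;
        ch->xhead += actual;
        spin_unlock(&ch->lock);
}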
index 807815fc996839d14efd18625fb300a2f48d2577..7cb105c103fe9408eb7c02b96dac4f4bfb702456 100644 (file)
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
        if (unlikely(!noblock))
                add_wait_queue(&tfile->wq.wait, &wait);
        while (len) {
-               current->state = TASK_INTERRUPTIBLE;
+               if (unlikely(!noblock))
+                       current->state = TASK_INTERRUPTIBLE;
 
                /* Read frames from the queue */
                if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
-       current->state = TASK_RUNNING;
-       if (unlikely(!noblock))
+       if (unlikely(!noblock)) {
+               current->state = TASK_RUNNING;
                remove_wait_queue(&tfile->wq.wait, &wait);
+       }
 
        return ret;
 }
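
tun_do_read() now touches current->state only when it actually joined the wait queue: in the non-blocking case no sleep is coming, so flipping the task to TASK_INTERRUPTIBLE and back is pointless and risks leaving the wrong state behind. A minimal sketch of a receive loop following the same rule, assuming a hypothetical queue and wait head rather than the tun internals:

#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

static struct sk_buff *demo_dequeue(struct sk_buff_head *q,
                                    wait_queue_head_t *wq, bool noblock)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;

        if (!noblock)
                add_wait_queue(wq, &wait);

        for (;;) {
                if (!noblock)
                        set_current_state(TASK_INTERRUPTIBLE);

                /* state is set before the check, so a wakeup in between is not lost */
                skb = skb_dequeue(q);
                if (skb || noblock || signal_pending(current))
                        break;
                schedule();
        }

        if (!noblock) {
                set_current_state(TASK_RUNNING);
                remove_wait_queue(wq, &wait);
        }
        return skb;
}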
index 3569293df8726df8c87786ad83b5be506e6b3285..846cc19c04f23f2f5a15adf65d8098cc36513eb6 100644 (file)
@@ -36,8 +36,8 @@
 #define AX_RXHDR_L4_TYPE_TCP                   16
 #define AX_RXHDR_L3CSUM_ERR                    2
 #define AX_RXHDR_L4CSUM_ERR                    1
-#define AX_RXHDR_CRC_ERR                       ((u32)BIT(31))
-#define AX_RXHDR_DROP_ERR                      ((u32)BIT(30))
+#define AX_RXHDR_CRC_ERR                       ((u32)BIT(29))
+#define AX_RXHDR_DROP_ERR                      ((u32)BIT(31))
 #define AX_ACCESS_MAC                          0x01
 #define AX_ACCESS_PHY                          0x02
 #define AX_ACCESS_EEPROM                       0x04
@@ -1406,6 +1406,19 @@ static const struct driver_info sitecom_info = {
        .tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info samsung_info = {
+       .description = "Samsung USB Ethernet Adapter",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
        /* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1431,11 @@ static const struct usb_device_id products[] = {
 }, {
        /* Sitecom USB 3.0 to Gigabit Adapter */
        USB_DEVICE(0x0df6, 0x0072),
-       .driver_info = (unsigned long) &sitecom_info,
+       .driver_info = (unsigned long)&sitecom_info,
+}, {
+       /* Samsung USB Ethernet Adapter */
+       USB_DEVICE(0x04e8, 0xa100),
+       .driver_info = (unsigned long)&samsung_info,
 },
        { },
 };
index 8d5cac2d8e33bee6b2545d45e83d24f52f8ac4c3..df507e6dbb9c99d9a15f718128e008f06ba11cdb 100644 (file)
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
 {
        struct catc *catc = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
-       u8 broadcast[6];
+       u8 broadcast[ETH_ALEN];
        u8 rx = RxEnable | RxPolarity | RxMultiCast;
 
-       memset(broadcast, 0xff, 6);
+       memset(broadcast, 0xff, ETH_ALEN);
        memset(catc->multicast, 0, 64);
 
        catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct usb_device *usbdev = interface_to_usbdev(intf);
        struct net_device *netdev;
        struct catc *catc;
-       u8 broadcast[6];
+       u8 broadcast[ETH_ALEN];
        int i, pktsz;
 
        if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                
                dev_dbg(dev, "Filling the multicast list.\n");
          
-               memset(broadcast, 0xff, 6);
+               memset(broadcast, 0xff, ETH_ALEN);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
index 7d78669000d704d8448aea5e24a8a73c312badfc..6358d420e185b4d4cb7b64a0c2a459fc5b70ed7f 100644 (file)
@@ -328,7 +328,7 @@ MODULE_DEVICE_TABLE(usb, usbpn_ids);
 
 static struct usb_driver usbpn_driver;
 
-int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
        static const char ifname[] = "usbpn%d";
        const struct usb_cdc_union_desc *union_header = NULL;
index 2dbb9460349d3659fc738eabb35f01cb0022e049..c6867f926cffc18a981c7682e5493ae36924d988 100644 (file)
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
                rx_ctl |= 0x02;
        } else if (net->flags & IFF_ALLMULTI ||
                   netdev_mc_count(net) > DM_MAX_MCAST) {
-               rx_ctl |= 0x04;
+               rx_ctl |= 0x08;
        } else if (!netdev_mc_empty(net)) {
                struct netdev_hw_addr *ha;
 
index 6312332afeba283f192cfbf0b9ca07dfbb1e52ec..e0a4a2b08e4526a14fc20e64346af85e88623ed2 100644 (file)
@@ -143,16 +143,22 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
        struct qmi_wwan_state *info = (void *)&dev->data;
        int rv = 0;
 
-       dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+       dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+               atomic_read(&info->pmcount), on);
 
-       if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
-               /* need autopm_get/put here to ensure the usbcore sees the new value */
+       if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+           (!on && atomic_dec_and_test(&info->pmcount))) {
+               /* need autopm_get/put here to ensure the usbcore sees
+                * the new value
+                */
                rv = usb_autopm_get_interface(dev->intf);
                if (rv < 0)
                        goto err;
@@ -199,7 +205,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
        atomic_set(&info->pmcount, 0);
 
        /* register subdriver */
-       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+                                        4096, &qmi_wwan_cdc_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                dev_err(&info->control->dev, "subdriver registration failed\n");
                rv = PTR_ERR(subdriver);
@@ -228,7 +235,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
        struct usb_driver *driver = driver_of(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
 
-       BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+       BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+                     sizeof(struct qmi_wwan_state)));
 
        /* set up initial state */
        info->control = intf;
@@ -250,7 +258,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
-                               dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+                               dev_dbg(&intf->dev, "CDC header len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        break;
@@ -260,7 +269,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
-                               dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+                               dev_dbg(&intf->dev, "CDC union len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +281,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
-                               dev_dbg(&intf->dev, "CDC ether len %u\n",  h->bLength);
+                               dev_dbg(&intf->dev, "CDC ether len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        cdc_ether = (struct usb_cdc_ether_desc *)buf;
                        break;
                }
 
-               /*
-                * Remember which CDC functional descriptors we've seen.  Works
+               /* Remember which CDC functional descriptors we've seen.  Works
                 * for all types we care about, of which USB_CDC_ETHERNET_TYPE
                 * (0x0f) is the highest numbered
                 */
@@ -293,10 +303,14 @@ next_desc:
 
        /* Use separate control and data interfaces if we found a CDC Union */
        if (cdc_union) {
-               info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
-               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
-                       dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
-                               cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+               info->data = usb_ifnum_to_if(dev->udev,
+                                            cdc_union->bSlaveInterface0);
+               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+                   !info->data) {
+                       dev_err(&intf->dev,
+                               "bogus CDC Union: master=%u, slave=%u\n",
+                               cdc_union->bMasterInterface0,
+                               cdc_union->bSlaveInterface0);
                        goto err;
                }
        }
@@ -374,8 +388,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret;
 
-       /*
-        * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+       /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
         * in system sleep context, otherwise, the resume callback has
         * to recover device from previous suspend failure.
         */
@@ -383,7 +396,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        if (ret < 0)
                goto err;
 
-       if (intf == info->control && info->subdriver && info->subdriver->suspend)
+       if (intf == info->control && info->subdriver &&
+           info->subdriver->suspend)
                ret = info->subdriver->suspend(intf, message);
        if (ret < 0)
                usbnet_resume(intf);
@@ -396,7 +410,8 @@ static int qmi_wwan_resume(struct usb_interface *intf)
        struct usbnet *dev = usb_get_intfdata(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret = 0;
-       bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+       bool callsub = (intf == info->control && info->subdriver &&
+                       info->subdriver->resume);
 
        if (callsub)
                ret = info->subdriver->resume(intf);
@@ -714,7 +729,9 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
-       {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)},    /* Cinterion PLxx */
+       {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
+       {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -776,7 +793,8 @@ static const struct usb_device_id products[] = {
 };
 MODULE_DEVICE_TABLE(usb, products);
 
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+                         const struct usb_device_id *prod)
 {
        struct usb_device_id *id = (struct usb_device_id *)prod;
 
index 7b331e613e02aec22c3ac225e3a99c53c5f0f822..90a429b7ebad8497d317639389c041d534366255 100644 (file)
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
        if (num_sgs == 1)
                return 0;
 
-       urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC);
+       /* reserve one for zero packet */
+       urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
+                         GFP_ATOMIC);
        if (!urb->sg)
                return -ENOMEM;
 
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                if (build_dma_sg(skb, urb) < 0)
                        goto drop;
        }
-       entry->length = length = urb->transfer_buffer_length;
+       length = urb->transfer_buffer_length;
 
        /* don't assume the hardware handles USB_ZERO_PACKET
         * NOTE:  strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (length % dev->maxpacket == 0) {
                if (!(info->flags & FLAG_SEND_ZLP)) {
                        if (!(info->flags & FLAG_MULTI_PACKET)) {
-                               urb->transfer_buffer_length++;
-                               if (skb_tailroom(skb)) {
+                               length++;
+                               if (skb_tailroom(skb) && !urb->num_sgs) {
                                        skb->data[skb->len] = 0;
                                        __skb_put(skb, 1);
-                               }
+                               } else if (urb->num_sgs)
+                                       sg_set_buf(&urb->sg[urb->num_sgs++],
+                                                       dev->padding_pkt, 1);
                        }
                } else
                        urb->transfer_flags |= URB_ZERO_PACKET;
        }
+       entry->length = urb->transfer_buffer_length = length;
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
 
        usb_kill_urb(dev->interrupt);
        usb_free_urb(dev->interrupt);
+       kfree(dev->padding_pkt);
 
        free_netdev(net);
 }
@@ -1679,9 +1685,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        /* initialize max rx_qlen and tx_qlen */
        usbnet_update_max_qlen(dev);
 
+       if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
+               !(info->flags & FLAG_MULTI_PACKET)) {
+               dev->padding_pkt = kzalloc(1, GFP_KERNEL);
+               if (!dev->padding_pkt) {
+                       status = -ENOMEM;
+                       goto out4;
+               }
+       }
+
        status = register_netdev (net);
        if (status)
-               goto out4;
+               goto out5;
        netif_info(dev, probe, dev->net,
                   "register '%s' at usb-%s-%s, %s, %pM\n",
                   udev->dev.driver->name,
@@ -1699,6 +1714,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 
        return 0;
 
+out5:
+       kfree(dev->padding_pkt);
 out4:
        usb_free_urb(dev->interrupt);
 out3:
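
The usbnet change covers the "transfer length is a multiple of maxpacket" case for scatter-gather URBs: devices that cannot take a zero-length packet get a one-byte pad instead, and because an SG-mapped skb cannot simply be extended, a preallocated one-byte buffer is attached as an extra scatterlist entry (hence the num_sgs + 1 allocation and the kzalloc of dev->padding_pkt in probe). A minimal sketch of that decision; the function is hypothetical while the urb/skb fields are the usual USB core ones:

#include <linux/usb.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* pad points at a preallocated 1-byte buffer; urb->sg has one spare slot */
static void demo_pad_or_zlp(struct urb *urb, struct sk_buff *skb,
                            unsigned int maxpacket, bool can_zlp, u8 *pad)
{
        if (urb->transfer_buffer_length % maxpacket != 0)
                return;                                 /* no boundary case to handle */

        if (can_zlp) {
                urb->transfer_flags |= URB_ZERO_PACKET; /* let the controller send a ZLP */
                return;
        }

        if (!urb->num_sgs && skb_tailroom(skb)) {
                skb->data[skb->len] = 0;                /* pad inside the skb itself */
                __skb_put(skb, 1);
                urb->transfer_buffer_length++;
        } else if (urb->num_sgs) {
                /* cannot grow a mapped skb: tack the pad byte on as an extra SG entry */
                sg_set_buf(&urb->sg[urb->num_sgs++], pad, 1);
                urb->transfer_buffer_length++;
        }
}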
index eee1f19ef1e9397469e343133490ec6972b4bbdc..b2d034791e1551ab07d7782f5019a0ee4fb86052 100644 (file)
@@ -188,6 +188,11 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
        return tot;
 }
 
+/* fake multicast ability */
+static void veth_set_multicast_list(struct net_device *dev)
+{
+}
+
 static int veth_open(struct net_device *dev)
 {
        struct veth_priv *priv = netdev_priv(dev);
@@ -250,6 +255,7 @@ static const struct net_device_ops veth_netdev_ops = {
        .ndo_start_xmit      = veth_xmit,
        .ndo_change_mtu      = veth_change_mtu,
        .ndo_get_stats64     = veth_get_stats64,
+       .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
 };
 
index defec2b3c5a408ff035889d015d717f040e2864c..9fbdfcd1e1a0693e96edd908928953097b99742d 100644 (file)
@@ -938,7 +938,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
                return -EINVAL;
        } else {
                vi->curr_queue_pairs = queue_pairs;
-               schedule_delayed_work(&vi->refill, 0);
+               /* virtnet_open() will refill when device is going to up. */
+               if (dev->flags & IFF_UP)
+                       schedule_delayed_work(&vi->refill, 0);
        }
 
        return 0;
@@ -1116,6 +1118,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
 {
        struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
 
+       mutex_lock(&vi->config_lock);
+
+       if (!vi->config_enable)
+               goto done;
+
        switch(action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
@@ -1128,6 +1135,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
        default:
                break;
        }
+
+done:
+       mutex_unlock(&vi->config_lock);
        return NOTIFY_OK;
 }
 
@@ -1733,7 +1743,9 @@ static int virtnet_restore(struct virtio_device *vdev)
        vi->config_enable = true;
        mutex_unlock(&vi->config_lock);
 
+       rtnl_lock();
        virtnet_set_queues(vi, vi->curr_queue_pairs);
+       rtnl_unlock();
 
        return 0;
 }
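
The virtio_net hunks are small robustness fixes: the refill work is only scheduled when the interface is actually up (virtnet_open() refills otherwise), the CPU hotplug callback bails out while config changes are disabled, and on restore virtnet_set_queues() is now wrapped in rtnl_lock()/rtnl_unlock() so it runs with the same RTNL protection it gets when called from the ethtool path. A hedged sketch of the restore-side locking only; everything except the rtnl pair is a placeholder:

#include <linux/rtnetlink.h>
#include <linux/types.h>

struct demo_info;                               /* hypothetical driver state */
int demo_set_queues(struct demo_info *vi, u16 queue_pairs);

/* restore path: take RTNL ourselves; the ethtool caller already holds it */
static int demo_restore_queues(struct demo_info *vi, u16 queue_pairs)
{
        int err;

        rtnl_lock();
        err = demo_set_queues(vi, queue_pairs);
        rtnl_unlock();

        return err;
}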
index a03f358fd58b9a44fc480ea33c67adf28e3cf0ef..12040a35d95d17223e5200b0a2716e288f31163d 100644 (file)
@@ -410,9 +410,9 @@ int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
 
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
 vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
 extern char vmxnet3_driver_name[];
index d1292fe746bc2eb3962a57df299e49f7f9bc2d50..da8479479d01cfa9a01b17317ec24f2f4526c592 100644 (file)
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 
        spin_lock(&vn->sock_lock);
        hlist_del_rcu(&vs->hlist);
-       smp_wmb();
-       vs->sock->sk->sk_user_data = NULL;
+       rcu_assign_sk_user_data(vs->sock->sk, NULL);
        vxlan_notify_del_rx_port(sk);
        spin_unlock(&vn->sock_lock);
 
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        port = inet_sk(sk)->inet_sport;
 
-       smp_read_barrier_depends();
-       vs = (struct vxlan_sock *)sk->sk_user_data;
+       vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;
 
@@ -2089,7 +2087,7 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       inet_get_local_port_range(&low, &high);
+       inet_get_local_port_range(dev_net(dev), &low, &high);
        vxlan->port_min = low;
        vxlan->port_max = high;
        vxlan->dst_port = htons(vxlan_port);
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
        atomic_set(&vs->refcnt, 1);
        vs->rcv = rcv;
        vs->data = data;
-       smp_wmb();
-       vs->sock->sk->sk_user_data = vs;
+       rcu_assign_sk_user_data(vs->sock->sk, vs);
 
        spin_lock(&vn->sock_lock);
        hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
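
The vxlan hunks swap the open-coded smp_wmb()/smp_read_barrier_depends() pairs around sk_user_data for rcu_assign_sk_user_data() and rcu_dereference_sk_user_data(), which provide the same publish/consume ordering while documenting the intent. A minimal sketch of the pair in use; the socket-private struct and functions are placeholders:

#include <net/sock.h>

struct demo_sock_priv {
        int id;                                 /* whatever per-socket state is needed */
};

/* publish side: store with the barrier that readers rely on */
static void demo_publish(struct sock *sk, struct demo_sock_priv *priv)
{
        rcu_assign_sk_user_data(sk, priv);
}

/* lookup side, e.g. from a UDP encap receive callback running under RCU */
static struct demo_sock_priv *demo_lookup(struct sock *sk)
{
        return rcu_dereference_sk_user_data(sk);
}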
index 3f0c4f268751030318dd920a2a81bf2ddd1d328d..bcfff0d62de4f2070d5ac644a3becfedb74e3462 100644 (file)
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
        }
 
        i = port->index;
+       memset(&sync, 0, sizeof(sync));
        sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
        /* Lucky card and linux use same encoding here */
        sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
index 3d80e4267de83f3fae3e3aedf611ad1b3dbd3e2a..3d741663fd677a42eb2a3823e54412cf8afe7ee0 100644 (file)
@@ -220,7 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
        /* We want a fast IRQ for this device. Actually we'd like an even faster
           IRQ ;) - This is one driver RtLinux is made for */
 
-       if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+       if (request_irq(irq, z8530_interrupt, 0,
                        "Hostess SV11", sv) < 0) {
                pr_warn("IRQ %d already in use\n", irq);
                goto err_irq;
index 4f7748478984750fa4a9872341f3db5f36d802a8..27860b4f59081a46f71ad4f5baa2f7605ef8e17c 100644 (file)
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
        /* We want a fast IRQ for this device. Actually we'd like an even faster
           IRQ ;) - This is one driver RtLinux is made for */
 
-       if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+       if (request_irq(irq, z8530_interrupt, 0,
                        "SeaLevel", dev) < 0) {
                pr_warn("IRQ %d already in use\n", irq);
                goto err_request_irq;
index 6a24a5a70cc7d4459e04e0348882f0324dd4fad3..4c0a69779b8980a16ccca5a90acc5ece605cf06a 100644 (file)
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
+               memset(&line, 0, sizeof(line));
                line.clock_type = get_status(port)->clocking;
                line.clock_rate = 0;
                line.loopback = 0;
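
Both the farsync and wanxl hunks zero the on-stack sync_serial_settings before filling it: the structure is later copied to user space wholesale, and without the memset any padding or fields the driver never writes would leak stale kernel stack contents. A minimal sketch of the pattern; the ioctl wrapper is hypothetical, while sync_serial_settings and copy_to_user are the real interfaces:

#include <linux/hdlc.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_get_iface(void __user *arg)
{
        sync_serial_settings sync;

        memset(&sync, 0, sizeof(sync)); /* no uninitialised stack reaches userspace */
        sync.clock_rate = 0;
        sync.clock_type = CLOCK_EXT;
        sync.loopback = 0;

        if (copy_to_user(arg, &sync, sizeof(sync)))
                return -EFAULT;
        return 0;
}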
index 8f0fc2e57e2be06fce081c6f835bb0ea72243859..f57ee67836aea02fc58ff782e38a1b8e3302c94d 100644 (file)
@@ -41,6 +41,6 @@ struct x25_asy {
 
 #define X25_ASY_MAGIC 0x5303
 
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
 
 #endif /* _LINUX_X25_ASY.H */
index f29d554fc07d4f893c0857593f0b108fa9b4a614..2416a9d60bd69e4781efcede38e93af611264e00 100644 (file)
@@ -395,20 +395,19 @@ struct z8530_dev
 extern u8 z8530_dead_port[];
 extern u8 z8530_hdlc_kilostream_85230[];
 extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
-                                         struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
 
 
 /*
index 9f1e947f3557823bc228549a2fd9b0636fed55b2..649ecad6844c73baa58a9e8b0e1850b9fdb88b36 100644 (file)
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
        i2400mu->rx_size_auto_shrink = 1;
 }
 
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
 
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
 
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
 
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
-                                      const struct i2400m_bootrom_header *,
-                                      size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
-                                          struct i2400m_bootrom_header *,
-                                          size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+                               const struct i2400m_bootrom_header *, size_t,
+                               int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+                                   struct i2400m_bootrom_header *, size_t);
 #endif /* #ifndef __I2400M_USB_H__ */
index 79c6505b5c2030c0f3b0813b97aef5fdc8780278..5a34e72bab9afb42d670f7ea72286c9e191a31e2 100644 (file)
@@ -710,18 +710,18 @@ enum i2400m_bri {
        I2400M_BRI_MAC_REINIT = 1 << 3,
 };
 
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
 static inline
 int i2400m_is_d2h_barker(const void *buf)
 {
        const __le32 *barker = buf;
        return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
 }
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
 
 /* Make/grok boot-rom header commands */
 
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
 /*
  * Driver / device setup and internal functions
  */
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
-
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
-
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
-
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
-                         const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
-                          enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+                  int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
 enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
 
 #ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
 #else
 static inline int i2400m_debugfs_add(struct i2400m *i2400m)
 {
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
 #endif
 
 /* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
 
 extern struct attribute_group i2400m_dev_attr_group;
 
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
        dev_put(i2400m->wimax_dev.net_dev);
 }
 
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
  * the bus-specific drivers.
  */
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
 
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
 
 
 /*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
        return i2400m->wimax_dev.net_dev->dev.parent;
 }
 
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
-                                  char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
-                                const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
-                              const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+                         size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+                       size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
 
 static inline
 struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
        return &iface->cur_altsetting->endpoint[ep].desc;
 }
 
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
-                                     enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
-       struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+                                         const struct i2400m_tlv_rf_switches_status *);
 
 /*
  * Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
 
 
 /* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
 
 
 
index 46e640a6968d0cd90c05f2d09b696844d348ef81..3cfe3ee90dbe1d802d7c3b8b4fe0c7cd8c815ee9 100644 (file)
@@ -38,9 +38,9 @@ enum ath10k_debug_mask {
 
 extern unsigned int ath10k_debug_mask;
 
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
 
 #ifdef CONFIG_ATH10K_DEBUGFS
 int ath10k_debug_start(struct ath10k *ar);
@@ -85,7 +85,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
                                      const char *fmt, ...);
 void ath10k_dbg_dump(enum ath10k_debug_mask mask,
                     const char *msg, const char *prefix,
index 77238afbed759ae11baaaae0d8f168af16251625..ccf3597fd9e2511c11cce44e136c3d1c50c59d06 100644 (file)
@@ -2728,7 +2728,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
        cmd = (struct wmi_vdev_up_cmd *)skb->data;
        cmd->vdev_id       = __cpu_to_le32(vdev_id);
        cmd->vdev_assoc_id = __cpu_to_le32(aid);
-       memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+       memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
@@ -2896,7 +2896,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
        cmd->vdev_id     = __cpu_to_le32(vdev_id);
        cmd->param_id    = __cpu_to_le32(param_id);
        cmd->param_value = __cpu_to_le32(param_value);
-       memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+       memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi vdev %d peer 0x%pM set param %d value %d\n",
index 48161edec8de84769fd9f3db92fa1c4aa165d70b..69f58b073e85ff1a183ec1f06e803ff9da00806c 100644 (file)
@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
        ah->stats.tx_bytes_count += skb->len;
        info = IEEE80211_SKB_CB(skb);
 
+       size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+       memcpy(info->status.rates, bf->rates, size);
+
        tries[0] = info->status.rates[0].count;
        tries[1] = info->status.rates[1].count;
        tries[2] = info->status.rates[2].count;
 
        ieee80211_tx_info_clear_status(info);
 
-       size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
-       memcpy(info->status.rates, bf->rates, size);
-
        for (i = 0; i < ts->ts_final_idx; i++) {
                struct ieee80211_tx_rate *r =
                        &info->status.rates[i];
index 98a886154d9cc59775eaa0c17c74305452e52e8e..05debf700a846db00f55e0071df53e207d2e6c63 100644 (file)
@@ -22,8 +22,7 @@
 
 #define ATH6KL_MAX_IE                  256
 
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
 
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
index 74369de00fb57a40dcebd72ced2002d0cb571b24..ca9ba005f2871f3e42bbc914bb5ca90f6e8b90e9 100644 (file)
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
 };
 
 extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
 
 enum ath6kl_war {
        ATH6KL_WAR_INVALID_RATE,
index 4c3bbe4f309533a04f4262913ac4c7c074a8d73c..e7a38d844a6a4e7a2e9ba738e56606d946fea47a 100644 (file)
@@ -999,7 +999,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
 
 void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
 void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct ath9k_vif_iter_data *iter_data);
index 98964b02f1390a45b65559b65e875bce1b369bf8..74f452c7b1667c47a65506a077042f2b0668c3a8 100644 (file)
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        unsigned long flags;
+       int i;
 
        if (ath_startrecv(sc) != 0) {
                ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                }
        work:
                ath_restart_work(sc);
+
+               for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+                       if (!ATH_TXQ_SETUP(sc, i))
+                               continue;
+
+                       spin_lock_bh(&sc->tx.txq[i].axq_lock);
+                       ath_txq_schedule(sc, &sc->tx.txq[i]);
+                       spin_unlock_bh(&sc->tx.txq[i].axq_lock);
+               }
        }
 
        ieee80211_wake_queues(sc->hw);
@@ -619,21 +629,10 @@ chip_reset:
 
 static int ath_reset(struct ath_softc *sc)
 {
-       int i, r;
+       int r;
 
        ath9k_ps_wakeup(sc);
-
        r = ath_reset_internal(sc, NULL);
-
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (!ATH_TXQ_SETUP(sc, i))
-                       continue;
-
-               spin_lock_bh(&sc->tx.txq[i].axq_lock);
-               ath_txq_schedule(sc, &sc->tx.txq[i]);
-               spin_unlock_bh(&sc->tx.txq[i].axq_lock);
-       }
-
        ath9k_ps_restore(sc);
 
        return r;
index b1e74683b8dc7fcac0d27db46672a8c5e4c2e407..95ddca5495d492cb5a1bf4bb99d8ee3283a9e1ec 100644 (file)
@@ -1342,13 +1342,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
                return;
 
-       /*
-        * All MPDUs in an aggregate will use the same LNA
-        * as the first MPDU.
-        */
-       if (rs->rs_isaggr && !rs->rs_firstaggr)
-               return;
-
        /*
         * Change the default rx antenna if rx diversity
         * chooses the other antenna 3 times in a row.
index bea5caa11d4ab4b260b2a8a0b096c9613b472361..09cdbcd097394a3a2c324230c2743f5d181b0900 100644 (file)
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;
+       tbf->bf_state.stale = false;
 
        return tbf;
 }
@@ -1390,11 +1391,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
 {
        struct ath_atx_tid *txtid;
+       struct ath_txq *txq;
        struct ath_node *an;
        u8 density;
 
        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
+       txq = txtid->ac->txq;
+
+       ath_txq_lock(sc, txq);
 
        /* update ampdu factor/density, they may have changed. This may happen
         * in HT IBSS when a beacon with HT-info is received after the station
@@ -1418,6 +1423,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;
 
+       ath_txq_unlock_complete(sc, txq);
+
        return 0;
 }
 
@@ -1556,8 +1563,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        __skb_unlink(bf->bf_mpdu, tid_q);
                        list_add_tail(&bf->list, &bf_q);
                        ath_set_rates(tid->an->vif, tid->an->sta, bf);
-                       ath_tx_addto_baw(sc, tid, bf);
-                       bf->bf_state.bf_type &= ~BUF_AGGR;
+                       if (bf_isampdu(bf)) {
+                               ath_tx_addto_baw(sc, tid, bf);
+                               bf->bf_state.bf_type &= ~BUF_AGGR;
+                       }
                        if (bf_tail)
                                bf_tail->bf_next = bf;
 
@@ -1944,7 +1953,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                        if (bf_is_ampdu_not_probing(bf))
                                txq->axq_ampdu_depth++;
 
-                       bf = bf->bf_lastbf->bf_next;
+                       bf_last = bf->bf_lastbf;
+                       bf = bf_last->bf_next;
+                       bf_last->bf_next = NULL;
                }
        }
 }
@@ -1952,15 +1963,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb)
 {
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
-       struct ath_buf *bf;
-
-       bf = fi->bf;
+       struct ath_buf *bf = fi->bf;
 
        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);
        bf->bf_state.bf_type = 0;
+       if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+               bf->bf_state.bf_type = BUF_AMPDU;
+               ath_tx_addto_baw(sc, tid, bf);
+       }
 
        bf->bf_next = NULL;
        bf->bf_lastbf = bf;
index 61c302a6bdeaa38ef1a1d20b3ac1e477db794745..5b340769d5bb2196bf3d0192e007ed13f23236fc 100644 (file)
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        }
        conn.channel = ch - 1;
 
-       memcpy(conn.bssid, bss->bssid, 6);
-       memcpy(conn.dst_mac, bss->bssid, 6);
+       memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+       memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
        /*
         * FW don't support scan after connection attempt
         */
index 9c35479790b69f2621ec18e7863565818b505ef7..0d950f209dae01efd23ebe9956ac2dc68daa7063 100644 (file)
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
        if (priv->wep_is_on)
                frame_ctl |= IEEE80211_FCTL_PROTECTED;
        if (priv->operating_mode == IW_MODE_ADHOC) {
-               skb_copy_from_linear_data(skb, &header.addr1, 6);
-               memcpy(&header.addr2, dev->dev_addr, 6);
-               memcpy(&header.addr3, priv->BSSID, 6);
+               skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+               memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+               memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
        } else {
                frame_ctl |= IEEE80211_FCTL_TODS;
-               memcpy(&header.addr1, priv->CurrentBSSID, 6);
-               memcpy(&header.addr2, dev->dev_addr, 6);
-               skb_copy_from_linear_data(skb, &header.addr3, 6);
+               memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+               memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+               skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
        }
 
        if (priv->use_wpa)
-               memcpy(&header.addr4, SNAP_RFC1024, 6);
+               memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
 
        header.frame_control = cpu_to_le16(frame_ctl);
        /* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
                }
        }
 
-       memcpy(skbp, header->addr1, 6); /* destination address */
+       memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
        if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-               memcpy(&skbp[6], header->addr3, 6);
+               memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
        else
-               memcpy(&skbp[6], header->addr2, 6); /* source address */
+               memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
 
        skb->protocol = eth_type_trans(skb, priv->dev);
        skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
                         u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
                         u8 frag_no, int more_frags)
 {
-       u8 mac4[6];
-       u8 source[6];
+       u8 mac4[ETH_ALEN];
+       u8 source[ETH_ALEN];
        struct sk_buff *skb;
 
        if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-               memcpy(source, header->addr3, 6);
+               memcpy(source, header->addr3, ETH_ALEN);
        else
-               memcpy(source, header->addr2, 6);
+               memcpy(source, header->addr2, ETH_ALEN);
 
        rx_packet_loc += 24; /* skip header */
 
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
                msdu_size -= 4;
 
        if (frag_no == 0) { /* first fragment */
-               atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
-               msdu_size -= 6;
-               rx_packet_loc += 6;
+               atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+               msdu_size -= ETH_ALEN;
+               rx_packet_loc += ETH_ALEN;
 
                if (priv->do_rx_crc)
                        crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_seq = seq_no;
                priv->frag_no = 1;
                priv->frag_len = msdu_size;
-               memcpy(priv->frag_source, source, 6);
-               memcpy(&priv->rx_buf[6], source, 6);
-               memcpy(priv->rx_buf, header->addr1, 6);
+               memcpy(priv->frag_source, source, ETH_ALEN);
+               memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+               memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
 
                atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
 
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, 6);
+                               memset(priv->frag_source, 0xff, ETH_ALEN);
                        }
                }
 
        } else if (priv->frag_no == frag_no &&
                   priv->frag_seq == seq_no &&
-                  memcmp(priv->frag_source, source, 6) == 0) {
+                  memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
 
                atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
                                   rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, 6);
+                               memset(priv->frag_source, 0xff, ETH_ALEN);
                                more_frags = 1; /* don't send broken assembly */
                        }
                }
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_no++;
 
                if (!more_frags) { /* last one */
-                       memset(priv->frag_source, 0xff, 6);
+                       memset(priv->frag_source, 0xff, ETH_ALEN);
                        if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
                                priv->dev->stats.rx_dropped++;
                        } else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
                        atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
                        /* we use the same buffer for frag reassembly and control packets */
-                       memset(priv->frag_source, 0xff, 6);
+                       memset(priv->frag_source, 0xff, ETH_ALEN);
 
                        if (priv->do_rx_crc) {
                                /* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
        priv->last_qual = jiffies;
        priv->last_beacon_timestamp = 0;
        memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
-       memset(priv->BSSID, 0, 6);
+       memset(priv->BSSID, 0, ETH_ALEN);
        priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
        priv->station_was_associated = 0;
 
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
                         char *extra)
 {
        struct atmel_private *priv = netdev_priv(dev);
-       memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+       memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
        awrq->sa_family = ARPHRD_ETHER;
 
        return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
        for (i = 0; i < priv->BSS_list_entries; i++) {
                iwe.cmd = SIOCGIWAP;
                iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-               memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+               memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
                current_ev = iwe_stream_add_event(info, current_ev,
                                                  extra + IW_SCAN_MAX_DATA,
                                                  &iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
 static void atmel_scan(struct atmel_private *priv, int specific_ssid)
 {
        struct {
-               u8 BSSID[6];
+               u8 BSSID[ETH_ALEN];
                u8 SSID[MAX_SSID_LENGTH];
                u8 scan_type;
                u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
                u8 SSID_size;
        } cmd;
 
-       memset(cmd.BSSID, 0xff, 6);
+       memset(cmd.BSSID, 0xff, ETH_ALEN);
 
        if (priv->fast_scan) {
                cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
 
        cmd.SSID_size = priv->SSID_size;
        memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-       memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+       memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
        cmd.channel = (priv->channel & 0x7f);
        cmd.BSS_type = type;
        cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
 
        cmd.SSID_size = priv->SSID_size;
        memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-       memcpy(cmd.BSSID, priv->BSSID, 6);
+       memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
        cmd.BSS_type = type;
        cmd.channel = (priv->channel & 0x7f);
 
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
        header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
        header.duration_id = cpu_to_le16(0x8000);
        header.seq_ctrl = 0;
-       memcpy(header.addr1, priv->CurrentBSSID, 6);
-       memcpy(header.addr2, priv->dev->dev_addr, 6);
-       memcpy(header.addr3, priv->CurrentBSSID, 6);
+       memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+       memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+       memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
        if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
                /* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        struct ass_req_format {
                __le16 capability;
                __le16 listen_interval;
-               u8 ap[6]; /* nothing after here directly accessible */
+               u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
                u8 ssid_el_id;
                u8 ssid_len;
                u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        header.duration_id = cpu_to_le16(0x8000);
        header.seq_ctrl = 0;
 
-       memcpy(header.addr1, priv->CurrentBSSID, 6);
-       memcpy(header.addr2, priv->dev->dev_addr, 6);
-       memcpy(header.addr3, priv->CurrentBSSID, 6);
+       memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+       memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+       memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
        body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
        if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 
        /* current AP address - only in reassoc frame */
        if (is_reassoc) {
-               memcpy(body.ap, priv->CurrentBSSID, 6);
+               memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
                ssid_el_p = &body.ssid_el_id;
                bodysize = 18 + priv->SSID_size;
        } else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
        int i, index;
 
        for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
-               if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+               if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
                        index = i;
 
        /* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
                if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
                        return;
                index = priv->BSS_list_entries++;
-               memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+               memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
                priv->BSSinfo[index].RSSI = rssi;
        } else {
                if (rssi > priv->BSSinfo[index].RSSI)
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
 {
        struct bss_info *bss =  &priv->BSSinfo[bss_index];
 
-       memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+       memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
        memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
 
        /* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
                                0x00, 0x04, 0x25, 0x00, 0x00, 0x00
                        };
                        printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
-                       memcpy(dev->dev_addr, default_mac, 6);
+                       memcpy(dev->dev_addr, default_mac, ETH_ALEN);
                }
        }
 
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
 
        struct { /* NB this is matched to the hardware, don't change. */
                u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
-               u8 receiver_address[6];
+               u8 receiver_address[ETH_ALEN];
                u8 wep_is_on;
                u8 default_key; /* 0..3 */
                u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
 
        mib.wep_is_on = priv->wep_is_on;
        mib.exclude_unencrypted = priv->exclude_unencrypted;
-       memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+       memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
 
        /* zero all the keys before adding in valid ones. */
        memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
index c51d2dc489e45bd3a9b2aac36bf9fa2130453e72..1d7982afc0ad6af6b1b32f92ae07fb9ef694f2e1 100644 (file)
@@ -1065,12 +1065,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
-               err = dma_set_mask(dev->dev->dma_dev, mask);
-               if (!err) {
-                       err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-                       if (!err)
-                               break;
-               }
+               err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+               if (!err)
+                       break;
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
                        fallback = true;
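
The b43 and b43legacy hunks collapse separate dma_set_mask()/dma_set_coherent_mask() calls into dma_set_mask_and_coherent(). A kernel-context sketch of the usual try-64-bit-then-fall-back shape (example_set_dma_mask is a hypothetical helper, not part of the patch):

    #include <linux/dma-mapping.h>

    /* Try 64-bit DMA first; fall back to 32-bit if the platform refuses. */
    static int example_set_dma_mask(struct device *dev)
    {
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                    return 0;
            /* Streaming and coherent masks fall back together. */
            return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    }
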
index 7c970d3ae358834a4230f35826d6c598661ee323..05ee7f10cc8f577532e9180ac9874adcf9343d0f 100644 (file)
@@ -164,7 +164,8 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
                }
                en_addr = en_addrs[override][i];
 
-               val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+               if (e)
+                       val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
 
                if (off) {
                        b43_phy_mask(dev, en_addr, ~en_mask);
index 8cb206a89083aaa314868ef2c07b906181fe6689..4ae63f4ddfb20394d1a38c4847b5c5732887f277 100644 (file)
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
        else
                txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
        txhdr->mac_frame_ctl = wlhdr->frame_control;
-       memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+       memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
        /* Calculate duration for fallback rate */
        if ((rate_fb == rate) ||
index 42eb26c99e11cea54a2b063de4120ec69ae33b22..b2ed1795130bb0d7e1f10f95ab9775743b5f276b 100644 (file)
@@ -806,12 +806,9 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
-               err = dma_set_mask(dev->dev->dma_dev, mask);
-               if (!err) {
-                       err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-                       if (!err)
-                               break;
-               }
+               err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+               if (!err)
+                       break;
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
                        fallback = true;
index 849a28c803023e03570d69ca8d6de8f60462a6c6..86588c9ff0f2b6cddee0b3a7da5455fd05b29a9b 100644 (file)
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
        rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
 
        txhdr->mac_frame_ctl = wlhdr->frame_control;
-       memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+       memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
        /* Calculate duration for fallback rate */
        if ((rate_fb->hw_value == rate) ||
index c768ec2d473d72afcb6c8d1748a681765460086c..905704e335d7164b90a4ecb6fd51c2324911c154 100644 (file)
@@ -476,8 +476,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
 
 static int brcmf_sdio_pd_probe(struct platform_device *pdev)
 {
-       int ret;
-
        brcmf_dbg(SDIO, "Enter\n");
 
        brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
@@ -485,11 +483,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
        if (brcmfmac_sdio_pdata->power_on)
                brcmfmac_sdio_pdata->power_on();
 
-       ret = sdio_register_driver(&brcmf_sdmmc_driver);
-       if (ret)
-               brcmf_err("sdio_register_driver failed: %d\n", ret);
-
-       return ret;
+       return 0;
 }
 
 static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -512,6 +506,15 @@ static struct platform_driver brcmf_sdio_pd = {
        }
 };
 
+void brcmf_sdio_register(void)
+{
+       int ret;
+
+       ret = sdio_register_driver(&brcmf_sdmmc_driver);
+       if (ret)
+               brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
 void brcmf_sdio_exit(void)
 {
        brcmf_dbg(SDIO, "Enter\n");
@@ -522,18 +525,13 @@ void brcmf_sdio_exit(void)
                sdio_unregister_driver(&brcmf_sdmmc_driver);
 }
 
-void brcmf_sdio_init(void)
+void __init brcmf_sdio_init(void)
 {
        int ret;
 
        brcmf_dbg(SDIO, "Enter\n");
 
        ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
-       if (ret == -ENODEV) {
-               brcmf_dbg(SDIO, "No platform data available, registering without.\n");
-               ret = sdio_register_driver(&brcmf_sdmmc_driver);
-       }
-
-       if (ret)
-               brcmf_err("driver registration failed: %d\n", ret);
+       if (ret == -ENODEV)
+               brcmf_dbg(SDIO, "No platform data available.\n");
 }
index 4de9aac6666dd25cacdcde231a49de06d6e794aa..899a2ada5b8212f28e30632551910b67474e6080 100644 (file)
@@ -630,29 +630,29 @@ struct brcmf_skb_reorder_data {
        u8 *reorder;
 };
 
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 /* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
 /* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
-                                      uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-                                   void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+                              void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+                            void *buf, uint len);
 
 /* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-                              struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+                       struct sk_buff *rxp);
 
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
-                                    s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+                             char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
-                            bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+                     bool success);
 
 #endif                         /* _BRCMF_H_ */
index 200ee9b485bf0e0f31ba2e6b7cee81f095cb97bc..a6eb09e5d46f408acf9205937ebffd772b15846d 100644 (file)
@@ -132,34 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
  * interface functions from common layer
  */
 
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
-                        struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+                     int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
 /* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
 /* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
 /* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
 
 /* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
-                            bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_init(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
 #endif
 
 #endif                         /* _BRCMF_BUS_H_ */
index 42bf19a2eeee3beec051d6184066c34eef2a2dec..64e9cff241b9156745bab110010520bfba730394 100644 (file)
@@ -1225,21 +1225,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
        return bus->chip << 4 | bus->chiprev;
 }
 
-static void brcmf_driver_init(struct work_struct *work)
+static void brcmf_driver_register(struct work_struct *work)
 {
-       brcmf_debugfs_init();
-
 #ifdef CONFIG_BRCMFMAC_SDIO
-       brcmf_sdio_init();
+       brcmf_sdio_register();
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-       brcmf_usb_init();
+       brcmf_usb_register();
 #endif
 }
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
+static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
 
 static int __init brcmfmac_module_init(void)
 {
+       brcmf_debugfs_init();
+#ifdef CONFIG_BRCMFMAC_SDIO
+       brcmf_sdio_init();
+#endif
        if (!schedule_work(&brcmf_driver_work))
                return -EBUSY;
 
index ef9179883748f0aa9637900a6e7bab1b476cd937..53c6e710f2cb243c9997e9bb6f453a60b3f7634b 100644 (file)
  */
 
 /* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
 
 /* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
 
 /* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
 
 /* Add any protocol-specific data header.
  * Caller must reserve prot_hdrlen prepend space.
  */
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
-                               struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+                        struct sk_buff *txp);
 
 /* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
 
 #endif                         /* _BRCMF_PROTO_H_ */
index 076b83c7c8964628e778b1247a2c4b7965eb8d46..507c61c991fa06c96b6adb1a0f99acefa5f74068 100644 (file)
@@ -223,17 +223,16 @@ struct sdpcmd_regs {
        u16 PAD[0x80];
 };
 
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-                                 struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-                                             struct chip_info *ci,
-                                             u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-                                          struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-                                         struct chip_info *ci, char *nvram_dat,
-                                         uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+                          struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+                                      struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+                                   struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+                                  struct chip_info *ci, char *nvram_dat,
+                                  uint nvram_sz);
 
 #endif         /* _BRCMFMAC_SDIO_CHIP_H_ */
index 1b034ea46f932ff4c8aea837b88603861b2e44c0..bfadcb836b6dca4cf6e11279b801d44a2f993360 100644 (file)
@@ -185,18 +185,18 @@ struct brcmf_sdio_dev {
 };
 
 /* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
 /* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                            u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                            u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                                  void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+                     int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+                     int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+                           void *data, bool write);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -210,22 +210,18 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
  * Returns 0 or error code.
  * NOTE: Async operation is not currently supported.
  */
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes);
-
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                       uint flags, struct sk_buff_head *pktq, uint totlen);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, u8 *buf, uint nbytes);
+
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                           uint flags, struct sk_buff_head *pktq,
+                           uint totlen);
 
 /* Flags bits */
 
@@ -241,46 +237,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  *   nbytes:   number of bytes to transfer to/from buf
  * Returns 0 or error code.
  */
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
-                              u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
-                           u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+                       u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+                    u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
 
 /* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
 
 /* attach, return handler on success, NULL if failed.
  *  The handler shall be provided by all subsequent calls. No local cache
  *  cfghdl points to the starting address of pci device mapped memory
  */
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
 
 /* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
-                                   uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+                            uint addr, u8 *byte);
 
 /* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
-                        uint rw, uint fnc, uint addr,
-                        u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+                            uint addr, u32 *word, uint nbyte);
 
 /* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
-                                   bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
 
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
-                                wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+                         wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
 #endif                         /* _BRCM_SDH_H_ */
index bf6758d95600bba97bbb43f273d4b2c571fa590f..422f44c631756b2332dc10d4c1fb24c8172343ff 100644 (file)
@@ -1536,7 +1536,7 @@ void brcmf_usb_exit(void)
        brcmf_release_fw(&fw_image_list);
 }
 
-void brcmf_usb_init(void)
+void brcmf_usb_register(void)
 {
        brcmf_dbg(USB, "Enter\n");
        INIT_LIST_HEAD(&fw_image_list);
index a8a267b5b87aebfd6bd40006c628c7964afc9cca..2d08c155c23bcd93afba34a0f28d17e2e68e46c5 100644 (file)
@@ -172,19 +172,19 @@ struct si_info {
 
 
 /* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
 
 /* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
 
 static inline u32 ai_get_cccaps(struct si_pub *sih)
 {
index 73d01e5861090d99d875c886ae8783c9dff5561c..03bdcf29bd50ee228d269b4ae4359159ffa1df9d 100644 (file)
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
        u16 dma_len;
 };
 
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
-                                       struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
-                                  struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+                                struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+                           struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
 
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
-                                struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+                             struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
 
 #endif                         /* _BRCM_AMPDU_H_ */
index 97ea3881a8ec7c969ee40839b65878650def99e6..a3d487ab19646bcc2dc2be25accc559052bc7afc 100644 (file)
 #ifndef _BRCM_ANTSEL_H_
 #define _BRCM_ANTSEL_H_
 
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
-                                 bool sel,
-                                 u8 id, u8 fbid, u8 *antcfg,
-                                 u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+                              u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
 
 #endif /* _BRCM_ANTSEL_H_ */
index 006483a0abe6452d022600666b5ddf9ad5d28248..39dd3a5b2979a572613c5335473cd121295b4649 100644 (file)
 
 #define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
 
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
 
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
 
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
-                                     u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
 
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
-                                  u16 chanspec,
-                                  struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
-                                    u16 chanspec,
-                                    u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+                               struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+                                 u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
 
 #endif                         /* _WLC_CHANNEL_H */
index 3a6544710c8ab222cc126ef53d5f19143498cc9e..edc5d105ff980e40e1ad2221a4c7686f87b0cea1 100644 (file)
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        if (err != 0)
                brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
                          __func__, err);
+
+       bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
        return err;
 }
 
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
                return;
        }
 
+       bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
+
        /* put driver in down state */
        spin_lock_bh(&wl->lock);
        brcms_down(wl);
index 4090032e81a29f5d4e9aaf0b9258ae8d8c80bfcc..198053dfc3102ccb17a12b051372d84ff720a32e 100644 (file)
@@ -88,26 +88,26 @@ struct brcms_info {
 };
 
 /* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
-                               bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+                        bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
 
 /* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
-                                     void (*fn) (void *arg), void *arg,
-                                     const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+                                    void (*fn) (void *arg), void *arg,
+                                    const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
 
 #endif                         /* _BRCM_MAC80211_IF_H_ */
index df6229ed52c8d23289d8ec41eff2e32be24c4146..8138f1cff4e56a3bd801dc48f1917963012653ca 100644 (file)
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
 
        /* If macaddr exists, use it (Sromrev4, CIS, ...). */
        if (!is_zero_ether_addr(sprom->il0mac)) {
-               memcpy(etheraddr, sprom->il0mac, 6);
+               memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
                return;
        }
 
        if (wlc_hw->_nbands > 1)
-               memcpy(etheraddr, sprom->et1mac, 6);
+               memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
        else
-               memcpy(etheraddr, sprom->il0mac, 6);
+               memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
 }
 
 /* power both the pll and external oscillator on/off */
index b5d7a38b53fe3d8baf6f8cbc28ba8cef0aaa2b8a..c4d135cff04ad2f7883c783fb96244bbbabfa370 100644 (file)
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
        struct brcms_bss_info *current_bss;
 };
 
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
-                          struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
-                  uint *blocks);
-
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
-                               uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
-                                            u32 rspec,
-                                            bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
-                                     u32 rts_rate,
-                                     u32 frame_rate,
-                                     u8 rts_preamble_type,
-                                     u8 frame_preamble_type, uint frame_len,
-                                     bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
-                              struct ieee80211_sta *sta,
-                              void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
-                                         u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
-                                    u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
-                                 u16 chanspec,
-                                 bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
-                             u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
-                       u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
-                                       u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
-                                         u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
-                                      int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
-                                  uint offset, const void *buf, int len,
-                                  u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
-                                    void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
-                                   u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+                          uint *blocks);
+
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+                              bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+                              u32 rts_rate, u32 frame_rate,
+                              u8 rts_preamble_type, u8 frame_preamble_type,
+                              uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+                           struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+                         bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+                int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+                                    u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+                                      u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+                               int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+                          const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+                            void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
 
 #endif                         /* _BRCM_MAIN_H_ */
index e34a71e7d24204a0cc6dd0a3cb12fd043ec2636a..4d3734f48d9c7a444ef7aab2383655b471a0a5f2 100644 (file)
@@ -179,121 +179,106 @@ struct shared_phy_params {
 };
 
 
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
-                                           struct bcma_device *d11core,
-                                           int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
-
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
-                                  u16 *phyrev, u16 *radioid,
-                                  u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
-                                u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
-                                      u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
-                               struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
-                                                bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
-                                         struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
-                                        uint band);
-
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
-                                     u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
-                                             uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
-                                           uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
-                                     struct txpwr_limits *,
-                                     u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
-                              bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
-                              bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
-                                      struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
-                                       bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
-                                  u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
-                                 u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
-                                 u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
-                                u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
-
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
-
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
-                                       struct tx_power *power, uint channel);
-
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
-                                     u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
-                                     bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
-                                            u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+                                    struct bcma_device *d11core, int bandtype,
+                                    struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+                           u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+                                         bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+                                  struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
+
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+                              u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+                                      u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+                                    s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+                              u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+                               struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+                                struct tx_power *power, uint channel);
+
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+                                     u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
 #endif                          /* _BRCM_PHY_HAL_H_ */
index 1dc767c31653b29a9458cc08b9d113ab1c6979ad..4960f7d2680430313d42e3e7e64787a21c0f8dde 100644 (file)
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
        u8 do_init_g;
 };
 
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
-                         u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_read_table(struct brcms_phy *pi,
-                              const struct phytbl_info *ptbl_info,
-                              u16 tblAddr, u16 tblDataHi,
-                              u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
-                               const struct phytbl_info *ptbl_info,
-                               u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
-                              uint tbl_offset, u16 tblAddr, u16 tblDataHi,
-                              u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
-                                            struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
-                                   const struct radio_regs *radioregs,
-                                   u16 core_offset);
-
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
-                                       s32 *eps_imag);
-
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
-                                     u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
-                                       u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
-                                             u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
-
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
-                                    u16 max_val, bool iqcalmode);
-
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
-                                              u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
-                                           u8 rate_mcs_end,
-                                           u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
-                                           u8 rate_ofdm_start,
-                                           u8 rate_ofdm_end,
-                                           u8 rate_mcs_start);
-
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+
+void wlc_phy_read_table(struct brcms_phy *pi,
+                       const struct phytbl_info *ptbl_info,
+                       u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+                        const struct phytbl_info *ptbl_info,
+                        u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+                       u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+                                     struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+                            const struct radio_regs *radioregs,
+                            u16 core_offset);
+
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
+
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+                             bool iqcalmode);
+
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+                                       u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+                                    u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+                                    u8 rate_ofdm_end, u8 rate_mcs_start);
+
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
 #define LCNPHY_TX_POWER_TABLE_SIZE     128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TX_PWR_CTRL_TEMPBASED   0xE001
 
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
-                                  const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
-                                 struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
-                                     u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
-                               s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+                           const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+                              u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
 
 #define NPHY_MAX_HPVGA1_INDEX          10
 #define NPHY_DEF_HPVGA1_INDEXLIMIT     7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
        u32 q_pwr;
 };
 
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
-                                              bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 
 #define wlc_phy_write_table_nphy(pi, pti) \
        wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 #define wlc_nphy_table_data_write(pi, w, v) \
        wlc_phy_table_data_write((pi), (w), (v))
 
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
-                                   u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
-                                    u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+                            void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+                             const void *);
 
 #define        PHY_IPA(pi) \
        ((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
        if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
                (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
 
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
-                                  u16 num_samps, u8 wait_time,
-                                  u8 wait_for_crs);
-
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
-                                     struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
-                                        u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
-                                  struct nphy_txgains target_gain,
-                                  bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
-                                struct nphy_txgains target_gain,
-                                u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
-                                    s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
-                                 s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
-                                       s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
-                               u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
-                                    u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
-                                    struct d11rxhdr *rxh);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+                           u16 num_samps, u8 wait_time, u8 wait_for_crs);
+
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+                              struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+                           struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+                         u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+                             s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+                          s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+                                bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+                        u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+                             u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
 
 #define NPHY_TESTPATTERN_BPHY_EVM   0
 #define NPHY_TESTPATTERN_BPHY_RFCS  1
 
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
 
 void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
                                s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
-                                 u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
 
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
 #endif                         /* _BRCM_PHY_INT_H_ */
index 2c5b66b75970939ba6ee0ffe2b709faecbc3428e..dd8774717adee148134cb85229f422c72fa143e2 100644 (file)
 
 struct brcms_phy;
 
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
-                                                struct brcms_info *wl,
-                                                struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+                                         struct brcms_info *wl,
+                                         struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
 
 /* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
-                                           void (*fn) (struct brcms_phy *pi),
-                                           void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
-                              u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
-                                u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
-                          u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
-                            u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
-                                                     physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
-                                                       physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
-                                         int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
-                                        u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
-                                 void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
-                               const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
-                                      u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+                                    void (*fn)(struct brcms_phy *pi),
+                                    void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+                   int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+                                  int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+                          int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+                        int, u32);
+
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
 
 #endif                         /* _BRCM_PHY_SHIM_H_ */
index 20e2012d5a3a2d2d83c8b7544f6828d14d3f6f33..a014bbc4f93555cc789bbc337adc36491da12902 100644 (file)
@@ -20,7 +20,7 @@
 
 #include "types.h"
 
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
 
 #endif /* _BRCM_PMU_H_ */
index d36ea5e1cc494231e56dd18bb2f0dadc65f4304d..4da38cb4f31854a60878c5da31c22cf4e39980b6 100644 (file)
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
 };
 
 /* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
-              bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
-                                    struct sk_buff *sdu,
-                                    struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
-                                  int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
-                                 int match_reg_offset,
-                                 const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
-                             const struct ieee80211_tx_queue_params *arg,
-                             bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
-                           struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
-                                        u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
-                                  const char *name, struct brcms_info *hdl,
-                                  int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
-                                    struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+                                   struct bcma_device *core, uint unit,
+                                   bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
+
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
+
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+                             struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+                          const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+                          const struct ieee80211_tx_queue_params *arg,
+                          bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+                        u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+                                 u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+                           struct brcms_info *hdl,
+                           int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+                             struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
                                 struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
-                                       struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
                                    s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
-                                       u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
-                            const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
-                                  struct sk_buff *beacon, u16 tim_offset,
-                                  u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
-                                      struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
-                            size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+                     u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+                           u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+                               struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
 
 #endif                         /* _BRCM_PUB_H_ */
index 980d578825cc0b3b131995bfcf6926bbab0bbbf1..5bb88b78ed648407dbbcb39db73d86ea503d76c9 100644 (file)
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
 
 /* sanitize, and sort a rateset with the basic bit(s) preserved, validate
  * rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
-                                      const struct brcms_c_rateset *hw_rs,
-                                      bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+                                           const struct brcms_c_rateset *hw_rs,
+                                           bool check_brate, u8 txstreams);
 /* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
-                            struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+                         struct brcms_c_rateset *dst);
 
 /* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
-       struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
-       bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
-                       const struct brcms_c_rateset *rs_hw, uint phy_type,
-                       int bandtype, bool cck_only, uint rate_mask,
-                       bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
-                                     u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
-                                         u8 bw);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+                           struct brcms_c_rateset *dst, bool basic_only,
+                           u8 rates, uint xmask, bool mcsallow);
+
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+                            const struct brcms_c_rateset *rs_hw, uint phy_type,
+                            int bandtype, bool cck_only, uint rate_mask,
+                            bool mcsallow, u8 bw, u8 txstreams);
+
+s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
 
 #endif                         /* _BRCM_RATE_H_ */
index 19f6580f69be27890eea13ddfe7c689cbe6f3089..ba9493009a3340db524bfef6dc5359c6a8be0ec4 100644 (file)
 
 #include "types.h"
 
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
 
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
-                                       u16 *ss_algo_channel,
-                                       u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
-                            struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
-                              bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
-                                     u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
-                                       u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+                                    u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
 
 #endif                         /* _BRCM_STF_H_ */
index 18750a814b4f219f918aaed081d8e99f01e7f6f4..c87dd89bcb78bddd2774bb7275741b1507aaf8d3 100644 (file)
@@ -43,16 +43,14 @@ struct brcms_ucode {
        u32 *bcm43xx_bomminor;
 };
 
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
 
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
 
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
-                               unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
-                                unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int  brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+                         unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int  brcms_check_firmwares(struct brcms_info *wl);
 
 #endif /* _BRCM_UCODE_H_ */
index 92623f02b1c045460c28d860520000c127cfc611..8660a2cba09810428f127967c996a02cfbc174a1 100644 (file)
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
        void (*decchspec)(struct brcmu_chan *ch);
 };
 
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
 
 #endif /* _BRCMU_CHANNELS_H_ */
index 898cacb8d01df299f1e4dce5ebc72fc008e5b4f2..8ba445b3fd72a92ca78cbd1042dcc38528df8efb 100644 (file)
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
        return skb_peek_tail(&pq->q[prec].skblist);
 }
 
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
-                                struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
-                                     struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
-                                            bool (*match_fn)(struct sk_buff *p,
-                                                             void *arg),
-                                            void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+                                    struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+                                     bool (*match_fn)(struct sk_buff *p,
+                                                      void *arg),
+                                     void *arg);
 
 /* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
 
 /* Empty the queue at particular precedence level */
 /* callback function fn(pkt, arg) returns true if pkt belongs to the interface */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
-       bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+                      bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* operations on a set of precedences in packet queue */
 
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
-       int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 
 /* operations on packet queue as a whole */
 
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
        return pq->len == 0;
 }
 
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
 /* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
-               bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+                     bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
 /* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
 /* externs */
 /* format/print */
 #ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif                         /* DEBUG */
 
 #ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
 void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
 #else
 __printf(3, 4)
index e310752f0e33014ade59028b754b2fdd38076cb6..40078f5f932ec6b1ea3a26af5453f2bc5d2206ec 100644 (file)
@@ -42,7 +42,6 @@ struct hwbus_priv {
        spinlock_t              lock; /* Serialize all bus operations */
        wait_queue_head_t       wq;
        int claimed;
-       int irq_disabled;
 };
 
 #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -238,9 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
        struct hwbus_priv *self = dev_id;
 
        if (self->core) {
-               disable_irq_nosync(self->func->irq);
-               self->irq_disabled = 1;
+               cw1200_spi_lock(self);
                cw1200_irq_handler(self->core);
+               cw1200_spi_unlock(self);
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
@@ -253,9 +252,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
 
        pr_debug("SW IRQ subscribe\n");
 
-       ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler,
-                                     IRQF_TRIGGER_HIGH,
-                                     "cw1200_wlan_irq", self);
+       ret = request_threaded_irq(self->func->irq, NULL,
+                                  cw1200_spi_irq_handler,
+                                  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                  "cw1200_wlan_irq", self);
        if (WARN_ON(ret < 0))
                goto exit;
 
@@ -273,22 +273,13 @@ exit:
 
 static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
 {
+       int ret = 0;
+
        pr_debug("SW IRQ unsubscribe\n");
        disable_irq_wake(self->func->irq);
        free_irq(self->func->irq, self);
 
-       return 0;
-}
-
-static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
-{
-       /* Disables are handled by the interrupt handler */
-       if (enable && self->irq_disabled) {
-               enable_irq(self->func->irq);
-               self->irq_disabled = 0;
-       }
-
-       return 0;
+       return ret;
 }
 
 static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -368,7 +359,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
        .unlock                 = cw1200_spi_unlock,
        .align_size             = cw1200_spi_align_size,
        .power_mgmt             = cw1200_spi_pm,
-       .irq_enable             = cw1200_spi_irq_enable,
 };
 
 /* Probe Function to be called by SPI stack when device is discovered */
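
For reference, the cw1200_spi hunks above drop the hand-rolled disable_irq_nosync()/irq_disabled bookkeeping in favour of request_threaded_irq() with IRQF_ONESHOT, so the sleeping SPI I/O can run in the handler thread under the bus lock. A minimal sketch of that registration pattern follows; the demo_* names, the mutex and the handler body are placeholders, not cw1200 code.

#include <linux/interrupt.h>
#include <linux/mutex.h>

/* Hypothetical device context, not the cw1200 hwbus_priv. */
struct demo_dev {
        struct mutex io_lock;   /* serialises sleeping bus I/O */
        int irq;
};

/*
 * Runs in a kernel thread: with a NULL hard handler the core installs a
 * default one, and IRQF_ONESHOT keeps the line masked until this function
 * returns, so sleeping SPI/SDIO transfers are safe here.
 */
static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
        struct demo_dev *dev = dev_id;

        mutex_lock(&dev->io_lock);
        /* ... read/ack the chip's interrupt status over the bus ... */
        mutex_unlock(&dev->io_lock);

        return IRQ_HANDLED;
}

static int demo_irq_subscribe(struct demo_dev *dev)
{
        /* IRQF_ONESHOT is mandatory when no hard handler is supplied. */
        return request_threaded_irq(dev->irq, NULL, demo_irq_thread,
                                    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                    "demo_wlan_irq", dev);
}

The matching teardown is simply free_irq(dev->irq, dev), as in the unsubscribe path above.
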
index 0b2061bbc68bfa99ba9bce91dcdba18b99984879..acdff0f7f952e03fa25b51b47a9e83eda37d85c6 100644 (file)
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
 
        /* Enable interrupt signalling */
        priv->hwbus_ops->lock(priv->hwbus_priv);
-       ret = __cw1200_irq_enable(priv, 2);
+       ret = __cw1200_irq_enable(priv, 1);
        priv->hwbus_ops->unlock(priv->hwbus_priv);
        if (ret < 0)
                goto unsubscribe;
index 51dfb3a90735521f18a1c572440e3ec8e486f5c8..8b2fc831c3de75f6f33799cc12c6174bad6d837b 100644 (file)
@@ -28,7 +28,6 @@ struct hwbus_ops {
        void (*unlock)(struct hwbus_priv *self);
        size_t (*align_size)(struct hwbus_priv *self, size_t size);
        int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
-       int (*irq_enable)(struct hwbus_priv *self, int enable);
 };
 
 #endif /* CW1200_HWBUS_H */
index 41bd7615ccaa31f2ba8bf8c183a88db0a73bca60..ff230b7aeedd646e1591d01bdd1514afa86d7f0b 100644 (file)
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
        u16 val16;
        int ret;
 
-       /* We need to do this hack because the SPI layer can sleep on I/O
-          and the general path involves I/O to the device in interrupt
-          context.
-
-          However, the initial enable call needs to go to the hardware.
-
-          We don't worry about shutdown because we do a full reset which
-          clears the interrupt enabled bits.
-       */
-       if (priv->hwbus_ops->irq_enable) {
-               ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
-               if (ret || enable < 2)
-                       return ret;
-       }
-
        if (HIF_8601_SILICON == priv->hw_type) {
                ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
                if (ret < 0) {
index 970a48baaf804a38ff1883702bbd9460d7d5b8c8..de7c4ffec3096b07ccaece961666b0bdde51ea27 100644 (file)
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
                }
        }
 
-       memcpy(req.bssid, selected->bssid, 6);
+       memcpy(req.bssid, selected->bssid, ETH_ALEN);
        req.channel = selected->chid;
        spin_unlock_irqrestore(&local->lock, flags);
 
index f394af777cf5769057796ce415baf5fe390c85d6..81903e33d5b1bd4779681da798dc0183c0638f48 100644 (file)
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
 /* data's copy of the eeprom data                                 */
 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
 {
-       memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+       memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
 }
 
 static void ipw_read_eeprom(struct ipw_priv *priv)
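
Several hunks in this series (hostap, ipw2200 and later prism54) replace the magic number 6 with ETH_ALEN when copying MAC addresses. The idiom, as a trivial hypothetical helper:

#include <linux/if_ether.h>     /* ETH_ALEN == 6 */
#include <linux/string.h>
#include <linux/types.h>

/* Copy a MAC address using the named constant instead of a literal 6. */
static void demo_copy_mac(u8 *dst, const u8 *src)
{
        memcpy(dst, src, ETH_ALEN);
}
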
index 6eede52ad8c0fbc0581a2b3a68d302cb589f4dde..5ce2f59d3378ed7ddcc4891769d72401eb66cc0d 100644 (file)
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
 }
 
 /* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
-extern void libipw_networks_age(struct libipw_device *ieee,
-                                  unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
 
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
 
 /* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
-                              struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
 
 /* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
-                    struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
-                       struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+                  struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+             struct libipw_rx_stats *rx_stats);
 /* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
-                            struct libipw_hdr_4addr *header,
-                            struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+                  struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
 
 /* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
-                                                    *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
-                            const struct libipw_geo *geo);
-
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
-                                     u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
-                                     u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
-                                     u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
-                                                            libipw_device
-                                                            *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
-                                     u8 channel);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+                                               u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
 
 /* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
-                                struct iw_request_info *info,
-                                union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
-                                  struct iw_request_info *info,
-                                  union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
-                                  struct iw_request_info *info,
-                                  union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
-                                     struct iw_request_info *info,
-                                     union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
-                                     struct iw_request_info *info,
-                                     union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+                        struct iw_request_info *info, union iwreq_data *wrqu,
+                        char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+                        struct iw_request_info *info, union iwreq_data *wrqu,
+                        char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra);
 
 static inline void libipw_increment_scans(struct libipw_device *ieee)
 {
index 9a8703def0ba1a1825c264b5e5806003412c5e91..00030d43a1947380cf818bae7ff11a31b26d92a8 100644 (file)
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
  * for use by iwl-*.c
  *
  *****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
-                                            struct ieee80211_hdr *hdr,
-                                            int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
-                                    char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+                                     struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+                             bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
 
 /******************************************************************************
  *
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
  * il3945_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
-                                          struct il_tx_queue *txq,
-                                          dma_addr_t addr, u16 len, u8 reset,
-                                          u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
-                                            struct il3945_frame *frame,
-                                            u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                                   dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+                                     struct il3945_frame *frame, u8 rate);
 void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
                                 struct ieee80211_tx_info *info,
                                 struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
 void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
 
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
 
 /**
  * il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
  * not yet been merged into a single common layer for managing the
  * station tables.
  */
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
 
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
 
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
 
 /* scanning */
 int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
index 1b15b0b2292b4fe06c3fa1efcc95279c9901370e..337dfcf3bbde7c58fe4681268f63b9d29ea8f5fa 100644 (file)
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
        ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
         (t) > IL_TX_POWER_TEMPERATURE_MAX)
 
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
 /********************* END TEMPERATURE ***************************************/
 
 /********************* START TXPOWER *****************************************/
index 83f8ed8a5528cbd209c97560e9ab9c59d2294a90..ad123d66ab6c5c13e2bd120df7b3c2b412d1400a 100644 (file)
@@ -858,9 +858,9 @@ struct il_hw_params {
  * il4965_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
 extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
 static inline int
 il_queue_used(const struct il_queue *q, int i)
 {
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
 void il_free_txq_mem(struct il_priv *il);
 
 #ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
 #else
 static inline void
 il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
 /*****************************************************
 * TX
 ******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
 /*****************************************************
  * TX power
  ****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
        return il_is_ready(il);
 }
 
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
 
 int il_apm_init(struct il_priv *il);
 
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
 
 irqreturn_t il_isr(int irq, void *data);
 
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
 
 static inline void
 _il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
  * The specific throughput table used is based on the type of network
  * the station is associated with, including A, B, G, and G w/ TGG protection
  */
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
 
 /* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                               u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                               u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                        u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                        u8 sta_id);
 
 /**
  * il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
  * ieee80211_register_hw
  *
  */
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
 
 /**
  * il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
 
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
 
 extern u32 il_debug_level;
 
index f2a86ffc3b4cf09440874c88b6f0cc3d804ec3a6..23d5f0275ce98e1cde6c05e7b9a1654c870774ea 100644 (file)
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
        return cpu_to_le32(flags|(u32)rate);
 }
 
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
index a79fdd137f956ce5cb4de2ef7e1c0f239e76895a..7434d9edf3b773566530b79eb542a8431bbfb24c 100644 (file)
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
  * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
  *
  ****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
 extern const u8 iwl_bcast_addr[ETH_ALEN];
 
 #define IWL_OPERATION_MODE_AUTO     0
index 5d83cab22d625084389b8880b46a4abd21fb9f44..26fc550cd68c58ae24342232bda51be2dfd690a5 100644 (file)
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
 
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
-                            struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                     u8 sta_id);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
  * ieee80211_register_hw
  *
  */
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
 
 #endif /* __iwl_agn__rs__ */
index da442b81370a769054b4b77a946daa4bbd856ed8..1fef5240e6adc07317b7128bf7d7617385fa9aa8 100644 (file)
@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);
 
+       txq_id = info->hw_queue;
+
        if (is_agg)
                txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
        else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
-               /*
-                * Send this frame after DTIM -- there's a special queue
-                * reserved for this for contexts that support AP mode.
-                */
-               txq_id = ctx->mcast_queue;
-
                /*
                 * The microcode will clear the more data
                 * bit in the last frame it transmits.
                 */
                hdr->frame_control |=
                        cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-       } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-               txq_id = IWL_AUX_QUEUE;
-       else
-               txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+       }
 
-       WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
        WARN_ON_ONCE(is_agg &&
                     priv->queue_to_mac80211[txq_id] != info->hw_queue);
 
index 30d45e2fc193a5bc04987acbdef9b0252a2b5c01..8ac305be68f489be533606cb8260dbfd4dc5cfda 100644 (file)
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
        .ht_params = &iwl6000_ht_params,
 };
 
+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+       .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+       IWL_DEVICE_6035,
+       .ht_params = &iwl6000_ht_params,
+};
+
 const struct iwl_cfg iwl1030_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
        IWL_DEVICE_6030,
index 261e4a12fd8e1a99dba8f29fd0982b15f07e380c..18f232e8e81253b31d730755f242b7a51552157c 100644 (file)
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
 extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
 extern const struct iwl_cfg iwl2030_2bgn_cfg;
 extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
 extern const struct iwl_cfg iwl105_bgn_cfg;
 extern const struct iwl_cfg iwl105_bgn_d_cfg;
 extern const struct iwl_cfg iwl135_bgn_cfg;
index dd57a36ecb1005d7571290a59925fd4d641b61aa..c6bac7c90b00bf458b17a829ec2895c2acaa065d 100644 (file)
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 {
        int ret;
 
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               return -EIO;
+       }
 
        if (!(cmd->flags & CMD_ASYNC))
                lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@@ -638,8 +640,8 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
                               struct iwl_device_cmd *dev_cmd, int queue)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -647,16 +649,16 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
                                     int ssn, struct sk_buff_head *skbs)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        trans->ops->reclaim(trans, queue, ssn, skbs);
 }
 
 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        trans->ops->txq_disable(trans, queue);
 }
@@ -667,8 +669,8 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 {
        might_sleep();
 
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
                                 frame_limit, ssn);
@@ -683,8 +685,8 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        return trans->ops->wait_tx_queue_empty(trans);
 }
index 80d5f88a9d321b080c8cc63a4f0b58b0a35f0d0f..550824aa84ea4bde8794b267bda27401c6b6f058 100644 (file)
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                if (!mvmvif->queue_params[ac].uapsd)
                        continue;
 
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+               if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+                       cmd->flags |=
+                               cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
                cmd->uapsd_ac_flags |= BIT(ac);
 
                /* QNDP TID - the highest TID with no admission control */
index 721e6b31b712d77d1f7b8ba1103ceaaee8658703..5d5344f7070bf5de62a4cde464c573b43d01f8db 100644 (file)
@@ -352,9 +352,8 @@ static inline u8 num_of_ant(u8 mask)
 }
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
-                                struct ieee80211_sta *sta,
-                                enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                         enum ieee80211_band band);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -366,7 +365,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
  * ieee80211_register_hw
  *
  */
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -374,7 +373,7 @@ extern int iwl_mvm_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
index 778dcd9320fe32de4a3f6ecd1e78c23b841f7bd8..dff7592e1ff84e5c3f6a05d673e2a39514cbb870 100644 (file)
@@ -97,10 +97,10 @@ static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
 
 static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
 {
-       if (vif->bss_conf.assoc)
-               return cpu_to_le32(vif->bss_conf.beacon_int);
-       else
+       if (!vif->bss_conf.assoc)
                return 0;
+
+       return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
 }
 
 static inline __le32
@@ -423,6 +423,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
                        return false;
                }
 
+               /*
+                * If scan cannot be aborted, it means that we had a
+                * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
+                * ieee80211_scan_completed already.
+                */
                IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
                               *resp);
                return true;
@@ -446,14 +451,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
                                               SCAN_COMPLETE_NOTIFICATION };
        int ret;
 
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               return;
+
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
                                   scan_abort_notif,
                                   ARRAY_SIZE(scan_abort_notif),
                                   iwl_mvm_scan_abort_notif, NULL);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
+                                  CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+               /* mac80211's state will be cleaned in the fw_restart flow */
                goto out_remove_notif;
        }
 
index ddf15e1cffa21ad4129c3ce6a1a9441a20f1189c..941c0c88f982639b28436a60e52a0fdc6fba8c4b 100644 (file)
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6x00 Series */
        {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
        {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
 
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+       {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
        {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6x35 Series */
        {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
 
@@ -260,54 +270,86 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 #if IS_ENABLED(CONFIG_IWLMVM)
 /* 7260 Series */
        {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
 
 /* 3160 Series */
        {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
index b4168415538c592ea9dd84ab4e8006eceeb1fc56..f644fcf861a8c9ead756169ce4b6d960a8f51e1b 100644 (file)
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
                 * non-AGG queue.
                 */
                iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+               ssn = trans_pcie->txq[txq_id].q.read_ptr;
        }
 
        /* Place first TFD at index corresponding to start sequence number.
@@ -1463,7 +1465,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        spin_unlock_bh(&txq->lock);
 }
 
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT  (2 * HZ)
+#define COMMAND_POKE_TIMEOUT   (HZ / 10)
 
 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
                                    struct iwl_host_cmd *cmd)
@@ -1491,6 +1494,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int cmd_idx;
        int ret;
+       int timeout = HOST_COMPLETE_TIMEOUT;
 
        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
                       get_cmd_string(trans_pcie, cmd->id));
@@ -1515,10 +1519,29 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                return ret;
        }
 
-       ret = wait_event_timeout(trans_pcie->wait_command_queue,
-                                !test_bit(STATUS_HCMD_ACTIVE,
-                                          &trans_pcie->status),
-                                HOST_COMPLETE_TIMEOUT);
+       while (timeout > 0) {
+               unsigned long flags;
+
+               timeout -= COMMAND_POKE_TIMEOUT;
+               ret = wait_event_timeout(trans_pcie->wait_command_queue,
+                                        !test_bit(STATUS_HCMD_ACTIVE,
+                                                  &trans_pcie->status),
+                                        COMMAND_POKE_TIMEOUT);
+               if (ret)
+                       break;
+               /* poke the device - it may have lost the command */
+               if (iwl_trans_grab_nic_access(trans, true, &flags)) {
+                       iwl_trans_release_nic_access(trans, &flags);
+                       IWL_DEBUG_INFO(trans,
+                                      "Tried to wake NIC for command %s\n",
+                                      get_cmd_string(trans_pcie, cmd->id));
+               } else {
+                       IWL_ERR(trans, "Failed to poke NIC for command %s\n",
+                               get_cmd_string(trans_pcie, cmd->id));
+                       break;
+               }
+       }
+
        if (!ret) {
                if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
                        struct iwl_txq *txq =
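
The sync host-command path above no longer sleeps for the full 2 s in one go; it waits in HZ/10 slices and, whenever a slice expires with the command still active, grabs NIC access to wake the device in case the command was lost. Stripped of the iwlwifi specifics, the slicing loop looks roughly like this; the wait queue, status bit and poke() hook are hypothetical:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

#define DEMO_TOTAL_TIMEOUT      (2 * HZ)
#define DEMO_POKE_TIMEOUT       (HZ / 10)

/*
 * Wait up to 2 s for bit 0 of *status to clear, but wake up every HZ/10
 * jiffies to give the caller a chance to nudge the hardware.
 */
static int demo_wait_cmd_done(wait_queue_head_t *wq, unsigned long *status,
                              void (*poke)(void *ctx), void *ctx)
{
        int timeout = DEMO_TOTAL_TIMEOUT;
        long ret = 0;

        while (timeout > 0) {
                timeout -= DEMO_POKE_TIMEOUT;
                ret = wait_event_timeout(*wq, !test_bit(0, status),
                                         DEMO_POKE_TIMEOUT);
                if (ret)                /* condition met before the slice ran out */
                        break;
                poke(ctx);              /* slice expired: poke the device, retry */
        }

        return ret ? 0 : -ETIMEDOUT;
}
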
index 21c688264708316b34c51d196aa36fff5f5aaf01..1214c587fd08587f263b2c979eab9bc902b53ae1 100644 (file)
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
  */
 int
 mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
-                         struct mwifiex_ra_list_tbl *pra_list, int headroom,
+                         struct mwifiex_ra_list_tbl *pra_list,
                          int ptrindex, unsigned long ra_list_flags)
                          __releases(&priv->wmm.ra_list_spinlock)
 {
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        int pad = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;
+       int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
        skb_src = skb_peek(&pra_list->skb_head);
        if (!skb_src) {
index 900e1c62a0cceb4499457be3b76be7e9589be415..892098d6a69687dd2d8c1fc61612a6fb9999d754 100644 (file)
@@ -26,7 +26,7 @@
 int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
                                struct sk_buff *skb);
 int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
-                             struct mwifiex_ra_list_tbl *ptr, int headroom,
+                             struct mwifiex_ra_list_tbl *ptr,
                              int ptr_index, unsigned long flags)
                              __releases(&priv->wmm.ra_list_spinlock);
 
index fb3fa18390b8fb4bb31fdcdb45e7a54511f3433f..e47f4e3012b85344c7d400d78799634cb5bf0234 100644 (file)
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
        uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
 
        if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
-           adapter->iface_type == MWIFIEX_SDIO) {
+           adapter->iface_type != MWIFIEX_USB) {
                mwifiex_hs_activated_event(priv, true);
                return 0;
        } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
        }
        if (conditions != HS_CFG_CANCEL) {
                adapter->is_hs_configured = true;
-               if (adapter->iface_type == MWIFIEX_USB ||
-                   adapter->iface_type == MWIFIEX_PCIE)
+               if (adapter->iface_type == MWIFIEX_USB)
                        mwifiex_hs_activated_event(priv, true);
        } else {
                adapter->is_hs_configured = false;
index 717fbe2e0e5aea2b5d86482da86cce7b3ccc393d..4e4686e6ac092b3ce23282c575eadc2b40071eff 100644 (file)
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
  */
 int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
 {
+       int ret = 0;
+
        if (!priv->media_connected)
                return 0;
 
        switch (priv->bss_mode) {
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
-               return mwifiex_deauthenticate_infra(priv, mac);
+               ret = mwifiex_deauthenticate_infra(priv, mac);
+               if (ret)
+                       cfg80211_disconnected(priv->netdev, 0, NULL, 0,
+                                             GFP_KERNEL);
+               break;
        case NL80211_IFTYPE_ADHOC:
                return mwifiex_send_cmd_sync(priv,
                                             HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
                break;
        }
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
 
index 408f307694aab03c8e471a9704a4ab157406b634..9d7c9d354d34aeb9e0b3e4bf854fc6a999671958 100644 (file)
@@ -358,10 +358,12 @@ process_start:
                }
        } while (true);
 
-       if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto process_start;
+       }
 
-       spin_lock_irqsave(&adapter->main_proc_lock, flags);
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
index 8b057524b252e535307644ddaa98e12df75a35cd..8c351f71f72f9f6f6a17650198ba805bf7c1f5cd 100644 (file)
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        dev_dbg(adapter->dev,
                "info: successfully disconnected from %pM: reason code %d\n",
                priv->cfg_bssid, reason_code);
-       if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+       if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+           priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
                cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
                                      GFP_KERNEL);
        }
index 2472d4b7f00e997bc9e6b21586edcf42fe692498..1c70b8d092270ba3a456664aaf6ec3e9da4b3b59 100644 (file)
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
         */
        adapter->is_suspended = true;
 
-       for (i = 0; i < adapter->priv_num; i++)
-               netif_carrier_off(adapter->priv[i]->netdev);
-
        if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
                usb_kill_urb(card->rx_cmd.urb);
 
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
                                                  MWIFIEX_RX_CMD_BUF_SIZE);
        }
 
-       for (i = 0; i < adapter->priv_num; i++)
-               if (adapter->priv[i]->media_connected)
-                       netif_carrier_on(adapter->priv[i]->netdev);
-
        /* Disable Host Sleep */
        if (adapter->hs_activated)
                mwifiex_cancel_hs(mwifiex_get_priv(adapter,
index 8f8fea015cb44f4bf926d94f5f78286dd31f7ecc..5dd0ccc70b863ea15fad25d739adebd39ce09dbd 100644 (file)
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
                    mwifiex_is_11n_aggragation_possible(priv, ptr,
                                                        adapter->tx_buf_size))
-                       mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
-                                                 ptr_index, flags);
+                       mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
                        /* ra_list_spinlock has been released in
                           mwifiex_11n_aggregate_pkt() */
                else
index 644d6e0c51ccf235e5b457da85f57c64029e5c2d..0f129d498fb1057d898db2c6e92868d8ff1f3baf 100644 (file)
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 }
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
-                                       struct sk_buff *skb);
+                                struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
-                             struct mwifiex_ra_list_tbl *ra,
-                             int tid);
+                             struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
                            struct mwifiex_ra_list_tbl *ra_list, int tid);
 
 u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
-                                            const struct sk_buff *skb);
+                                    const struct sk_buff *skb);
 void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
 
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
-                                                u8 **assoc_buf,
-                                                struct ieee_types_wmm_parameter
-                                                *wmmie,
-                                                struct ieee80211_ht_cap
-                                                *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+                                       u8 **assoc_buf,
+                                       struct ieee_types_wmm_parameter *wmmie,
+                                       struct ieee80211_ht_cap *htcap);
 
 void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
-                                       struct ieee_types_wmm_parameter
-                                       *wmm_ie);
+                                       struct ieee_types_wmm_parameter *wmm_ie);
 void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
-                                     const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+                              const struct host_cmd_ds_command *resp);
 
 #endif /* !_MWIFIEX_WMM_H_ */
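The header hunks in this merge (mwifiex above, orinoco, rtlwifi and others below) drop the redundant `extern` keyword from function prototypes. In C, a file-scope function declaration has external linkage by default, so both spellings declare the same symbol. A minimal illustration with a hypothetical name, not taken from any of the patched headers:

    extern int example_get_status(void *priv);   /* old style: explicit extern */
    int example_get_status(void *priv);          /* new style: extern is implied */

Only the declaration style changes; callers and definitions are unaffected.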
index 3bb936b9558c7f96dedc215189f72b49a313b8d7..eebd2be21ee9067e7d90f45f32b227bb07601998 100644 (file)
@@ -182,23 +182,20 @@ extern int orinoco_debug;
 /* Exported prototypes                                              */
 /********************************************************************/
 
-extern struct orinoco_private *alloc_orinocodev(
-       int sizeof_card, struct device *device,
-       int (*hard_reset)(struct orinoco_private *),
-       int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
-                         unsigned long base_addr,
-                         unsigned int irq,
-                         const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+                                        int (*hard_reset)(struct orinoco_private *),
+                                        int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+                  unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
 
 int orinoco_process_xmit_skb(struct sk_buff *skb,
                             struct net_device *dev,
index b9deef66cf4b8179893b918fbd0fb3365480a686..e328d3058c419a0c7c4c367fd7e62ada31ddded4 100644 (file)
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
        {USB_DEVICE(0x06a9, 0x000e)},   /* Westell 802.11g USB (A90-211WG-01) */
        {USB_DEVICE(0x06b9, 0x0121)},   /* Thomson SpeedTouch 121g */
        {USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+       {USB_DEVICE(0x07aa, 0x0020)},   /* Corega WLUSB2GTST USB */
        {USB_DEVICE(0x0803, 0x4310)},   /* Zoom 4410a */
        {USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
        {USB_DEVICE(0x083a, 0x4531)},   /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
        if (err) {
                dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
                                          "(%d)!\n", p54u_fwlist[i].fw, err);
+               usb_put_dev(udev);
        }
 
        return err;
index 1c22b81e6ef35e30f86afc994445ff66ef4f7c36..8863a6cb2388d952926cf8fe209a8b67fa004ddb 100644 (file)
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
        data = r.ptr;
 
        /* copy this MAC to the bss */
-       memcpy(bss.address, data, 6);
+       memcpy(bss.address, data, ETH_ALEN);
        kfree(data);
 
        /* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
                return -EINVAL;
 
        /* prepare the structure for the set object */
-       memcpy(&bssid[0], awrq->sa_data, 6);
+       memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
 
        /* set the bssid -- does this make sense when in AP mode? */
        rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
        int rvalue;
 
        rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
-       memcpy(awrq->sa_data, r.ptr, 6);
+       memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
        awrq->sa_family = ARPHRD_ETHER;
        kfree(r.ptr);
 
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
        size_t wpa_ie_len;
 
        /* The first entry must be the MAC address */
-       memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+       memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
        iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
        iwe.cmd = SIOCGIWAP;
        current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
                              &((struct sockaddr *) addr)->sa_data);
        if (!ret)
                memcpy(priv->ndev->dev_addr,
-                      &((struct sockaddr *) addr)->sa_data, 6);
+                      &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
 
        return ret;
 }
index 5970ff6f40cc315dd253054c84f031cceda01170..41a16d30c79c5be46f89229d3185a033d8131f0f 100644 (file)
@@ -837,7 +837,7 @@ islpci_setup(struct pci_dev *pdev)
        /* ndev->set_multicast_list = &islpci_set_multicast_list; */
        ndev->addr_len = ETH_ALEN;
        /* Get a non-zero dummy MAC address for nameif. Jean II */
-       memcpy(ndev->dev_addr, dummy_mac, 6);
+       memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
 
        ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
 
index a01606b36e03f836a5cc57ab292dd47f2ba2352f..056af38e72e399d88528343e978fec15d512f180 100644 (file)
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
                                     isl_oid[GEN_OID_MACADDRESS].size, &res);
 
        if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
-               memcpy(priv->ndev->dev_addr, res->data, 6);
+               memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
        else
                ret = -EIO;
        if (res)
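The prism54 hunks above replace the magic number 6 with ETH_ALEN, the Ethernet hardware address length defined in <linux/if_ether.h>. A minimal userspace sketch of the same idiom (buffer names are illustrative, not from the driver):

    #include <linux/if_ether.h>   /* defines ETH_ALEN == 6 */
    #include <string.h>

    /* Copy a MAC address using the named constant instead of a literal 6. */
    static void copy_mac(unsigned char dst[ETH_ALEN],
                         const unsigned char src[ETH_ALEN])
    {
            memcpy(dst, src, ETH_ALEN);
    }

Tying every copy length to the single named constant also makes address-sized buffers easy to audit.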
index 3d53a09da5a12da22f2697c9a22b7dfb49daaff0..38ed9a3e44c8c00c06ea8f1cdd9ae1a68bbe2d60 100644 (file)
@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
         */
        rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
        rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
-       rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
+       rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
            entry->queue->rt2x00dev->rssi_offset;
        rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
 
index 96677ce55da46378136a2555062664133c7af931..997df03a0c2e22abd46bd274bf13f4b1e4f81f01 100644 (file)
@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
        return false;
 }
 
+#define TXSTATUS_READ_INTERVAL 1000000
+
 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
                                                 int urb_status, u32 tx_status)
 {
@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
                queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 
        if (rt2800usb_txstatus_pending(rt2x00dev)) {
-               /* Read register after 250 us */
-               hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+               /* Read register after 1 ms */
+               hrtimer_start(&rt2x00dev->txstatus_timer,
+                             ktime_set(0, TXSTATUS_READ_INTERVAL),
                              HRTIMER_MODE_REL);
                return false;
        }
@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
        if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
                return;
 
-       /* Read TX_STA_FIFO register after 500 us */
-       hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+       /* Read TX_STA_FIFO register after 2 ms */
+       hrtimer_start(&rt2x00dev->txstatus_timer,
+                     ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
                      HRTIMER_MODE_REL);
 }
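In the rt2800usb hunks above, the second argument to ktime_set() is in nanoseconds, so TXSTATUS_READ_INTERVAL (1,000,000 ns) matches the "1 ms" comment and 2*TXSTATUS_READ_INTERVAL the "2 ms" one. A standalone sketch of the arithmetic, not driver code:

    #include <stdio.h>

    #define TXSTATUS_READ_INTERVAL 1000000UL   /* nanoseconds, as in the hunk */

    int main(void)
    {
            printf("completion poll: %lu ns = %lu ms\n",
                   TXSTATUS_READ_INTERVAL, TXSTATUS_READ_INTERVAL / 1000000UL);
            printf("initial poll:    %lu ns = %lu ms\n",
                   2 * TXSTATUS_READ_INTERVAL,
                   2 * TXSTATUS_READ_INTERVAL / 1000000UL);
            return 0;
    }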
 
index 51f17cfb93f9aa48c255cd4d0ea8c03a359f071b..7c157857f5cee925e796a49396a5843a758a01cd 100644 (file)
@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_queue *queue;
 
+       if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+               return;
+
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_flush_queue(queue, drop);
 }
index 6c5d667103c4966606940af0976312165a28bc96..25da20e7e1f34ab2c0b1fd3e38a82d1faece0f9e 100644 (file)
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
                goto exit_release_regions;
        }
 
-       pci_enable_msi(pci_dev);
-
        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                rt2x00_probe_err("Failed to allocate hardware\n");
                retval = -ENOMEM;
-               goto exit_disable_msi;
+               goto exit_release_regions;
        }
 
        pci_set_drvdata(pci_dev, hw);
@@ -152,9 +150,6 @@ exit_free_reg:
 exit_free_device:
        ieee80211_free_hw(hw);
 
-exit_disable_msi:
-       pci_disable_msi(pci_dev);
-
 exit_release_regions:
        pci_release_regions(pci_dev);
 
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
        rt2x00pci_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);
 
-       pci_disable_msi(pci_dev);
-
        /*
         * Free the PCI device data.
         */
index 35e00086a520bf06393237b3aebf0aa3bbb9e617..0105e6c1901ed96fc6be6cfd4900b2285ed0835a 100644 (file)
 #define        CAM_CONFIG_USEDK                                1
 #define        CAM_CONFIG_NO_USEDK                             0
 
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-                       u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
-                       u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+                        u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+                        u32 ul_default_key, u8 *key_content);
 int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-                       u32 ul_key_id);
+                            u32 ul_key_id);
 void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
index 733b7ce7f0e2a981f442cd9cdadd8f40a6426558..210ce7cd94d8d14201a68ce285e9c880993d30db 100644 (file)
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_lock(&rtlpriv->locks.conf_mutex);
 
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, 6);
+       memset(mac->bssid, 0, ETH_ALEN);
        mac->vendor = PEER_UNKNOWN;
 
        /*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
        mac->p2p = 0;
        mac->vif = NULL;
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, 6);
+       memset(mac->bssid, 0, ETH_ALEN);
        mac->vendor = PEER_UNKNOWN;
        mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
        rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        mac->link_state = MAC80211_LINKED;
                        mac->cnt_after_linked = 0;
                        mac->assoc_id = bss_conf->aid;
-                       memcpy(mac->bssid, bss_conf->bssid, 6);
+                       memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
 
                        if (rtlpriv->cfg->ops->linked_set_reg)
                                rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
                                rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
                        mac->link_state = MAC80211_NOLINK;
-                       memset(mac->bssid, 0, 6);
+                       memset(mac->bssid, 0, ETH_ALEN);
                        mac->vendor = PEER_UNKNOWN;
 
                        if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                         bss_conf->bssid);
 
                mac->vendor = PEER_UNKNOWN;
-               memcpy(mac->bssid, bss_conf->bssid, 6);
+               memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
                rtlpriv->cfg->ops->set_network_type(hw, vif->type);
 
                rcu_read_lock();
index 395a326acfb44a0874b331b64442d23b5e120d1a..1663b3afd41e90e67512823f0c5a0f7ee02ef80b 100644 (file)
@@ -104,20 +104,19 @@ struct efuse_priv {
        u8 tx_power_g[14];
 };
 
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
-                      u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
-                             u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
-                              u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+                      u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+                       u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
 
 #endif
index d4545f06e185fb0aea5f4889ce275a98de7d105b..89f0f1ef14657ae467bea1445658cfd202c31da6 100644 (file)
@@ -200,35 +200,35 @@ enum _ANT_DIV_TYPE {
        CGCS_RX_SW_ANTDIV               = 0x05,
 };
 
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                 enum radio_path rfpath, u32 regaddr,
-                                 u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                        long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
+                           u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+                          u32 regaddr, u32 bitmask, u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr,
+                           u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath, u32 regaddr,
+                          u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                 long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
 void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath);
 bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
 
 #endif
index aeb268b190c6aa803618facefa899f455b3f0bad..94486cca400077515ced7c81e0c35922af97e425 100644 (file)
@@ -186,34 +186,29 @@ struct tx_power_struct {
 };
 
 bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+                          u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask, u32 data);
 bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
 bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
+                                         enum radio_path rfpath);
 void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                        long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
 void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
 bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
                                          long power_indbm);
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
+                           enum nl80211_channel_type ch_type);
 void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
 u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
 void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
-                                        u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
 void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
 void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
 void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -221,27 +216,25 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath);
 bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
-                                             u32 rfpath);
+                                      u32 rfpath);
 bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+                                   enum rf_pwrstate rfpwr_state);
 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
 void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+                              u32 offset);
 u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset);
+                                 enum radio_path rfpath, u32 offset);
 u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
 void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data);
+                                enum radio_path rfpath, u32 offset, u32 data);
 void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data);
+                                   enum radio_path rfpath, u32 offset,
+                                   u32 data);
 void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
-                                                  u32 regaddr, u32 bitmask,
-                                                  u32 data);
+                                           u32 regaddr, u32 bitmask, u32 data);
 bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
 void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
 bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
index 6c8d56efceae2fd8e76e8e128fea6e720241cf12..d8fe68b389d213e17b133b7044fec647b315f2b8 100644 (file)
 #define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                            u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                               u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                        u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
 #endif
index 090fd33a158db1635c14d3ea323a5363f7c82169..11b439d6b67167c11e1529919bc5f07dfe96e060 100644 (file)
 #define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
 bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
 bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
-                                         enum radio_path rfpath);
+                                          enum radio_path rfpath);
 void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                        u8 *ppowerlevel);
 void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
index 04c7e57dbce2f5165626c15344c23b348f58abbf..25e50ffc44ec8b42fd35b4ab0b60a08ee09edd7d 100644 (file)
@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
                                        (bool)GET_RX_DESC_PAGGR(pdesc));
        rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
        if (phystatus) {
-               p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+               p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+                                                    stats->rx_bufshift);
                rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
                                                 p_drvinfo);
        }
index 7c9f7a2f1e427d2bac52cfacdea38c949f390a2e..1bc7b1a96d4aebe5577067460e56fa698405b3a8 100644 (file)
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
                     u8 *p_macaddr, bool is_group, u8 enc_algo,
                     bool is_wepkey, bool clear_all);
 
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-                                   u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-                                 u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+                            u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
 void rtl92de_suspend(struct ieee80211_hw *hw);
 void rtl92de_resume(struct ieee80211_hw *hw);
 void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
index bef3040555dd56f11c075fd896e4a5d5d0e34252..48d5c6835b6a98a950daf3f14a5330606a656ba5 100644 (file)
@@ -125,32 +125,32 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
                        *flag);
 }
 
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                 enum radio_path rfpath, u32 regaddr,
-                                 u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+                           u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+                          u32 regaddr, u32 bitmask, u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr,
+                           u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath, u32 regaddr,
+                          u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
 bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum rf_content content,
                                          enum radio_path rfpath);
 bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
 
 void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
 void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
index 0fe1a48593e8b90dd797335efed9fd9e38598ef6..7303d12c266fbe2ed44d55aa7e46f689e1acc2c2 100644 (file)
 #ifndef __RTL92D_RF_H__
 #define __RTL92D_RF_H__
 
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
-                                           bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
 
 #endif
index 3d8f9e3aad76ccef40632c44bbb24004cc57c999..007ebdbbe108623f88c163e77d3e548eabc00eec 100644 (file)
@@ -183,40 +183,40 @@ struct tx_power_struct {
        u32 mcs_original_offset[4][16];
 };
 
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                     u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                    u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 regaddr,
-                                     u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                    enum radio_path rfpath, u32 regaddr,
-                                    u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                           long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
-                                           u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
-                                            long power_indbm);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                     enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+                              u32 regaddr, u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+                             u32 regaddr, u32 bitmask, u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+                              enum radio_path rfpath, u32 regaddr,
+                              u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+                             enum radio_path rfpath, u32 regaddr,
+                             u32 bitmask, u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                    long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+                                    u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+                                     long power_indbm);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+                              enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
 void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                             enum radio_path rfpath);
 bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                            enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                     enum rf_pwrstate rfpwr_state);
 
 #endif
index d0f9dd79abea6f9f40ed12a1c8cc3eeea337bd68..57f1933ee663e48b2433ef887a0b361a107e9770 100644 (file)
 
 #define RF6052_MAX_TX_PWR              0x3F
 
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                         u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                          u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
 
 #endif
index 96763dcff5ae19283b5962f17efeffdac705e8cf..d224dc3bb092b0ef04545cc57891a7bdb4de4b2f 100644 (file)
@@ -2055,7 +2055,7 @@ struct rtl_priv {
           that it points to the data allocated
           beyond  this structure like:
           rtl_pci_priv or rtl_usb_priv */
-       u8 priv[0];
+       u8 priv[0] __aligned(sizeof(void *));
 };
 
 #define rtl_priv(hw)           (((struct rtl_priv *)(hw)->priv))
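The rtl_priv change above aligns the zero-length priv[] array to sizeof(void *) so that the driver-private structure placed immediately after struct rtl_priv starts on a pointer-sized boundary. A minimal sketch of the pattern (illustrative types, not the rtlwifi definitions):

    /* Common structure with driver-private data appended after it. */
    struct common_priv {
            int refcount;
            unsigned char priv[0] __attribute__((aligned(sizeof(void *))));
    };

    struct driver_priv {
            void *hw;       /* pointer members want natural alignment */
            long tx_stats;
    };

    /* Because priv[] is pointer-aligned, this cast is safe on all arches. */
    static inline struct driver_priv *to_driver_priv(struct common_priv *c)
    {
            return (struct driver_priv *)c->priv;
    }

Without the alignment attribute, the offset of priv[] depends only on the preceding members, which can leave the appended structure misaligned on strict-alignment architectures.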
index 5715318d6bab3b4c7905c7c69ee549e39636c6cc..55b8dec86233c60f9b3e1dcf96006d29ca5361c3 100644 (file)
@@ -87,9 +87,13 @@ struct pending_tx_info {
 struct xenvif_rx_meta {
        int id;
        int size;
+       int gso_type;
        int gso_size;
 };
 
+#define GSO_BIT(type) \
+       (1 << XEN_NETIF_GSO_TYPE_ ## type)
+
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
 
@@ -150,10 +154,12 @@ struct xenvif {
        u8               fe_dev_addr[6];
 
        /* Frontend feature information. */
+       int gso_mask;
+       int gso_prefix_mask;
+
        u8 can_sg:1;
-       u8 gso:1;
-       u8 gso_prefix:1;
-       u8 csum:1;
+       u8 ip_csum:1;
+       u8 ipv6_csum:1;
 
        /* Internal feature information. */
        u8 can_queue:1;     /* can queue packets for receiver? */
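The common.h hunks above replace the single gso/gso_prefix flags with per-type bitmasks and add a gso_type field to each RX meta slot. GSO_BIT() pastes its argument onto XEN_NETIF_GSO_TYPE_, so GSO_BIT(TCPV4) expands to (1 << XEN_NETIF_GSO_TYPE_TCPV4). A compact sketch of the mask scheme; the constants below are stand-ins for illustration, the real values come from the Xen netif interface header:

    #define XEN_NETIF_GSO_TYPE_NONE   0
    #define XEN_NETIF_GSO_TYPE_TCPV4  1
    #define XEN_NETIF_GSO_TYPE_TCPV6  2

    #define GSO_BIT(type) \
            (1 << XEN_NETIF_GSO_TYPE_ ## type)

    /* A frontend advertising both offloads would leave gso_mask looking like: */
    static const int example_gso_mask = GSO_BIT(TCPV4) | GSO_BIT(TCPV6);

    /* ... and each per-skb check collapses to one mask test, as in the
     * xenvif_gop_frag_copy() hunk below: (1 << gso_type) & vif->gso_mask */
    static inline int gso_type_supported(int gso_mask, int gso_type)
    {
            return (1 << gso_type) & gso_mask;
    }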
index 01bb854c7f62bfc281dfdc0081829237f2f843d6..e4aa26748f806f175808564180d6e593d8b92954 100644 (file)
@@ -214,10 +214,14 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
 
        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
-       if (!vif->gso && !vif->gso_prefix)
+       if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
-       if (!vif->csum)
+       if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+               features &= ~NETIF_F_TSO6;
+       if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
+       if (!vif->ipv6_csum)
+               features &= ~NETIF_F_IPV6_CSUM;
 
        return features;
 }
@@ -306,7 +310,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
-       vif->csum = 1;
+       vif->ip_csum = 1;
        vif->dev = dev;
 
        vif->credit_bytes = vif->remaining_credit = ~0UL;
@@ -316,8 +320,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->credit_timeout.expires = jiffies;
 
        dev->netdev_ops = &xenvif_netdev_ops;
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
-       dev->features = dev->hw_features;
+       dev->hw_features = NETIF_F_SG |
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+               NETIF_F_TSO | NETIF_F_TSO6;
+       dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
index f3e591c611ded744ec0b6ea2cefe9b7dd3d568c7..828fdab4f1a4b08ae456f8223e02218486cfc43c 100644 (file)
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 headers and, in most
+ * cases, IPv6 headers as well.
  */
-#define PKT_PROT_LEN    (ETH_HLEN + \
-                        VLAN_HLEN + \
-                        sizeof(struct iphdr) + MAX_IPOPTLEN + \
-                        sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
        int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
        /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-       if (vif->can_sg || vif->gso || vif->gso_prefix)
+       if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
                max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
        return max;
@@ -317,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 
        meta = npo->meta + npo->meta_prod++;
+       meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
        meta->gso_size = 0;
        meta->size = 0;
        meta->id = req->id;
@@ -339,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
        unsigned long bytes;
+       int gso_type;
 
        /* Data must not cross a page boundary. */
        BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                }
 
                /* Leave a gap for the GSO descriptor. */
-               if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+                       gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+               else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                       gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+               else
+                       gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+               if (*head && ((1 << gso_type) & vif->gso_mask))
                        vif->rx.req_cons++;
 
                *head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        unsigned char *data;
        int head = 1;
        int old_meta_prod;
+       int gso_type;
+       int gso_size;
 
        old_meta_prod = npo->meta_prod;
 
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+               gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+               gso_size = skb_shinfo(skb)->gso_size;
+       } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+               gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+               gso_size = skb_shinfo(skb)->gso_size;
+       } else {
+               gso_type = XEN_NETIF_GSO_TYPE_NONE;
+               gso_size = 0;
+       }
+
        /* Set up a GSO prefix descriptor, if necessary */
-       if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+       if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
                req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
                meta = npo->meta + npo->meta_prod++;
-               meta->gso_size = skb_shinfo(skb)->gso_size;
+               meta->gso_type = gso_type;
+               meta->gso_size = gso_size;
                meta->size = 0;
                meta->id = req->id;
        }
@@ -443,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;
 
-       if (!vif->gso_prefix)
-               meta->gso_size = skb_shinfo(skb)->gso_size;
-       else
+       if ((1 << gso_type) & vif->gso_mask) {
+               meta->gso_type = gso_type;
+               meta->gso_size = gso_size;
+       } else {
+               meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
                meta->gso_size = 0;
+       }
 
        meta->size = 0;
        meta->id = req->id;
@@ -592,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
                vif = netdev_priv(skb->dev);
 
-               if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+               if ((1 << vif->meta[npo.meta_cons].gso_type) &
+                   vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&vif->rx,
                                                 vif->rx.rsp_prod_pvt++);
 
@@ -629,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
                                        vif->meta[npo.meta_cons].size,
                                        flags);
 
-               if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+               if ((1 << vif->meta[npo.meta_cons].gso_type) &
+                   vif->gso_mask) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
+                       gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
                        gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
-                       gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;
 
@@ -1101,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
                return -EINVAL;
        }
 
-       /* Currently only TCPv4 S.O. is supported. */
-       if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+       switch (gso->u.gso.type) {
+       case XEN_NETIF_GSO_TYPE_TCPV4:
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+               break;
+       case XEN_NETIF_GSO_TYPE_TCPV6:
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+               break;
+       default:
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        skb_shinfo(skb)->gso_size = gso->u.gso.size;
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 
        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
        return 0;
 }
 
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+       if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+               /* If we need to pullup then pullup to the max, so we
+                * won't need to do it again.
+                */
+               int target = min_t(int, skb->len, MAX_TCP_HEADER);
+               __pskb_pull_tail(skb, target - skb_headlen(skb));
+       }
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+                            int recalculate_partial_csum)
 {
-       struct iphdr *iph;
+       struct iphdr *iph = (void *)skb->data;
+       unsigned int header_size;
+       unsigned int off;
        int err = -EPROTO;
-       int recalculate_partial_csum = 0;
 
-       /*
-        * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
-        * peers can fail to set NETRXF_csum_blank when sending a GSO
-        * frame. In this case force the SKB to CHECKSUM_PARTIAL and
-        * recalculate the partial checksum.
-        */
-       if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
-               vif->rx_gso_checksum_fixup++;
-               skb->ip_summed = CHECKSUM_PARTIAL;
-               recalculate_partial_csum = 1;
-       }
+       off = sizeof(struct iphdr);
 
-       /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return 0;
+       header_size = skb->network_header + off + MAX_IPOPTLEN;
+       maybe_pull_tail(skb, header_size);
 
-       if (skb->protocol != htons(ETH_P_IP))
-               goto out;
+       off = iph->ihl * 4;
 
-       iph = (void *)skb->data;
        switch (iph->protocol) {
        case IPPROTO_TCP:
-               if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+               if (!skb_partial_csum_set(skb, off,
                                          offsetof(struct tcphdr, check)))
                        goto out;
 
                if (recalculate_partial_csum) {
                        struct tcphdr *tcph = tcp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct tcphdr);
+                       maybe_pull_tail(skb, header_size);
+
                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                        skb->len - iph->ihl*4,
+                                                        skb->len - off,
                                                         IPPROTO_TCP, 0);
                }
                break;
        case IPPROTO_UDP:
-               if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+               if (!skb_partial_csum_set(skb, off,
                                          offsetof(struct udphdr, check)))
                        goto out;
 
                if (recalculate_partial_csum) {
                        struct udphdr *udph = udp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct udphdr);
+                       maybe_pull_tail(skb, header_size);
+
                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                        skb->len - iph->ihl*4,
+                                                        skb->len - off,
                                                         IPPROTO_UDP, 0);
                }
                break;
        default:
                if (net_ratelimit())
                        netdev_err(vif->dev,
-                                  "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+                                  "Attempting to checksum a non-TCP/UDP packet, "
+                                  "dropping a protocol %d packet\n",
                                   iph->protocol);
                goto out;
        }
@@ -1183,6 +1226,158 @@ out:
        return err;
 }
 
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+                              int recalculate_partial_csum)
+{
+       int err = -EPROTO;
+       struct ipv6hdr *ipv6h = (void *)skb->data;
+       u8 nexthdr;
+       unsigned int header_size;
+       unsigned int off;
+       bool fragment;
+       bool done;
+
+       fragment = false;
+       done = false;
+
+       off = sizeof(struct ipv6hdr);
+
+       header_size = skb->network_header + off;
+       maybe_pull_tail(skb, header_size);
+
+       nexthdr = ipv6h->nexthdr;
+
+       while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+              !done) {
+               switch (nexthdr) {
+               case IPPROTO_DSTOPTS:
+               case IPPROTO_HOPOPTS:
+               case IPPROTO_ROUTING: {
+                       struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct ipv6_opt_hdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       nexthdr = hp->nexthdr;
+                       off += ipv6_optlen(hp);
+                       break;
+               }
+               case IPPROTO_AH: {
+                       struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct ip_auth_hdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       nexthdr = hp->nexthdr;
+                       off += (hp->hdrlen+2)<<2;
+                       break;
+               }
+               case IPPROTO_FRAGMENT:
+                       fragment = true;
+                       /* fall through */
+               default:
+                       done = true;
+                       break;
+               }
+       }
+
+       if (!done) {
+               if (net_ratelimit())
+                       netdev_err(vif->dev, "Failed to parse packet header\n");
+               goto out;
+       }
+
+       if (fragment) {
+               if (net_ratelimit())
+                       netdev_err(vif->dev, "Packet is a fragment!\n");
+               goto out;
+       }
+
+       switch (nexthdr) {
+       case IPPROTO_TCP:
+               if (!skb_partial_csum_set(skb, off,
+                                         offsetof(struct tcphdr, check)))
+                       goto out;
+
+               if (recalculate_partial_csum) {
+                       struct tcphdr *tcph = tcp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct tcphdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+                                                      &ipv6h->daddr,
+                                                      skb->len - off,
+                                                      IPPROTO_TCP, 0);
+               }
+               break;
+       case IPPROTO_UDP:
+               if (!skb_partial_csum_set(skb, off,
+                                         offsetof(struct udphdr, check)))
+                       goto out;
+
+               if (recalculate_partial_csum) {
+                       struct udphdr *udph = udp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct udphdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+                                                      &ipv6h->daddr,
+                                                      skb->len - off,
+                                                      IPPROTO_UDP, 0);
+               }
+               break;
+       default:
+               if (net_ratelimit())
+                       netdev_err(vif->dev,
+                                  "Attempting to checksum a non-TCP/UDP packet, "
+                                  "dropping a protocol %d packet\n",
+                                  nexthdr);
+               goto out;
+       }
+
+       err = 0;
+
+out:
+       return err;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+       int err = -EPROTO;
+       int recalculate_partial_csum = 0;
+
+       /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+        * peers can fail to set NETRXF_csum_blank when sending a GSO
+        * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+        * recalculate the partial checksum.
+        */
+       if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+               vif->rx_gso_checksum_fixup++;
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               recalculate_partial_csum = 1;
+       }
+
+       /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
+
+       return err;
+}
+
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 {
        unsigned long now = jiffies;
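
Roughly how checksum_setup() is consumed on the transmit path (a condensed sketch, assuming the usual drop-on-failure handling in xenvif_tx_submit(); not a literal quote of the patch):

	if (checksum_setup(vif, skb)) {
		netdev_dbg(vif->dev, "Can't set up checksum\n");
		kfree_skb(skb);
		continue;	/* inside the tx submit loop */
	}
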
@@ -1428,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
 
                xenvif_fill_frags(vif, skb);
 
-               /*
-                * If the initial fragment was < PKT_PROT_LEN then
-                * pull through some bytes from the other fragments to
-                * increase the linear region to PKT_PROT_LEN bytes.
-                */
-               if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+               if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
                }
index a53782ef154078717cbcff039a010e93e559fa58..f0358992b04fa820cb083300765271b92823320a 100644 (file)
 struct backend_info {
        struct xenbus_device *dev;
        struct xenvif *vif;
+
+       /* This is the state that will be reflected in xenstore when any
+        * active hotplug script completes.
+        */
+       enum xenbus_state state;
+
        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;
@@ -33,11 +39,15 @@ static int connect_rings(struct backend_info *);
 static void connect(struct backend_info *);
 static void backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+                             enum xenbus_state state);
 
 static int netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
+       set_backend_state(be, XenbusStateClosed);
+
        unregister_hotplug_status_watch(be);
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
@@ -95,6 +105,22 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
+               err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+                                   "%d", sg);
+               if (err) {
+                       message = "writing feature-gso-tcpv6";
+                       goto abort_transaction;
+               }
+
+               /* We support partial checksum setup for IPv6 packets */
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "feature-ipv6-csum-offload",
+                                   "%d", 1);
+               if (err) {
+                       message = "writing feature-ipv6-csum-offload";
+                       goto abort_transaction;
+               }
+
                /* We support rx-copy path. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-copy", "%d", 1);
@@ -136,6 +162,8 @@ static int netback_probe(struct xenbus_device *dev,
        if (err)
                goto fail;
 
+       be->state = XenbusStateInitWait;
+
        /* This kicks hotplug scripts, so do it immediately. */
        backend_create_xenvif(be);
 
@@ -208,24 +236,113 @@ static void backend_create_xenvif(struct backend_info *be)
        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
 }
 
-
-static void disconnect_backend(struct xenbus_device *dev)
+static void backend_disconnect(struct backend_info *be)
 {
-       struct backend_info *be = dev_get_drvdata(&dev->dev);
-
        if (be->vif)
                xenvif_disconnect(be->vif);
 }
 
-static void destroy_backend(struct xenbus_device *dev)
+static void backend_connect(struct backend_info *be)
 {
-       struct backend_info *be = dev_get_drvdata(&dev->dev);
+       if (be->vif)
+               connect(be);
+}
 
-       if (be->vif) {
-               kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-               xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
-               xenvif_free(be->vif);
-               be->vif = NULL;
+static inline void backend_switch_state(struct backend_info *be,
+                                       enum xenbus_state state)
+{
+       struct xenbus_device *dev = be->dev;
+
+       pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
+       be->state = state;
+
+       /* If we are waiting for a hotplug script then defer the
+        * actual xenbus state change.
+        */
+       if (!be->have_hotplug_status_watch)
+               xenbus_switch_state(dev, state);
+}
+
+/* Handle backend state transitions:
+ *
+ * The backend state starts in InitWait and the following transitions are
+ * allowed.
+ *
+ * InitWait -> Connected
+ *
+ *    ^    \         |
+ *    |     \        |
+ *    |      \       |
+ *    |       \      |
+ *    |        \     |
+ *    |         \    |
+ *    |          V   V
+ *
+ *  Closed  <-> Closing
+ *
+ * The state argument specifies the eventual state of the backend and the
+ * function transitions to that state via the shortest path.
+ */
+static void set_backend_state(struct backend_info *be,
+                             enum xenbus_state state)
+{
+       while (be->state != state) {
+               switch (be->state) {
+               case XenbusStateClosed:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateConnected:
+                               pr_info("%s: prepare for reconnect\n",
+                                       be->dev->nodename);
+                               backend_switch_state(be, XenbusStateInitWait);
+                               break;
+                       case XenbusStateClosing:
+                               backend_switch_state(be, XenbusStateClosing);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               case XenbusStateInitWait:
+                       switch (state) {
+                       case XenbusStateConnected:
+                               backend_connect(be);
+                               backend_switch_state(be, XenbusStateConnected);
+                               break;
+                       case XenbusStateClosing:
+                       case XenbusStateClosed:
+                               backend_switch_state(be, XenbusStateClosing);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               case XenbusStateConnected:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateClosing:
+                       case XenbusStateClosed:
+                               backend_disconnect(be);
+                               backend_switch_state(be, XenbusStateClosing);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               case XenbusStateClosing:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateConnected:
+                       case XenbusStateClosed:
+                               backend_switch_state(be, XenbusStateClosed);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               default:
+                       BUG();
+               }
        }
 }
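
A brief illustration of the "shortest path" walk performed by the loop above (not part of the patch): requesting Closed while Connected takes two iterations.

	/* be->state == XenbusStateConnected */
	set_backend_state(be, XenbusStateClosed);
	/* iteration 1: Connected -> Closing (backend_disconnect() runs)
	 * iteration 2: Closing   -> Closed
	 */
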
 
@@ -237,40 +354,33 @@ static void frontend_changed(struct xenbus_device *dev,
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
-       pr_debug("frontend state %s\n", xenbus_strstate(frontend_state));
+       pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
 
        be->frontend_state = frontend_state;
 
        switch (frontend_state) {
        case XenbusStateInitialising:
-               if (dev->state == XenbusStateClosed) {
-                       pr_info("%s: prepare for reconnect\n", dev->nodename);
-                       xenbus_switch_state(dev, XenbusStateInitWait);
-               }
+               set_backend_state(be, XenbusStateInitWait);
                break;
 
        case XenbusStateInitialised:
                break;
 
        case XenbusStateConnected:
-               if (dev->state == XenbusStateConnected)
-                       break;
-               if (be->vif)
-                       connect(be);
+               set_backend_state(be, XenbusStateConnected);
                break;
 
        case XenbusStateClosing:
-               disconnect_backend(dev);
-               xenbus_switch_state(dev, XenbusStateClosing);
+               set_backend_state(be, XenbusStateClosing);
                break;
 
        case XenbusStateClosed:
-               xenbus_switch_state(dev, XenbusStateClosed);
+               set_backend_state(be, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
-               destroy_backend(dev);
                /* fall through if not online */
        case XenbusStateUnknown:
+               set_backend_state(be, XenbusStateClosed);
                device_unregister(&dev->dev);
                break;
 
@@ -363,7 +473,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
        if (IS_ERR(str))
                return;
        if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
-               xenbus_switch_state(be->dev, XenbusStateConnected);
+               /* Complete any pending state change */
+               xenbus_switch_state(be->dev, be->state);
+
                /* Not interested in this watch anymore. */
                unregister_hotplug_status_watch(be);
        }
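
The deferral mechanism is split between backend_switch_state() and this watch callback; condensed into one sketch (illustrative, not literal driver code):

	be->state = state;                       /* always recorded */
	if (!be->have_hotplug_status_watch)
		xenbus_switch_state(dev, state); /* immediate switch */
	/* ...otherwise, once the hotplug script writes "connected",
	 * hotplug_status_changed() performs the deferred switch:
	 */
	xenbus_switch_state(be->dev, be->state);
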
@@ -393,12 +505,8 @@ static void connect(struct backend_info *be)
        err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
                                   hotplug_status_changed,
                                   "%s/%s", dev->nodename, "hotplug-status");
-       if (err) {
-               /* Switch now, since we can't do a watch. */
-               xenbus_switch_state(dev, XenbusStateConnected);
-       } else {
+       if (!err)
                be->have_hotplug_status_watch = 1;
-       }
 
        netif_wake_queue(be->vif->dev);
 }
@@ -469,20 +577,50 @@ static int connect_rings(struct backend_info *be)
                val = 0;
        vif->can_sg = !!val;
 
+       vif->gso_mask = 0;
+       vif->gso_prefix_mask = 0;
+
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
                         "%d", &val) < 0)
                val = 0;
-       vif->gso = !!val;
+       if (val)
+               vif->gso_mask |= GSO_BIT(TCPV4);
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
                         "%d", &val) < 0)
                val = 0;
-       vif->gso_prefix = !!val;
+       if (val)
+               vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+                        "%d", &val) < 0)
+               val = 0;
+       if (val)
+               vif->gso_mask |= GSO_BIT(TCPV6);
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+                        "%d", &val) < 0)
+               val = 0;
+       if (val)
+               vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+       if (vif->gso_mask & vif->gso_prefix_mask) {
+               xenbus_dev_fatal(dev, err,
+                                "%s: gso and gso prefix flags are not "
+                                "mutually exclusive",
+                                dev->otherend);
+               return -EOPNOTSUPP;
+       }
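
GSO_BIT() is assumed here to map a XEN_NETIF_GSO_TYPE_* value to a bit in gso_mask/gso_prefix_mask, roughly as below (a sketch; the real definition lives in the driver's common.h):

	#define GSO_BIT(type) (1 << XEN_NETIF_GSO_TYPE_ ## type)

	/* e.g. checking whether the frontend accepts TCPv6 GSO: */
	if (vif->gso_mask & GSO_BIT(TCPV6))
		/* large TCPv6 packets may be passed through un-segmented */;
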
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
                         "%d", &val) < 0)
                val = 0;
-       vif->csum = !val;
+       vif->ip_csum = !val;
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
+                        "%d", &val) < 0)
+               val = 0;
+       vif->ipv6_csum = !!val;
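
Note the deliberate asymmetry between the two checksum flags: IPv4 checksum offload is assumed unless the frontend opts out, whereas IPv6 checksum offload must be explicitly advertised:

	vif->ip_csum   = !val;   /* on by default; feature-no-csum-offload disables it */
	vif->ipv6_csum = !!val;  /* off by default; feature-ipv6-csum-offload enables it */
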
 
        /* Map the shared frame, irq etc. */
        err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
index 36808bf256770a5e02e7674002ed389c7fd8ac19..dd1011e55cb598096ef7c2ad64981a2572b760d7 100644 (file)
@@ -952,7 +952,7 @@ static int handle_incoming_queue(struct net_device *dev,
                u64_stats_update_end(&stats->syncp);
 
                /* Pass it up. */
-               netif_receive_skb(skb);
+               napi_gro_receive(&np->napi, skb);
        }
 
        return packets_dropped;
@@ -1051,6 +1051,8 @@ err:
        if (work_done < budget) {
                int more_to_do = 0;
 
+               napi_gro_flush(napi, false);
+
                local_irq_save(flags);
 
                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
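
The two netfront hunks above switch receive completion over to GRO; a minimal sketch of the resulting poll shape (illustrative only; example_poll() and do_rx_work() are hypothetical stand-ins, not netfront functions):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		/* do_rx_work() stands in for the RX loop, which now hands
		 * each packet to napi_gro_receive() as in the hunk above.
		 */
		int work_done = do_rx_work(napi, budget);

		if (work_done < budget) {
			napi_gro_flush(napi, false);	/* push out held GRO packets */
			napi_complete(napi);
		}
		return work_done;
	}
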
index 9d2009a9004d14bc6f80bc9e6192a478fc9362aa..78cc76053328c2c8c70bff559f5dc0f903a241df 100644 (file)
@@ -74,10 +74,4 @@ config OF_MTD
        depends on MTD
        def_bool y
 
-config OF_RESERVED_MEM
-       depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
-       def_bool y
-       help
-         Initialization code for DMA reserved memory
-
 endmenu # OF
index ed9660adad7751357b49914fcb046fa64470767c..efd05102c40533100794b7d6a7626f583a1f0cdc 100644 (file)
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO)   += of_mdio.o
 obj-$(CONFIG_OF_PCI)   += of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
 obj-$(CONFIG_OF_MTD)   += of_mtd.o
-obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
index 865d3f66c86b2735810e1f6d4d7a219dfd9b350e..7d4c70f859e30687bcd0892c195f9cbfc34826dc 100644 (file)
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
        struct device_node *cpun, *cpus;
 
        cpus = of_find_node_by_path("/cpus");
-       if (!cpus) {
-               pr_warn("Missing cpus node, bailing out\n");
+       if (!cpus)
                return NULL;
-       }
 
        for_each_child_of_node(cpus, cpun) {
                if (of_node_cmp(cpun->type, "cpu"))
index 229dd9d69e180529cc7e3135d71001421ab9deea..a4fa9ad31b8f7cfb62f45abf47ada0649d87b867 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/random.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #ifdef CONFIG_PPC
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void)
 }
 
 #endif /* CONFIG_OF_EARLY_FLATTREE */
-
-/* Feed entire flattened device tree into the random pool */
-static int __init add_fdt_randomness(void)
-{
-       if (initial_boot_params)
-               add_device_randomness(initial_boot_params,
-                               be32_to_cpu(initial_boot_params->totalsize));
-
-       return 0;
-}
-core_initcall(add_fdt_randomness);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
deleted file mode 100644 (file)
index 0fe40c7..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Device tree based initialization code for reserved memory.
- *
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- * Author: Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your optional) any later version of the license.
- */
-
-#include <linux/memblock.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
-#include <linux/mm.h>
-#include <linux/sizes.h>
-#include <linux/mm_types.h>
-#include <linux/dma-contiguous.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
-
-#define MAX_RESERVED_REGIONS   16
-struct reserved_mem {
-       phys_addr_t             base;
-       unsigned long           size;
-       struct cma              *cma;
-       char                    name[32];
-};
-static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
-static int reserved_mem_count;
-
-static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
-                                       int depth, void *data)
-{
-       struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
-       phys_addr_t base, size;
-       int is_cma, is_reserved;
-       unsigned long len;
-       const char *status;
-       __be32 *prop;
-
-       is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
-              of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
-       is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
-
-       if (!is_reserved && !is_cma) {
-               /* ignore node and scan next one */
-               return 0;
-       }
-
-       status = of_get_flat_dt_prop(node, "status", &len);
-       if (status && strcmp(status, "okay") != 0) {
-               /* ignore disabled node nad scan next one */
-               return 0;
-       }
-
-       prop = of_get_flat_dt_prop(node, "reg", &len);
-       if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
-                            sizeof(__be32))) {
-               pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
-                      uname);
-               /* ignore node and scan next one */
-               return 0;
-       }
-       base = dt_mem_next_cell(dt_root_addr_cells, &prop);
-       size = dt_mem_next_cell(dt_root_size_cells, &prop);
-
-       if (!size) {
-               /* ignore node and scan next one */
-               return 0;
-       }
-
-       pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
-               uname, (unsigned long)base, (unsigned long)size / SZ_1M);
-
-       if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
-               return -ENOSPC;
-
-       rmem->base = base;
-       rmem->size = size;
-       strlcpy(rmem->name, uname, sizeof(rmem->name));
-
-       if (is_cma) {
-               struct cma *cma;
-               if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
-                       rmem->cma = cma;
-                       reserved_mem_count++;
-                       if (of_get_flat_dt_prop(node,
-                                               "linux,default-contiguous-region",
-                                               NULL))
-                               dma_contiguous_set_default(cma);
-               }
-       } else if (is_reserved) {
-               if (memblock_remove(base, size) == 0)
-                       reserved_mem_count++;
-               else
-                       pr_err("Failed to reserve memory for %s\n", uname);
-       }
-
-       return 0;
-}
-
-static struct reserved_mem *get_dma_memory_region(struct device *dev)
-{
-       struct device_node *node;
-       const char *name;
-       int i;
-
-       node = of_parse_phandle(dev->of_node, "memory-region", 0);
-       if (!node)
-               return NULL;
-
-       name = kbasename(node->full_name);
-       for (i = 0; i < reserved_mem_count; i++)
-               if (strcmp(name, reserved_mem[i].name) == 0)
-                       return &reserved_mem[i];
-       return NULL;
-}
-
-/**
- * of_reserved_mem_device_init() - assign reserved memory region to given device
- *
- * This function assign memory region pointed by "memory-region" device tree
- * property to the given device.
- */
-void of_reserved_mem_device_init(struct device *dev)
-{
-       struct reserved_mem *region = get_dma_memory_region(dev);
-       if (!region)
-               return;
-
-       if (region->cma) {
-               dev_set_cma_area(dev, region->cma);
-               pr_info("Assigned CMA %s to %s device\n", region->name,
-                       dev_name(dev));
-       } else {
-               if (dma_declare_coherent_memory(dev, region->base, region->base,
-                   region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
-                       pr_info("Declared reserved memory %s to %s device\n",
-                               region->name, dev_name(dev));
-       }
-}
-
-/**
- * of_reserved_mem_device_release() - release reserved memory device structures
- *
- * This function releases structures allocated for memory region handling for
- * the given device.
- */
-void of_reserved_mem_device_release(struct device *dev)
-{
-       struct reserved_mem *region = get_dma_memory_region(dev);
-       if (!region && !region->cma)
-               dma_release_declared_memory(dev);
-}
-
-/**
- * early_init_dt_scan_reserved_mem() - create reserved memory regions
- *
- * This function grabs memory from early allocator for device exclusive use
- * defined in device tree structures. It should be called by arch specific code
- * once the early allocator (memblock) has been activated and all other
- * subsystems have already allocated/reserved memory.
- */
-void __init early_init_dt_scan_reserved_mem(void)
-{
-       of_scan_flat_dt_by_path("/memory/reserved-memory",
-                               fdt_scan_reserved_mem, NULL);
-}
index 9b439ac63d8e73eef85aa490d5cab6ec825120b7..049c3d0bddd1068edaf01f4a58165047bb1336f0 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 
 const struct of_device_id of_default_bus_match_table[] = {
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata(
        dev->dev.bus = &platform_bus_type;
        dev->dev.platform_data = platform_data;
 
-       of_reserved_mem_device_init(&dev->dev);
-
        /* We do not fill the DMA ops for platform devices by default.
         * This is currently the responsibility of the platform code
         * to do such, possibly using a device notifier
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata(
 
        if (of_device_add(dev) != 0) {
                platform_device_put(dev);
-               of_reserved_mem_device_release(&dev->dev);
                return NULL;
        }
 
@@ -284,9 +280,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
        else
                of_device_make_bus_id(&dev->dev);
 
-       /* setup amba-specific device info */
-       dev->dma_mask = ~0;
-
        /* Allow the HW Peripheral ID to be overridden */
        prop = of_get_property(node, "arm,primecell-periphid", NULL);
        if (prop)
index 70694ce38be2bb2579efdc3ff4db3d8e0c73190f..2225237ff63f64a70f6b005f418d7e3de3c5ef93 100644 (file)
@@ -31,14 +31,17 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 if PARPORT
 
 config PARPORT_PC
        tristate "PC-style hardware"
-       depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
-               (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
-               !XTENSA && !CRIS && !H8300
-
+       depends on ARCH_MIGHT_HAVE_PC_PARPORT
        ---help---
          You should say Y here if you have a PC-style parallel port. All
          IBM PC compatible computers and some Alphas have PC-style
index 903e1285fda06ce30c84bf79b44078e733048c38..9637615262296235737a094282645b72f3c6ccc5 100644 (file)
@@ -2004,6 +2004,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
        struct resource *ECR_res = NULL;
        struct resource *EPP_res = NULL;
        struct platform_device *pdev = NULL;
+       int ret;
 
        if (!dev) {
                /* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@ struct parport *parport_pc_probe_port(unsigned long int base,
                        return NULL;
                dev = &pdev->dev;
 
-               dev->coherent_dma_mask = DMA_BIT_MASK(24);
-               dev->dma_mask = &dev->coherent_dma_mask;
+               ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+               if (ret) {
+                       dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+                       dma = PARPORT_DMA_NONE;
+               }
        }
 
        ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
index 3d950481112634fc847d61fef187efc86503a189..efa24d9a33615a1268e83212fc3a04844806497f 100644 (file)
@@ -15,6 +15,12 @@ config PCI_EXYNOS
        select PCIEPORTBUS
        select PCIE_DW
 
+config PCI_IMX6
+       bool "Freescale i.MX6 PCIe controller"
+       depends on SOC_IMX6Q
+       select PCIEPORTBUS
+       select PCIE_DW
+
 config PCI_TEGRA
        bool "NVIDIA Tegra PCIe controller"
        depends on ARCH_TEGRA
index c9a997b2690dc9532e7654ae804c0697652679ae..287d6a053dda4b0a2d61dd85e5a0bc19349fee4f 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
index 94e096bb2d0a3e88d66634ea201dc4e546f03b7e..ee692c2c3d73fa83e4815b8648aafc81386fd8e1 100644 (file)
@@ -48,6 +48,7 @@ struct exynos_pcie {
 #define PCIE_IRQ_SPECIAL               0x008
 #define PCIE_IRQ_EN_PULSE              0x00c
 #define PCIE_IRQ_EN_LEVEL              0x010
+#define IRQ_MSI_ENABLE                 (0x1 << 2)
 #define PCIE_IRQ_EN_SPECIAL            0x014
 #define PCIE_PWR_RESET                 0x018
 #define PCIE_CORE_RESET                        0x01c
@@ -77,18 +78,28 @@ struct exynos_pcie {
 #define PCIE_PHY_PLL_BIAS              0x00c
 #define PCIE_PHY_DCC_FEEDBACK          0x014
 #define PCIE_PHY_PLL_DIV_1             0x05c
+#define PCIE_PHY_COMMON_POWER          0x064
+#define PCIE_PHY_COMMON_PD_CMN         (0x1 << 3)
 #define PCIE_PHY_TRSV0_EMP_LVL         0x084
 #define PCIE_PHY_TRSV0_DRV_LVL         0x088
 #define PCIE_PHY_TRSV0_RXCDR           0x0ac
+#define PCIE_PHY_TRSV0_POWER           0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV0_LVCC            0x0dc
 #define PCIE_PHY_TRSV1_EMP_LVL         0x144
 #define PCIE_PHY_TRSV1_RXCDR           0x16c
+#define PCIE_PHY_TRSV1_POWER           0x184
+#define PCIE_PHY_TRSV1_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV1_LVCC            0x19c
 #define PCIE_PHY_TRSV2_EMP_LVL         0x204
 #define PCIE_PHY_TRSV2_RXCDR           0x22c
+#define PCIE_PHY_TRSV2_POWER           0x244
+#define PCIE_PHY_TRSV2_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV2_LVCC            0x25c
 #define PCIE_PHY_TRSV3_EMP_LVL         0x2c4
 #define PCIE_PHY_TRSV3_RXCDR           0x2ec
+#define PCIE_PHY_TRSV3_POWER           0x304
+#define PCIE_PHY_TRSV3_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV3_LVCC            0x31c
 
 static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
        exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
 }
 
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+       val &= ~PCIE_PHY_COMMON_PD_CMN;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+       val &= ~PCIE_PHY_TRSV0_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+       val &= ~PCIE_PHY_TRSV1_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+       val &= ~PCIE_PHY_TRSV2_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+       val &= ~PCIE_PHY_TRSV3_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+       val |= PCIE_PHY_COMMON_PD_CMN;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+       val |= PCIE_PHY_TRSV0_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+       val |= PCIE_PHY_TRSV1_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+       val |= PCIE_PHY_TRSV2_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+       val |= PCIE_PHY_TRSV3_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
 static void exynos_pcie_init_phy(struct pcie_port *pp)
 {
        struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
        /* de-assert phy reset */
        exynos_pcie_deassert_phy_reset(pp);
 
+       /* power on phy */
+       exynos_pcie_power_on_phy(pp);
+
        /* initialize phy */
        exynos_pcie_init_phy(pp);
 
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
                                                       PCIE_PHY_PLL_LOCKED);
                                dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
                        }
+                       /* power off phy */
+                       exynos_pcie_power_off_phy(pp);
+
                        dev_err(pp->dev, "PCIe Link Fail\n");
                        return -EINVAL;
                }
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+       struct pcie_port *pp = arg;
+
+       dw_handle_msi_irq(pp);
+
+       return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+       dw_pcie_msi_init(pp);
+
+       /* enable MSI interrupt */
+       val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+       val |= IRQ_MSI_ENABLE;
+       exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+       return;
+}
+
 static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
 {
        exynos_pcie_enable_irq_pulse(pp);
+
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               exynos_pcie_msi_init(pp);
+
        return;
 }
 
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
                return ret;
        }
 
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               pp->msi_irq = platform_get_irq(pdev, 0);
+               if (!pp->msi_irq) {
+                       dev_err(&pdev->dev, "failed to get msi irq\n");
+                       return -ENODEV;
+               }
+
+               ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+                                       exynos_pcie_msi_irq_handler,
+                                       IRQF_SHARED, "exynos-pcie", pp);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to request msi irq\n");
+                       return ret;
+               }
+       }
+
        pp->root_bus_nr = -1;
        pp->ops = &exynos_pcie_host_ops;
 
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644 (file)
index 0000000..5afa922
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ *             http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x)        container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+       int                     reset_gpio;
+       int                     power_on_gpio;
+       int                     wake_up_gpio;
+       int                     disable_gpio;
+       struct clk              *lvds_gate;
+       struct clk              *sata_ref_100m;
+       struct clk              *pcie_ref_125m;
+       struct clk              *pcie_axi;
+       struct pcie_port        pp;
+       struct regmap           *iomuxc_gpr;
+       void __iomem            *mem_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+       u32 val;
+       u32 max_iterations = 10;
+       u32 wait_counter = 0;
+
+       do {
+               val = readl(dbi_base + PCIE_PHY_STAT);
+               val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+               wait_counter++;
+
+               if (val == exp_val)
+                       return 0;
+
+               udelay(1);
+       } while (wait_counter < max_iterations);
+
+       return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+       u32 val;
+       int ret;
+
+       val = addr << PCIE_PHY_CTRL_DATA_LOC;
+       writel(val, dbi_base + PCIE_PHY_CTRL);
+
+       val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+       writel(val, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       val = addr << PCIE_PHY_CTRL_DATA_LOC;
+       writel(val, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+{
+       u32 val, phy_ctl;
+       int ret;
+
+       ret = pcie_phy_wait_ack(dbi_base, addr);
+       if (ret)
+               return ret;
+
+       /* assert Read signal */
+       phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+       writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       val = readl(dbi_base + PCIE_PHY_STAT);
+       *data = val & 0xffff;
+
+       /* deassert Read signal */
+       writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+       u32 var;
+       int ret;
+
+       /* write addr */
+       /* cap addr */
+       ret = pcie_phy_wait_ack(dbi_base, addr);
+       if (ret)
+               return ret;
+
+       var = data << PCIE_PHY_CTRL_DATA_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* capture data */
+       var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       /* deassert cap data */
+       var = data << PCIE_PHY_CTRL_DATA_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* wait for ack de-assertion */
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       /* assert wr signal */
+       var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* wait for ack */
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       /* deassert wr signal */
+       var = data << PCIE_PHY_CTRL_DATA_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* wait for ack de-assertion */
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+       return 0;
+}
+
+/*  Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+               unsigned int fsr, struct pt_regs *regs)
+{
+       /*
+        * If it was an imprecise abort, then we need to correct the
+        * return address to be _after_ the instruction.
+        */
+       if (fsr & (1 << 10))
+               regs->ARM_pc += 4;
+       return 0;
+}
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+       gpio_set_value(imx6_pcie->reset_gpio, 0);
+       msleep(100);
+       gpio_set_value(imx6_pcie->reset_gpio, 1);
+
+       return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+       int ret;
+
+       if (gpio_is_valid(imx6_pcie->power_on_gpio))
+               gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+       ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable sata_ref_100m\n");
+               goto err_sata_ref;
+       }
+
+       ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
+               goto err_pcie_ref;
+       }
+
+       ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable lvds_gate\n");
+               goto err_lvds_gate;
+       }
+
+       ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable pcie_axi\n");
+               goto err_pcie_axi;
+       }
+
+       /* allow the clocks to stabilize */
+       usleep_range(200, 500);
+
+       return 0;
+
+err_pcie_axi:
+       clk_disable_unprepare(imx6_pcie->lvds_gate);
+err_lvds_gate:
+       clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
+err_pcie_ref:
+       clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+err_sata_ref:
+       return ret;
+
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+       /* configure constant input signal to the pcie ctrl and phy */
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
+       int count = 0;
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+       imx6_pcie_assert_core_reset(pp);
+
+       imx6_pcie_init_phy(pp);
+
+       imx6_pcie_deassert_core_reset(pp);
+
+       dw_pcie_setup_rc(pp);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+       while (!dw_pcie_link_up(pp)) {
+               usleep_range(100, 1000);
+               count++;
+               if (count >= 10) {
+                       dev_err(pp->dev, "phy link never came up\n");
+                       dev_dbg(pp->dev,
+                               "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+                               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+                               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+                       break;
+               }
+       }
+
+       return;
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+       u32 rc, ltssm, rx_valid, temp;
+
+       /* link is debug bit 36, debug register 1 starts at bit 32 */
+       rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+       if (rc)
+               return -EAGAIN;
+
+       /*
+        * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+        * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+        * If (MAC/LTSSM.state == Recovery.RcvrLock) && (PHY/rx_valid == 0),
+        * the transition to gen2 is stuck and PHY/rx_reset must be pulsed.
+        */
+       pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+       ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+
+       if (rx_valid & 0x01)
+               return 0;
+
+       if (ltssm != 0x0d)
+               return 0;
+
+       dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+
+       pcie_phy_read(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, &temp);
+       temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
+               | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, temp);
+
+       usleep_range(2000, 3000);
+
+       pcie_phy_read(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, &temp);
+       temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
+               | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, temp);
+
+       return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+       .link_up = imx6_pcie_link_up,
+       .host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct pcie_port *pp,
+                       struct platform_device *pdev)
+{
+       int ret;
+
+       pp->irq = platform_get_irq(pdev, 0);
+       if (!pp->irq) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               return -ENODEV;
+       }
+
+       pp->root_bus_nr = -1;
+       pp->ops = &imx6_pcie_host_ops;
+
+       spin_lock_init(&pp->conf_lock);
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+       struct imx6_pcie *imx6_pcie;
+       struct pcie_port *pp;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *dbi_base;
+       int ret;
+
+       imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+       if (!imx6_pcie)
+               return -ENOMEM;
+
+       pp = &imx6_pcie->pp;
+       pp->dev = &pdev->dev;
+
+       /* Added for PCI abort handling */
+       hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+               "imprecise external abort");
+
+       dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!dbi_base) {
+               dev_err(&pdev->dev, "dbi_base memory resource not found\n");
+               return -ENODEV;
+       }
+
+       pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+       if (IS_ERR(pp->dbi_base)) {
+               dev_err(&pdev->dev, "unable to remap dbi_base\n");
+               ret = PTR_ERR(pp->dbi_base);
+               goto err;
+       }
+
+       /* Fetch GPIOs */
+       imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+       if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
+               dev_err(&pdev->dev, "no reset-gpio defined\n");
+               ret = -ENODEV;
+       }
+       ret = devm_gpio_request_one(&pdev->dev,
+                               imx6_pcie->reset_gpio,
+                               GPIOF_OUT_INIT_LOW,
+                               "PCIe reset");
+       if (ret) {
+               dev_err(&pdev->dev, "unable to get reset gpio\n");
+               goto err;
+       }
+
+       imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev,
+                                       imx6_pcie->power_on_gpio,
+                                       GPIOF_OUT_INIT_LOW,
+                                       "PCIe power enable");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get power-on gpio\n");
+                       goto err;
+               }
+       }
+
+       imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev,
+                                       imx6_pcie->wake_up_gpio,
+                                       GPIOF_IN,
+                                       "PCIe wake up");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+                       goto err;
+               }
+       }
+
+       imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->disable_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev,
+                                       imx6_pcie->disable_gpio,
+                                       GPIOF_OUT_INIT_HIGH,
+                                       "PCIe disable endpoint");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+                       goto err;
+               }
+       }
+
+       /* Fetch clocks */
+       imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
+       if (IS_ERR(imx6_pcie->lvds_gate)) {
+               dev_err(&pdev->dev,
+                       "lvds_gate clock select missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->lvds_gate);
+               goto err;
+       }
+
+       imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+       if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+               dev_err(&pdev->dev,
+                       "sata_ref_100m clock source missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->sata_ref_100m);
+               goto err;
+       }
+
+       imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+       if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+               dev_err(&pdev->dev,
+                       "pcie_ref_125m clock source missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
+               goto err;
+       }
+
+       imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+       if (IS_ERR(imx6_pcie->pcie_axi)) {
+               dev_err(&pdev->dev,
+                       "pcie_axi clock source missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->pcie_axi);
+               goto err;
+       }
+
+       /* Grab GPR config register range */
+       imx6_pcie->iomuxc_gpr =
+                syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+       if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+               dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+               ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
+               goto err;
+       }
+
+       ret = imx6_add_pcie_port(pp, pdev);
+       if (ret < 0)
+               goto err;
+
+       platform_set_drvdata(pdev, imx6_pcie);
+       return 0;
+
+err:
+       return ret;
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+       { .compatible = "fsl,imx6q-pcie", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+       .driver = {
+               .name   = "imx6q-pcie",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(imx6_pcie_of_match),
+       },
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+       return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+module_init(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
index 2e9888a0635aea41ac5e310ae39e35b564d4874f..7c4f38dd42ba6a5ef868e24e7b033998284fb4ec 100644 (file)
@@ -408,7 +408,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
 
        list_for_each_entry(bus, &pcie->busses, list)
                if (bus->nr == busnr)
-                       return bus->area->addr;
+                       return (void __iomem *)bus->area->addr;
 
        bus = tegra_pcie_bus_alloc(pcie, busnr);
        if (IS_ERR(bus))
@@ -416,7 +416,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
 
        list_add_tail(&bus->list, &pcie->busses);
 
-       return bus->area->addr;
+       return (void __iomem *)bus->area->addr;
 }
 
 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
index c10e9ac9bbbc81849d8e4fba538ba3b58a96aad5..896301788e9d0253bb22828f8725bb4a40d7510a 100644 (file)
  * published by the Free Software Foundation.
  */
 
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
@@ -142,6 +145,204 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
        return ret;
 }
 
+static struct irq_chip dw_msi_irq_chip = {
+       .name = "PCI-MSI",
+       .irq_enable = unmask_msi_irq,
+       .irq_disable = mask_msi_irq,
+       .irq_mask = mask_msi_irq,
+       .irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+void dw_handle_msi_irq(struct pcie_port *pp)
+{
+       unsigned long val;
+       int i, pos;
+
+       for (i = 0; i < MAX_MSI_CTRLS; i++) {
+               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+                               (u32 *)&val);
+               if (val) {
+                       pos = 0;
+                       while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+                               generic_handle_irq(pp->msi_irq_start
+                                       + (i * 32) + pos);
+                               pos++;
+                       }
+               }
+               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
+       }
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+       pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+       /* program the msi_data */
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+                       virt_to_phys((void *)pp->msi_data));
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
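
How the page allocated here is consumed: dw_msi_setup_irq() below programs its physical address into each device's MSI capability, and the MSI data value indexes the INTR0 enable/status bitmap serviced by dw_handle_msi_irq():

	msg.address_lo = virt_to_phys((void *)pp->msi_data);
	msg.address_hi = 0x0;
	msg.data       = pos;	/* bit position in the controller's MSI bitmap */
	write_msi_msg(irq, &msg);
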
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+       int flag = 1;
+
+       do {
+               pos = find_next_zero_bit(pp->msi_irq_in_use,
+                               MAX_MSI_IRQS, pos);
+               /* if we have reached the end of the bitmap, no free vector is left */
+               if (pos == MAX_MSI_IRQS)
+                       return -ENOSPC;
+               /*
+                * Check if this position is at the correct offset. nvec is
+                * always a power of two, so pos0 must be nvec-aligned.
+                */
+               if (pos % msgvec)
+                       pos += msgvec - (pos % msgvec);
+               else
+                       flag = 0;
+       } while (flag);
+
+       *pos0 = pos;
+       return 0;
+}
+
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+       int res, bit, irq, pos0, pos1, i;
+       u32 val;
+       struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+       if (!pp) {
+               BUG();
+               return -EINVAL;
+       }
+
+       pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+                       MAX_MSI_IRQS);
+       if (pos0 % no_irqs) {
+               if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+                       goto no_valid_irq;
+       }
+       if (no_irqs > 1) {
+               pos1 = find_next_bit(pp->msi_irq_in_use,
+                               MAX_MSI_IRQS, pos0);
+               /* there must be nvec consecutive free bits */
+               while ((pos1 - pos0) < no_irqs) {
+                       if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+                               goto no_valid_irq;
+                       pos1 = find_next_bit(pp->msi_irq_in_use,
+                                       MAX_MSI_IRQS, pos0);
+               }
+       }
+
+       irq = (pp->msi_irq_start + pos0);
+
+       if ((irq + no_irqs) > (pp->msi_irq_start + MAX_MSI_IRQS-1))
+               goto no_valid_irq;
+
+       i = 0;
+       while (i < no_irqs) {
+               set_bit(pos0 + i, pp->msi_irq_in_use);
+               irq_alloc_descs((irq + i), (irq + i), 1, 0);
+               irq_set_msi_desc(irq + i, desc);
+               /* Enable the corresponding interrupt in the MSI interrupt controller */
+               res = ((pos0 + i) / 32) * 12;
+               bit = (pos0 + i) % 32;
+               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+               val |= 1 << bit;
+               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+               i++;
+       }
+
+       *pos = pos0;
+       return irq;
+
+no_valid_irq:
+       *pos = pos0;
+       return -ENOSPC;
+}
+
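
Worked example of the constraint enforced by find_valid_pos0()/assign_irq():

	/* With bits 0-2 already in use and a request for 4 vectors
	 * (no_irqs == 4), position 3 is rejected because it is not a
	 * multiple of 4; bits 4-7 are allocated instead, giving the
	 * naturally aligned block that multi-message MSI requires
	 * (the device may modify the low log2(4) bits of msg.data).
	 */
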
+static void clear_irq(unsigned int irq)
+{
+       int res, bit, val, pos;
+       struct irq_desc *desc;
+       struct msi_desc *msi;
+       struct pcie_port *pp;
+
+       /* get the port structure */
+       desc = irq_to_desc(irq);
+       msi = irq_desc_get_msi_desc(desc);
+       pp = sys_to_pcie(msi->dev->bus->sysdata);
+       if (!pp) {
+               BUG();
+               return;
+       }
+
+       pos = irq - pp->msi_irq_start;
+
+       irq_free_desc(irq);
+
+       clear_bit(pos, pp->msi_irq_in_use);
+
+       /* Disable corresponding interrupt on MSI interrupt controller */
+       res = (pos / 32) * 12;
+       bit = pos % 32;
+       dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+       val &= ~(1 << bit);
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+                       struct msi_desc *desc)
+{
+       int irq, pos, msgvec;
+       u16 msg_ctr;
+       struct msi_msg msg;
+       struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+       if (!pp) {
+               BUG();
+               return -EINVAL;
+       }
+
+       pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
+                               &msg_ctr);
+       msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
+       if (msgvec == 0)
+               msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+       if (msgvec > 5)
+               msgvec = 0;
+
+       irq = assign_irq((1 << msgvec), desc, &pos);
+       if (irq < 0)
+               return irq;
+
+       msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
+       msg_ctr |= msgvec << 4;
+       pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+                               msg_ctr);
+       desc->msi_attrib.multiple = msgvec;
+
+       msg.address_lo = virt_to_phys((void *)pp->msi_data);
+       msg.address_hi = 0x0;
+       msg.data = pos;
+       write_msi_msg(irq, &msg);
+
+       return 0;
+}
+
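dw_msi_setup_irq() above decodes the log2-encoded multiple-message fields of the MSI control word: bits 3:1 (QMASK) hold the number of vectors the device requests and bits 6:4 (QSIZE) the number granted, each as a power-of-two exponent capped at 5 (32 vectors). A hedged sketch of that decode, assuming the standard MSI capability layout (mask values restated locally rather than taken from a kernel header):

#include <stdio.h>
#include <stdint.h>

#define MSI_FLAGS_QMASK 0x000e   /* Multiple Message Capable, bits 3:1 */
#define MSI_FLAGS_QSIZE 0x0070   /* Multiple Message Enable,  bits 6:4 */

int main(void)
{
        uint16_t msg_ctr = 0x0006;      /* example: device advertises 2^3 = 8 vectors */
        int msgvec = (msg_ctr & MSI_FLAGS_QSIZE) >> 4;

        if (msgvec == 0)                /* nothing granted yet: fall back to the request */
                msgvec = (msg_ctr & MSI_FLAGS_QMASK) >> 1;
        if (msgvec > 5)                 /* cap the allocation at 32 vectors */
                msgvec = 0;

        printf("vectors to allocate: %d\n", 1 << msgvec);
        return 0;
}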
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+       clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+       .setup_irq = dw_msi_setup_irq,
+       .teardown_irq = dw_msi_teardown_irq,
+};
+
 int dw_pcie_link_up(struct pcie_port *pp)
 {
        if (pp->ops->link_up)
@@ -150,6 +351,20 @@ int dw_pcie_link_up(struct pcie_port *pp)
                return 0;
 }
 
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+                       irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+       irq_set_chip_data(irq, domain->host_data);
+       set_irq_flags(irq, IRQF_VALID);
+
+       return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+       .map = dw_pcie_msi_map,
+};
+
 int __init dw_pcie_host_init(struct pcie_port *pp)
 {
        struct device_node *np = pp->dev->of_node;
@@ -157,6 +372,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
        struct of_pci_range_parser parser;
        u32 val;
 
+       struct irq_domain *irq_domain;
+
        if (of_pci_range_parser_init(&parser, np)) {
                dev_err(pp->dev, "missing ranges property\n");
                return -EINVAL;
@@ -223,6 +440,18 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
                return -EINVAL;
        }
 
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               irq_domain = irq_domain_add_linear(pp->dev->of_node,
+                                       MAX_MSI_IRQS, &msi_domain_ops,
+                                       &dw_pcie_msi_chip);
+               if (!irq_domain) {
+                       dev_err(pp->dev, "irq domain init failed\n");
+                       return -ENXIO;
+               }
+
+               pp->msi_irq_start = irq_find_mapping(irq_domain, 0);
+       }
+
        if (pp->ops->host_init)
                pp->ops->host_init(pp);
 
@@ -485,10 +714,21 @@ int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return pp->irq;
 }
 
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+               dw_pcie_msi_chip.dev = pp->dev;
+               bus->msi = &dw_pcie_msi_chip;
+       }
+}
+
 static struct hw_pci dw_pci = {
        .setup          = dw_pcie_setup,
        .scan           = dw_pcie_scan_bus,
        .map_irq        = dw_pcie_map_irq,
+       .add_bus        = dw_pcie_add_bus,
 };
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
index 133820f1da971ecc19997b5e6e245f3cc992aaa4..faccbbf31907c11b01971f3105ff7ea29c9339e7 100644 (file)
@@ -20,6 +20,14 @@ struct pcie_port_info {
        phys_addr_t     mem_bus_addr;
 };
 
+/*
+ * The maximum number of MSI IRQs is 256 per controller, but keep it at
+ * 32 for now; more than 32 is unlikely to ever be needed. If it is,
+ * increase it in multiples of 32.
+ */
+#define MAX_MSI_IRQS                   32
+#define MAX_MSI_CTRLS                  (MAX_MSI_IRQS / 32)
+
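MAX_MSI_CTRLS reflects the register layout used earlier in assign_irq() and clear_irq(): each block of 32 vectors has its own ENABLE/MASK/STATUS registers spaced 12 bytes apart, so a vector position maps to a register offset of (pos / 32) * 12 and a bit of pos % 32. A small sketch of that arithmetic (the base offset below is illustrative):

#include <stdio.h>

#define MSI_INTR0_ENABLE 0x828   /* illustrative base offset for block 0 */

int main(void)
{
        int pos;

        for (pos = 0; pos < 64; pos += 31) {
                int res = (pos / 32) * 12;   /* which 32-vector block */
                int bit = pos % 32;          /* bit within that block */
                printf("vector %2d -> reg 0x%03x, bit %d\n",
                       pos, MSI_INTR0_ENABLE + res, bit);
        }
        return 0;
}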
 struct pcie_port {
        struct device           *dev;
        u8                      root_bus_nr;
@@ -38,6 +46,10 @@ struct pcie_port {
        int                     irq;
        u32                     lanes;
        struct pcie_host_ops    *ops;
+       int                     msi_irq;
+       int                     msi_irq_start;
+       unsigned long           msi_data;
+       DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
 };
 
 struct pcie_host_ops {
@@ -57,6 +69,8 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val);
 int cfg_write(void __iomem *addr, int where, int size, u32 val);
 int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
 int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+void dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
 int dw_pcie_link_up(struct pcie_port *pp);
 void dw_pcie_setup_rc(struct pcie_port *pp);
 int dw_pcie_host_init(struct pcie_port *pp);
index 2a47e82821dacff72dd227950a6c3de828755053..1ce8ee054f1aa89ba83fb1001761e9200262a666 100644 (file)
@@ -338,7 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
        acpi_handle chandle, handle;
        struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-       flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+       flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
        if (!flags) {
                err("Invalid flags %u specified!\n", flags);
                return -EINVAL;
@@ -411,13 +411,10 @@ EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
 static int pcihp_is_ejectable(acpi_handle handle)
 {
        acpi_status status;
-       acpi_handle tmp;
        unsigned long long removable;
-       status = acpi_get_handle(handle, "_ADR", &tmp);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(handle, "_ADR"))
                return 0;
-       status = acpi_get_handle(handle, "_EJ0", &tmp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(handle, "_EJ0"))
                return 1;
        status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
        if (ACPI_SUCCESS(status) && removable)
index f4e0289246672c0a9157927303945f0934e26d31..26100f510b1087f45bbe39b79dcf9649acf38e9d 100644 (file)
 #include <linux/mutex.h>
 #include <linux/pci_hotplug.h>
 
-#define dbg(format, arg...)                                    \
-       do {                                                    \
-               if (acpiphp_debug)                              \
-                       printk(KERN_DEBUG "%s: " format,        \
-                               MY_NAME , ## arg);              \
-       } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-
 struct acpiphp_context;
 struct acpiphp_bridge;
 struct acpiphp_slot;
index bf2203ef1308bfa13f3e8b2744db6bd0b2adbcb8..8650d39db3922c74d36a01029068ca7b03c75935 100644 (file)
@@ -31,6 +31,8 @@
  *
  */
 
+#define pr_fmt(fmt) "acpiphp: " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/smp.h>
 #include "acpiphp.h"
 
-#define MY_NAME        "acpiphp"
-
 /* name size which is used for entries in pcihpfs */
 #define SLOT_NAME_SIZE  21              /* {_SUN} */
 
-bool acpiphp_debug;
 bool acpiphp_disabled;
 
 /* local variables */
@@ -61,9 +60,7 @@ static struct acpiphp_attention_info *attention_info;
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(disable, "disable acpiphp driver");
-module_param_named(debug, acpiphp_debug, bool, 0644);
 module_param_named(disable, acpiphp_disabled, bool, 0444);
 
 /* export the attention callback registration methods */
@@ -139,7 +136,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        /* enable the specified slot */
        return acpiphp_enable_slot(slot->acpi_slot);
@@ -156,7 +153,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        /* disable the specified slot */
        return acpiphp_disable_and_eject_slot(slot->acpi_slot);
@@ -176,8 +173,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
  {
        int retval = -ENODEV;
 
-       dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+       pr_debug("%s - physical_slot = %s\n", __func__,
+               hotplug_slot_name(hotplug_slot));
+
        if (attention_info && try_module_get(attention_info->owner)) {
                retval = attention_info->set_attn(hotplug_slot, status);
                module_put(attention_info->owner);
@@ -199,7 +197,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        *value = acpiphp_get_power_status(slot->acpi_slot);
 
@@ -221,7 +219,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        int retval = -EINVAL;
 
-       dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+       pr_debug("%s - physical_slot = %s\n", __func__,
+               hotplug_slot_name(hotplug_slot));
 
        if (attention_info && try_module_get(attention_info->owner)) {
                retval = attention_info->get_attn(hotplug_slot, value);
@@ -244,7 +243,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        *value = acpiphp_get_latch_status(slot->acpi_slot);
 
@@ -264,7 +263,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        *value = acpiphp_get_adapter_status(slot->acpi_slot);
 
@@ -279,7 +278,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        kfree(slot->hotplug_slot);
        kfree(slot);
@@ -322,11 +321,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
        if (retval == -EBUSY)
                goto error_hpslot;
        if (retval) {
-               err("pci_hp_register failed with error %d\n", retval);
+               pr_err("pci_hp_register failed with error %d\n", retval);
                goto error_hpslot;
        }
 
-       info("Slot [%s] registered\n", slot_name(slot));
+       pr_info("Slot [%s] registered\n", slot_name(slot));
 
        return 0;
 error_hpslot:
@@ -343,17 +342,17 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
        struct slot *slot = acpiphp_slot->slot;
        int retval = 0;
 
-       info("Slot [%s] unregistered\n", slot_name(slot));
+       pr_info("Slot [%s] unregistered\n", slot_name(slot));
 
        retval = pci_hp_deregister(slot->hotplug_slot);
        if (retval)
-               err("pci_hp_deregister failed with error %d\n", retval);
+               pr_err("pci_hp_deregister failed with error %d\n", retval);
 }
 
 
 void __init acpiphp_init(void)
 {
-       info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+       pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
                acpiphp_disabled ? ", disabled by user; please report a bug"
                                 : "");
 }
index 0b7d23b4ad954489b657480d0ee08a3b7b2d81d8..9d066b86c72421aa82683a3a7ba31c05d7021869 100644 (file)
@@ -39,6 +39,8 @@
  *    bus. It loses the refcount when the driver unloads.
  */
 
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 
@@ -58,8 +60,6 @@ static LIST_HEAD(bridge_list);
 static DEFINE_MUTEX(bridge_mutex);
 static DEFINE_MUTEX(acpiphp_context_lock);
 
-#define MY_NAME "acpiphp_glue"
-
 static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
@@ -335,7 +335,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                if (ACPI_FAILURE(status))
                        sun = bridge->nr_slots;
 
-               dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+               pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
                    sun, pci_domain_nr(pbus), pbus->number, device);
 
                retval = acpiphp_register_hotplug_slot(slot, sun);
@@ -343,10 +343,10 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                        slot->slot = NULL;
                        bridge->nr_slots--;
                        if (retval == -EBUSY)
-                               warn("Slot %llu already registered by another "
+                               pr_warn("Slot %llu already registered by another "
                                        "hotplug driver\n", sun);
                        else
-                               warn("acpiphp_register_hotplug_slot failed "
+                               pr_warn("acpiphp_register_hotplug_slot failed "
                                        "(err code = 0x%x)\n", retval);
                }
                /* Even if the slot registration fails, we can still use it. */
@@ -369,7 +369,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                if (register_hotplug_dock_device(handle,
                        &acpiphp_dock_ops, context,
                        acpiphp_dock_init, acpiphp_dock_release))
-                       dbg("failed to register dock device\n");
+                       pr_debug("failed to register dock device\n");
        }
 
        /* install notify handler */
@@ -427,7 +427,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
                                                        ACPI_SYSTEM_NOTIFY,
                                                        handle_hotplug_event);
                                if (ACPI_FAILURE(status))
-                                       err("failed to remove notify handler\n");
+                                       pr_err("failed to remove notify handler\n");
                        }
                }
                if (slot->slot)
@@ -830,8 +830,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
                /* bus re-enumerate */
-               dbg("%s: Bus check notify on %s\n", __func__, objname);
-               dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+               pr_debug("%s: Bus check notify on %s\n", __func__, objname);
+               pr_debug("%s: re-enumerating slots under %s\n",
+                        __func__, objname);
                if (bridge) {
                        acpiphp_check_bridge(bridge);
                } else {
@@ -845,7 +846,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                /* device check */
-               dbg("%s: Device check notify on %s\n", __func__, objname);
+               pr_debug("%s: Device check notify on %s\n", __func__, objname);
                if (bridge) {
                        acpiphp_check_bridge(bridge);
                } else {
@@ -866,7 +867,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                /* request device eject */
-               dbg("%s: Device eject notify on %s\n", __func__, objname);
+               pr_debug("%s: Device eject notify on %s\n", __func__, objname);
                acpiphp_disable_and_eject_slot(func->slot);
                break;
        }
@@ -994,14 +995,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
 
                /*
                 * This bridge should have been registered as a hotplug function
-                * under its parent, so the context has to be there.  If not, we
-                * are in deep goo.
+                * under its parent, so the context should be there, unless the
+                * parent is going to be handled by pciehp, in which case this
+                * bridge is not interesting to us either.
                 */
                mutex_lock(&acpiphp_context_lock);
                context = acpiphp_get_context(handle);
-               if (WARN_ON(!context)) {
+               if (!context) {
                        mutex_unlock(&acpiphp_context_lock);
                        put_device(&bus->dev);
+                       pci_dev_put(bridge->pci_dev);
                        kfree(bridge);
                        return;
                }
index 2f5786c8522c2f170d8f57bc08c592e0ef78048a..0d64c414bf7876a74baf7acc4ebf6664c8bc7542 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #define DRIVER_AUTHOR  "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
 #define DRIVER_DESC    "ACPI Hot Plug PCI Controller Driver IBM extension"
 
-static bool debug;
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, " Debugging mode enabled or not");
-#define MY_NAME "acpiphp_ibm"
-
-#undef dbg
-#define dbg(format, arg...)                            \
-do {                                                   \
-       if (debug)                                      \
-               printk(KERN_DEBUG "%s: " format,        \
-                               MY_NAME , ## arg);      \
-} while (0)
 
 #define FOUND_APCI 0x61504349
 /* these are the names for the IBM ACPI pseudo-device */
@@ -189,7 +179,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
 
        ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
 
-       dbg("%s: set slot %d (%d) attention status to %d\n", __func__,
+       pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
                        ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
                        (status ? 1 : 0));
 
@@ -202,10 +192,10 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
 
        stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
        if (ACPI_FAILURE(stat)) {
-               err("APLS evaluation failed:  0x%08x\n", stat);
+               pr_err("APLS evaluation failed:  0x%08x\n", stat);
                return -ENODEV;
        } else if (!rc) {
-               err("APLS method failed:  0x%08llx\n", rc);
+               pr_err("APLS method failed:  0x%08llx\n", rc);
                return -ERANGE;
        }
        return 0;
@@ -234,7 +224,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status)
        else
                *status = 0;
 
-       dbg("%s: get slot %d (%d) attention status is %d\n", __func__,
+       pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
                        ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
                        *status);
 
@@ -266,10 +256,10 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
        u8 subevent = event & 0xf0;
        struct notification *note = context;
 
-       dbg("%s: Received notification %02x\n", __func__, event);
+       pr_debug("%s: Received notification %02x\n", __func__, event);
 
        if (subevent == 0x80) {
-               dbg("%s: generationg bus event\n", __func__);
+               pr_debug("%s: generating bus event\n", __func__);
                acpi_bus_generate_netlink_event(note->device->pnp.device_class,
                                                  dev_name(&note->device->dev),
                                                  note->event, detail);
@@ -301,7 +291,7 @@ static int ibm_get_table_from_acpi(char **bufp)
 
        status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
-               err("%s:  APCI evaluation failed\n", __func__);
+               pr_err("%s:  APCI evaluation failed\n", __func__);
                return -ENODEV;
        }
 
@@ -309,13 +299,13 @@ static int ibm_get_table_from_acpi(char **bufp)
        if (!(package) ||
                        (package->type != ACPI_TYPE_PACKAGE) ||
                        !(package->package.elements)) {
-               err("%s:  Invalid APCI object\n", __func__);
+               pr_err("%s:  Invalid APCI object\n", __func__);
                goto read_table_done;
        }
 
        for(size = 0, i = 0; i < package->package.count; i++) {
                if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
-                       err("%s:  Invalid APCI element %d\n", __func__, i);
+                       pr_err("%s:  Invalid APCI element %d\n", __func__, i);
                        goto read_table_done;
                }
                size += package->package.elements[i].buffer.length;
@@ -325,7 +315,7 @@ static int ibm_get_table_from_acpi(char **bufp)
                goto read_table_done;
 
        lbuf = kzalloc(size, GFP_KERNEL);
-       dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+       pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
                        __func__, package->package.count, size, lbuf);
 
        if (lbuf) {
@@ -370,8 +360,8 @@ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
 {
        int bytes_read = -EINVAL;
        char *table = NULL;
-       
-       dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+       pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
 
        if (pos == 0) {
                bytes_read = ibm_get_table_from_acpi(&table);
@@ -403,7 +393,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
 
        status = acpi_get_object_info(handle, &info);
        if (ACPI_FAILURE(status)) {
-               err("%s:  Failed to get device information status=0x%x\n",
+               pr_err("%s:  Failed to get device information status=0x%x\n",
                        __func__, status);
                return retval;
        }
@@ -411,7 +401,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
        if (info->current_status && (info->valid & ACPI_VALID_HID) &&
                        (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
                         !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
-               dbg("found hardware: %s, handle: %p\n",
+               pr_debug("found hardware: %s, handle: %p\n",
                        info->hardware_id.string, handle);
                *phandle = handle;
                /* returning non-zero causes the search to stop
@@ -432,18 +422,18 @@ static int __init ibm_acpiphp_init(void)
        struct acpi_device *device;
        struct kobject *sysdir = &pci_slots_kset->kobj;
 
-       dbg("%s\n", __func__);
+       pr_debug("%s\n", __func__);
 
        if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                        ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
                        &ibm_acpi_handle, NULL) != FOUND_APCI) {
-               err("%s: acpi_walk_namespace failed\n", __func__);
+               pr_err("%s: acpi_walk_namespace failed\n", __func__);
                retval = -ENODEV;
                goto init_return;
        }
-       dbg("%s: found IBM aPCI device\n", __func__);
+       pr_debug("%s: found IBM aPCI device\n", __func__);
        if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
-               err("%s: acpi_bus_get_device failed\n", __func__);
+               pr_err("%s: acpi_bus_get_device failed\n", __func__);
                retval = -ENODEV;
                goto init_return;
        }
@@ -457,7 +447,7 @@ static int __init ibm_acpiphp_init(void)
                        ACPI_DEVICE_NOTIFY, ibm_handle_events,
                        &ibm_note);
        if (ACPI_FAILURE(status)) {
-               err("%s: Failed to register notification handler\n",
+               pr_err("%s: Failed to register notification handler\n",
                                __func__);
                retval = -EBUSY;
                goto init_cleanup;
@@ -479,17 +469,17 @@ static void __exit ibm_acpiphp_exit(void)
        acpi_status status;
        struct kobject *sysdir = &pci_slots_kset->kobj;
 
-       dbg("%s\n", __func__);
+       pr_debug("%s\n", __func__);
 
        if (acpiphp_unregister_attention(&ibm_attention_info))
-               err("%s: attention info deregistration failed", __func__);
+               pr_err("%s: attention info deregistration failed", __func__);
 
        status = acpi_remove_notify_handler(
                           ibm_acpi_handle,
                           ACPI_DEVICE_NOTIFY,
                           ibm_handle_events);
        if (ACPI_FAILURE(status))
-               err("%s: Notification handler removal failed\n", __func__);
+               pr_err("%s: Notification handler removal failed\n", __func__);
        /* remove the /sys entries */
        sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
 }
index 66e505ca24ef418a250219789faeb8bd1de8a9be..3c7eb5dd91c636c17e7dadd374022096455d24b1 100644 (file)
@@ -133,7 +133,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
        kfree(slot->hotplug_slot->info);
        kfree(slot->hotplug_slot);
        kfree(slot);
@@ -183,10 +182,9 @@ int zpci_init_slot(struct zpci_dev *zdev)
        snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
        rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
                             ZPCI_DEVFN, name);
-       if (rc) {
-               pr_err("pci_hp_register failed with error %d\n", rc);
+       if (rc)
                goto error_reg;
-       }
+
        list_add(&slot->slot_list, &s390_hotplug_slot_list);
        return 0;
 
index e260f207a90e2f22b3be53388e6cc5ab1b294920..d876e4b3c6a98d8412f154a4f63814ec5b27682c 100644 (file)
@@ -191,7 +191,7 @@ static inline const char *slot_name(struct slot *slot)
 #include <linux/pci-acpi.h>
 static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
 {
-       u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
+       u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
        return acpi_get_hp_hw_control_from_firmware(dev, flags);
 }
 #else
index 7c29ee4ed0ae5559077b7b4efd69eb7ff2fd871a..dfd1f59de729c6293416d789fb6f665dc09bf701 100644 (file)
@@ -47,6 +47,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
        if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
                return;
 
+       if (pci_dev->pme_poll)
+               pci_dev->pme_poll = false;
+
        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_runtime_resume(&pci_dev->dev);
@@ -57,9 +60,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);
 
-       if (pci_dev->pme_poll)
-               pci_dev->pme_poll = false;
-
        pci_wakeup_event(pci_dev);
        pm_runtime_resume(&pci_dev->dev);
 
@@ -181,7 +181,6 @@ static bool acpi_pci_power_manageable(struct pci_dev *dev)
 static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
        acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
-       acpi_handle tmp;
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
@@ -192,7 +191,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        int error = -EINVAL;
 
        /* If the ACPI device has _EJ0, ignore the device */
-       if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+       if (!handle || acpi_has_method(handle, "_EJ0"))
                return -ENODEV;
 
        switch (state) {
index e8ccf6c0f08a3d4d5a9b03aec428efddcba77c6f..2f2eedceda34c6c80e0f010fb56144e50a866e29 100644 (file)
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
 
        pci_enable_bridge(dev->bus->self);
 
-       if (pci_is_enabled(dev))
+       if (pci_is_enabled(dev)) {
+               if (!dev->is_busmaster) {
+                       dev_warn(&dev->dev, "driver skipped pci_set_master, fix it!\n");
+                       pci_set_master(dev);
+               }
                return;
+       }
+
        retval = pci_enable_device(dev);
        if (retval)
                dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
@@ -2854,7 +2860,7 @@ void __weak pcibios_set_master(struct pci_dev *dev)
                lat = pcibios_max_latency;
        else
                return;
-       dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 
index 7ef0f868b3e07bf48941075e269ef3976d3efcf1..5e14f5a51357cabd86d8e420e7684d3122b30006 100644 (file)
@@ -641,8 +641,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
                return;
        }
 
-       pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
-       if (pos) {
+       if (pci_is_pcie(bridge)) {
                u32 linkcap;
                u16 linksta;
 
@@ -984,7 +983,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (!pos)
                return;
-       pdev->is_pcie = 1;
        pdev->pcie_cap = pos;
        pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
        pdev->pcie_flags_reg = reg16;
index f6c31fabf3af0bbe24f20b949cc0b6373b11ab97..91490453c2296878f19ff8105e9dbbec4d4878b1 100644 (file)
@@ -2954,6 +2954,29 @@ static void disable_igfx_irq(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
+/*
+ * PCI devices which are on Intel chips can skip the 10ms delay
+ * before entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+       dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
 /*
  * Some devices may pass our check in pci_intx_mask_supported if
  * PCI_COMMAND_INTX_DISABLE works though they actually do not properly
index bc26d7990cc3744512c717540565b7c0dfa557ad..4ce83b26ae9ef2dcbdf9e29c81d55751a9525a7e 100644 (file)
@@ -982,7 +982,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
        }
 
        min_align = calculate_mem_align(aligns, max_order);
-       min_align = max(min_align, window_alignment(bus, b_res->flags & mask));
+       min_align = max(min_align, window_alignment(bus, b_res->flags));
        size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
        if (children_add_size > add_size)
                add_size = children_add_size;
@@ -1136,7 +1136,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
        }
 
        /* The root bus? */
-       if (!bus->self)
+       if (pci_is_root_bus(bus))
                return;
 
        switch (bus->self->class >> 8) {
index a138965c01cbc6e4029615bc080790d2338e59e2..b8fcc38c0d116a97cf2b6f4142b026f8b8f94afb 100644 (file)
@@ -490,7 +490,7 @@ exit:
  * <devicename> <state> <pinname> are values that should match the pinctrl-maps
  * <newvalue> reflects the new config and is driver dependent
  */
-static int pinconf_dbg_config_write(struct file *file,
+static ssize_t pinconf_dbg_config_write(struct file *file,
        const char __user *user_buf, size_t count, loff_t *ppos)
 {
        struct pinctrl_maps *maps_node;
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
        int i;
 
        /* Get userspace string and assure termination */
-       buf_size = min(count, (size_t)(sizeof(buf)-1));
+       buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        buf[buf_size] = 0;
index 2689f8d01a1e202d72f226743e0ec8bcdba2b1d8..155b1b3a0e7a71597d26d3c0c1cab2a525a9ee19 100644 (file)
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
 /* pin banks of s5pv210 pin-controller */
 static struct samsung_pin_bank s5pv210_pin_bank[] = {
        EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
-       EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+       EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
        EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
        EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
        EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
        EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
-       EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18),
-       EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c),
-       EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20),
-       EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24),
+       EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
+       EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
+       EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
+       EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
        EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
        EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
-       EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30),
+       EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
        EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
        EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
        EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
index 82638fac3cfa6a3f40c6ce1f423d3146a5b29e02..30c4d356cb33510a550aec1d467ac40936552a60 100644 (file)
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
                param = pinconf_to_config_param(configs[i]);
                param_val = pinconf_to_config_argument(configs[i]);
 
+               if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
+                       continue;
+
                switch (param) {
-               case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
-                       return 0;
                case PIN_CONFIG_BIAS_DISABLE:
                case PIN_CONFIG_BIAS_PULL_UP:
                case PIN_CONFIG_BIAS_PULL_DOWN:
index 622c4854977e63471b6e77cd950184a3dd3f13f8..93c9e3899d5e4a0230e91a7269c68b7d7d8433f4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
  *
- * Arthur:  Pritesh Raithatha <praithatha@nvidia.com>
+ * Author:  Pritesh Raithatha <praithatha@nvidia.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
 };
 module_platform_driver(tegra114_pinctrl_driver);
 
-MODULE_ALIAS("platform:tegra114-pinctrl");
 MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver");
+MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
 MODULE_LICENSE("GPL v2");
index 96d6b2eef4f2a1209f2c9f113e3932b8eb812738..b51a7460cc49bc03b4c055e8f46bfe2fedf718ff 100644 (file)
@@ -504,6 +504,7 @@ config ASUS_WMI
        depends on BACKLIGHT_CLASS_DEVICE
        depends on RFKILL || RFKILL = n
        depends on HOTPLUG_PCI
+       depends on ACPI_VIDEO || ACPI_VIDEO = n
        select INPUT_SPARSEKMAP
        select LEDS_CLASS
        select NEW_LEDS
index a6afd4108beb0592c15604efa9f779180d5349ef..aefcc32e563479d2b22404fdd75ceb7072423d16 100644 (file)
@@ -190,16 +190,10 @@ struct eeepc_laptop {
  */
 static int write_acpi_int(acpi_handle handle, const char *method, int val)
 {
-       struct acpi_object_list params;
-       union acpi_object in_obj;
        acpi_status status;
 
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = val;
+       status = acpi_execute_simple_method(handle, (char *)method, val);
 
-       status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
        return (status == AE_OK ? 0 : -1);
 }
 
index 52b8a97efde150f52d393b4e7a6c0be9360bc9ef..9d30d69aa78f24a3bbb1caa71f797f9c520bbc5c 100644 (file)
@@ -219,8 +219,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
        { .type = ACPI_TYPE_INTEGER }
        };
        struct acpi_object_list arg_list = { 4, &params[0] };
-       struct acpi_buffer output;
-       union acpi_object out_obj;
+       unsigned long long value;
        acpi_handle handle = NULL;
 
        status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
@@ -235,10 +234,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
        params[2].integer.value = arg1;
        params[3].integer.value = arg2;
 
-       output.length = sizeof(out_obj);
-       output.pointer = &out_obj;
-
-       status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+       status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
        if (ACPI_FAILURE(status)) {
                vdbg_printk(FUJLAPTOP_DBG_WARN,
                        "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
@@ -246,18 +242,10 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
                return -ENODEV;
        }
 
-       if (out_obj.type != ACPI_TYPE_INTEGER) {
-               vdbg_printk(FUJLAPTOP_DBG_WARN,
-                       "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
-                       "return an integer\n",
-                       cmd, arg0, arg1, arg2);
-               return -ENODEV;
-       }
-
        vdbg_printk(FUJLAPTOP_DBG_TRACE,
                "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
-                       cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
-       return out_obj.integer.value;
+                       cmd, arg0, arg1, arg2, (int)value);
+       return value;
 }
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -317,8 +305,6 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
 static int set_lcd_level(int level)
 {
        acpi_status status = AE_OK;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg0 };
        acpi_handle handle = NULL;
 
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
@@ -333,9 +319,8 @@ static int set_lcd_level(int level)
                return -ENODEV;
        }
 
-       arg0.integer.value = level;
 
-       status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+       status = acpi_execute_simple_method(handle, NULL, level);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
@@ -345,8 +330,6 @@ static int set_lcd_level(int level)
 static int set_lcd_level_alt(int level)
 {
        acpi_status status = AE_OK;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg0 };
        acpi_handle handle = NULL;
 
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
@@ -361,9 +344,7 @@ static int set_lcd_level_alt(int level)
                return -ENODEV;
        }
 
-       arg0.integer.value = level;
-
-       status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+       status = acpi_execute_simple_method(handle, NULL, level);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
@@ -586,11 +567,10 @@ static struct platform_driver fujitsupf_driver = {
 
 static void dmi_check_cb_common(const struct dmi_system_id *id)
 {
-       acpi_handle handle;
        pr_info("Identified laptop model '%s'\n", id->ident);
        if (use_alt_lcd_levels == -1) {
-               if (ACPI_SUCCESS(acpi_get_handle(NULL,
-                               "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+               if (acpi_has_method(NULL,
+                               "\\_SB.PCI0.LPCB.FJEX.SBL2"))
                        use_alt_lcd_levels = 1;
                else
                        use_alt_lcd_levels = 0;
@@ -653,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
 
 static int acpi_fujitsu_add(struct acpi_device *device)
 {
-       acpi_handle handle;
        int result = 0;
        int state = 0;
        struct input_dev *input;
@@ -702,8 +681,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
 
        fujitsu->dev = device;
 
-       if (ACPI_SUCCESS
-           (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+       if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
                vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
                if (ACPI_FAILURE
                    (acpi_evaluate_object
@@ -803,7 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
 
 static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 {
-       acpi_handle handle;
        int result = 0;
        int state = 0;
        struct input_dev *input;
@@ -866,8 +843,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 
        fujitsu_hotkey->dev = device;
 
-       if (ACPI_SUCCESS
-           (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+       if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
                vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
                if (ACPI_FAILURE
                    (acpi_evaluate_object
index 89c4519d48ac80d2a54f42ceabf7a48b792e871c..6788acc22ab97f01b410240eef0a5b2082c98550 100644 (file)
@@ -72,8 +72,15 @@ enum {
        VPCCMD_W_BL_POWER = 0x33,
 };
 
+struct ideapad_rfk_priv {
+       int dev;
+       struct ideapad_private *priv;
+};
+
 struct ideapad_private {
+       struct acpi_device *adev;
        struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+       struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
        struct platform_device *platform_device;
        struct input_dev *inputdev;
        struct backlight_device *blightdev;
@@ -81,8 +88,6 @@ struct ideapad_private {
        unsigned long cfg;
 };
 
-static acpi_handle ideapad_handle;
-static struct ideapad_private *ideapad_priv;
 static bool no_bt_rfkill;
 module_param(no_bt_rfkill, bool, 0444);
 MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -200,34 +205,38 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
  */
 static int debugfs_status_show(struct seq_file *s, void *data)
 {
+       struct ideapad_private *priv = s->private;
        unsigned long value;
 
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+       if (!priv)
+               return -EINVAL;
+
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
                seq_printf(s, "Backlight max:\t%lu\n", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
                seq_printf(s, "Backlight now:\t%lu\n", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
                seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
        seq_printf(s, "=====================\n");
 
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
                seq_printf(s, "Radio status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
                seq_printf(s, "Wifi status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
                seq_printf(s, "BT status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
                seq_printf(s, "3G status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
        seq_printf(s, "=====================\n");
 
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
                seq_printf(s, "Touchpad status:%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
                seq_printf(s, "Camera status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
 
@@ -236,7 +245,7 @@ static int debugfs_status_show(struct seq_file *s, void *data)
 
 static int debugfs_status_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, debugfs_status_show, NULL);
+       return single_open(file, debugfs_status_show, inode->i_private);
 }
 
 static const struct file_operations debugfs_status_fops = {
@@ -249,21 +258,23 @@ static const struct file_operations debugfs_status_fops = {
 
 static int debugfs_cfg_show(struct seq_file *s, void *data)
 {
-       if (!ideapad_priv) {
+       struct ideapad_private *priv = s->private;
+
+       if (!priv) {
                seq_printf(s, "cfg: N/A\n");
        } else {
                seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
-                          ideapad_priv->cfg);
-               if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+                          priv->cfg);
+               if (test_bit(CFG_BT_BIT, &priv->cfg))
                        seq_printf(s, "Bluetooth ");
-               if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+               if (test_bit(CFG_3G_BIT, &priv->cfg))
                        seq_printf(s, "3G ");
-               if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+               if (test_bit(CFG_WIFI_BIT, &priv->cfg))
                        seq_printf(s, "Wireless ");
-               if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+               if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
                        seq_printf(s, "Camera ");
                seq_printf(s, "\nGraphic: ");
-               switch ((ideapad_priv->cfg)&0x700) {
+               switch ((priv->cfg)&0x700) {
                case 0x100:
                        seq_printf(s, "Intel");
                        break;
@@ -287,7 +298,7 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
 
 static int debugfs_cfg_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, debugfs_cfg_show, NULL);
+       return single_open(file, debugfs_cfg_show, inode->i_private);
 }
 
 static const struct file_operations debugfs_cfg_fops = {
@@ -308,14 +319,14 @@ static int ideapad_debugfs_init(struct ideapad_private *priv)
                goto errout;
        }
 
-       node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+       node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv,
                                   &debugfs_cfg_fops);
        if (!node) {
                pr_err("failed to create cfg in debugfs");
                goto errout;
        }
 
-       node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+       node = debugfs_create_file("status", S_IRUGO, priv->debug, priv,
                                   &debugfs_status_fops);
        if (!node) {
                pr_err("failed to create status in debugfs");
@@ -342,8 +353,9 @@ static ssize_t show_ideapad_cam(struct device *dev,
                                char *buf)
 {
        unsigned long result;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
                return sprintf(buf, "-1\n");
        return sprintf(buf, "%lu\n", result);
 }
@@ -353,12 +365,13 @@ static ssize_t store_ideapad_cam(struct device *dev,
                                 const char *buf, size_t count)
 {
        int ret, state;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
        if (!count)
                return 0;
        if (sscanf(buf, "%i", &state) != 1)
                return -EINVAL;
-       ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
+       ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
        if (ret < 0)
                return -EIO;
        return count;
@@ -371,8 +384,9 @@ static ssize_t show_ideapad_fan(struct device *dev,
                                char *buf)
 {
        unsigned long result;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
                return sprintf(buf, "-1\n");
        return sprintf(buf, "%lu\n", result);
 }
@@ -382,6 +396,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
                                 const char *buf, size_t count)
 {
        int ret, state;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
        if (!count)
                return 0;
@@ -389,7 +404,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
                return -EINVAL;
        if (state < 0 || state > 4 || state == 3)
                return -EINVAL;
-       ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+       ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
        if (ret < 0)
                return -EIO;
        return count;
@@ -415,7 +430,8 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
                supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
        else if (attr == &dev_attr_fan_mode.attr) {
                unsigned long value;
-               supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+               supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
+                                         &value);
        } else
                supported = true;
 
@@ -445,9 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
 
 static int ideapad_rfk_set(void *data, bool blocked)
 {
-       unsigned long opcode = (unsigned long)data;
+       struct ideapad_rfk_priv *priv = data;
 
-       return write_ec_cmd(ideapad_handle, opcode, !blocked);
+       return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
 }
 
 static struct rfkill_ops ideapad_rfk_ops = {
@@ -459,7 +475,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
        unsigned long hw_blocked;
        int i;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
                return;
        hw_blocked = !hw_blocked;
 
@@ -468,27 +484,30 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
                        rfkill_set_hw_state(priv->rfk[i], hw_blocked);
 }
 
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
        int ret;
        unsigned long sw_blocked;
 
        if (no_bt_rfkill &&
            (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
                /* Force to enable bluetooth when no_bt_rfkill=1 */
-               write_ec_cmd(ideapad_handle,
+               write_ec_cmd(priv->adev->handle,
                             ideapad_rfk_data[dev].opcode, 1);
                return 0;
        }
-
-       priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
-                                     ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
-                                     (void *)(long)dev);
+       priv->rfk_priv[dev].dev = dev;
+       priv->rfk_priv[dev].priv = priv;
+
+       priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+                                     &priv->platform_device->dev,
+                                     ideapad_rfk_data[dev].type,
+                                     &ideapad_rfk_ops,
+                                     &priv->rfk_priv[dev]);
        if (!priv->rfk[dev])
                return -ENOMEM;
 
-       if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
+       if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
                         &sw_blocked)) {
                rfkill_init_sw_state(priv->rfk[dev], 0);
        } else {
@@ -504,10 +523,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
        return 0;
 }
 
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-
        if (!priv->rfk[dev])
                return;
 
@@ -518,37 +535,16 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
 /*
  * Platform device
  */
-static int ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_sysfs_init(struct ideapad_private *priv)
 {
-       int result;
-
-       priv->platform_device = platform_device_alloc("ideapad", -1);
-       if (!priv->platform_device)
-               return -ENOMEM;
-       platform_set_drvdata(priv->platform_device, priv);
-
-       result = platform_device_add(priv->platform_device);
-       if (result)
-               goto fail_platform_device;
-
-       result = sysfs_create_group(&priv->platform_device->dev.kobj,
+       return sysfs_create_group(&priv->platform_device->dev.kobj,
                                    &ideapad_attribute_group);
-       if (result)
-               goto fail_sysfs;
-       return 0;
-
-fail_sysfs:
-       platform_device_del(priv->platform_device);
-fail_platform_device:
-       platform_device_put(priv->platform_device);
-       return result;
 }
 
-static void ideapad_platform_exit(struct ideapad_private *priv)
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
 {
        sysfs_remove_group(&priv->platform_device->dev.kobj,
                           &ideapad_attribute_group);
-       platform_device_unregister(priv->platform_device);
 }
 
 /*
@@ -623,7 +619,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
 {
        unsigned long long_pressed;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
                return;
        if (long_pressed)
                ideapad_input_report(priv, 17);
@@ -635,7 +631,7 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
 {
        unsigned long bit, value;
 
-       read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+       read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
 
        for (bit = 0; bit < 16; bit++) {
                if (test_bit(bit, &value)) {
@@ -662,19 +658,28 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
  */
 static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
 {
+       struct ideapad_private *priv = bl_get_data(blightdev);
        unsigned long now;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+       if (!priv)
+               return -EINVAL;
+
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
                return -EIO;
        return now;
 }
 
 static int ideapad_backlight_update_status(struct backlight_device *blightdev)
 {
-       if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+       struct ideapad_private *priv = bl_get_data(blightdev);
+
+       if (!priv)
+               return -EINVAL;
+
+       if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
                         blightdev->props.brightness))
                return -EIO;
-       if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
+       if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
                         blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
                return -EIO;
 
@@ -692,11 +697,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
        struct backlight_properties props;
        unsigned long max, now, power;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
                return -EIO;
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
                return -EIO;
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
                return -EIO;
 
        memset(&props, 0, sizeof(struct backlight_properties));
@@ -734,7 +739,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
 
        if (!blightdev)
                return;
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
                return;
        blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
 }
@@ -745,7 +750,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
 
        /* if we control brightness via acpi video driver */
        if (priv->blightdev == NULL) {
-               read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
+               read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
                return;
        }
 
@@ -755,19 +760,12 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
 /*
  * module init/exit
  */
-static const struct acpi_device_id ideapad_device_ids[] = {
-       { "VPC2004", 0},
-       { "", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-
-static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
        unsigned long value;
 
        /* Without reading from EC touchpad LED doesn't switch state */
-       if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
                /* Some IdeaPads don't really turn off touchpad - they only
                 * switch the LED state. We (de)activate KBC AUX port to turn
                 * touchpad off and on. We send KEY_TOUCHPAD_OFF and
@@ -779,26 +777,77 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
        }
 }
 
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+       struct ideapad_private *priv = data;
+       unsigned long vpc1, vpc2, vpc_bit;
+
+       if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+               return;
+       if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+               return;
+
+       vpc1 = (vpc2 << 8) | vpc1;
+       for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
+               if (test_bit(vpc_bit, &vpc1)) {
+                       switch (vpc_bit) {
+                       case 9:
+                               ideapad_sync_rfk_state(priv);
+                               break;
+                       case 13:
+                       case 11:
+                       case 7:
+                       case 6:
+                               ideapad_input_report(priv, vpc_bit);
+                               break;
+                       case 5:
+                               ideapad_sync_touchpad_state(priv);
+                               break;
+                       case 4:
+                               ideapad_backlight_notify_brightness(priv);
+                               break;
+                       case 3:
+                               ideapad_input_novokey(priv);
+                               break;
+                       case 2:
+                               ideapad_backlight_notify_power(priv);
+                               break;
+                       case 0:
+                               ideapad_check_special_buttons(priv);
+                               break;
+                       default:
+                               pr_info("Unknown event: %lu\n", vpc_bit);
+                       }
+               }
+       }
+}
+
+static int ideapad_acpi_add(struct platform_device *pdev)
 {
        int ret, i;
        int cfg;
        struct ideapad_private *priv;
+       struct acpi_device *adev;
+
+       ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+       if (ret)
+               return -ENODEV;
 
-       if (read_method_int(adevice->handle, "_CFG", &cfg))
+       if (read_method_int(adev->handle, "_CFG", &cfg))
                return -ENODEV;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
-       dev_set_drvdata(&adevice->dev, priv);
-       ideapad_priv = priv;
-       ideapad_handle = adevice->handle;
+
+       dev_set_drvdata(&pdev->dev, priv);
        priv->cfg = cfg;
+       priv->adev = adev;
+       priv->platform_device = pdev;
 
-       ret = ideapad_platform_init(priv);
+       ret = ideapad_sysfs_init(priv);
        if (ret)
-               goto platform_failed;
+               goto sysfs_failed;
 
        ret = ideapad_debugfs_init(priv);
        if (ret)
@@ -810,117 +859,92 @@ static int ideapad_acpi_add(struct acpi_device *adevice)
 
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
                if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
-                       ideapad_register_rfkill(adevice, i);
+                       ideapad_register_rfkill(priv, i);
                else
                        priv->rfk[i] = NULL;
        }
        ideapad_sync_rfk_state(priv);
-       ideapad_sync_touchpad_state(adevice);
+       ideapad_sync_touchpad_state(priv);
 
        if (!acpi_video_backlight_support()) {
                ret = ideapad_backlight_init(priv);
                if (ret && ret != -ENODEV)
                        goto backlight_failed;
        }
+       ret = acpi_install_notify_handler(adev->handle,
+               ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
+       if (ret)
+               goto notification_failed;
 
        return 0;
-
+notification_failed:
+       ideapad_backlight_exit(priv);
 backlight_failed:
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
-               ideapad_unregister_rfkill(adevice, i);
+               ideapad_unregister_rfkill(priv, i);
        ideapad_input_exit(priv);
 input_failed:
        ideapad_debugfs_exit(priv);
 debugfs_failed:
-       ideapad_platform_exit(priv);
-platform_failed:
+       ideapad_sysfs_exit(priv);
+sysfs_failed:
        kfree(priv);
        return ret;
 }
 
-static int ideapad_acpi_remove(struct acpi_device *adevice)
+static int ideapad_acpi_remove(struct platform_device *pdev)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+       struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
        int i;
 
+       acpi_remove_notify_handler(priv->adev->handle,
+               ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
        ideapad_backlight_exit(priv);
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
-               ideapad_unregister_rfkill(adevice, i);
+               ideapad_unregister_rfkill(priv, i);
        ideapad_input_exit(priv);
        ideapad_debugfs_exit(priv);
-       ideapad_platform_exit(priv);
-       dev_set_drvdata(&adevice->dev, NULL);
+       ideapad_sysfs_exit(priv);
+       dev_set_drvdata(&pdev->dev, NULL);
        kfree(priv);
 
        return 0;
 }
 
-static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *device)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-       acpi_handle handle = adevice->handle;
-       unsigned long vpc1, vpc2, vpc_bit;
-
-       if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
-               return;
-       if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
-               return;
+       struct ideapad_private *priv;
 
-       vpc1 = (vpc2 << 8) | vpc1;
-       for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
-               if (test_bit(vpc_bit, &vpc1)) {
-                       switch (vpc_bit) {
-                       case 9:
-                               ideapad_sync_rfk_state(priv);
-                               break;
-                       case 13:
-                       case 11:
-                       case 7:
-                       case 6:
-                               ideapad_input_report(priv, vpc_bit);
-                               break;
-                       case 5:
-                               ideapad_sync_touchpad_state(adevice);
-                               break;
-                       case 4:
-                               ideapad_backlight_notify_brightness(priv);
-                               break;
-                       case 3:
-                               ideapad_input_novokey(priv);
-                               break;
-                       case 2:
-                               ideapad_backlight_notify_power(priv);
-                               break;
-                       case 0:
-                               ideapad_check_special_buttons(priv);
-                               break;
-                       default:
-                               pr_info("Unknown event: %lu\n", vpc_bit);
-                       }
-               }
-       }
-}
+       if (!device)
+               return -EINVAL;
+       priv = dev_get_drvdata(device);
 
-static int ideapad_acpi_resume(struct device *device)
-{
-       ideapad_sync_rfk_state(ideapad_priv);
-       ideapad_sync_touchpad_state(to_acpi_device(device));
+       ideapad_sync_rfk_state(priv);
+       ideapad_sync_touchpad_state(priv);
        return 0;
 }
-
+#endif
 static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
 
-static struct acpi_driver ideapad_acpi_driver = {
-       .name = "ideapad_acpi",
-       .class = "IdeaPad",
-       .ids = ideapad_device_ids,
-       .ops.add = ideapad_acpi_add,
-       .ops.remove = ideapad_acpi_remove,
-       .ops.notify = ideapad_acpi_notify,
-       .drv.pm = &ideapad_pm,
-       .owner = THIS_MODULE,
+static const struct acpi_device_id ideapad_device_ids[] = {
+       { "VPC2004", 0},
+       { "", 0},
 };
-module_acpi_driver(ideapad_acpi_driver);
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+       .probe = ideapad_acpi_add,
+       .remove = ideapad_acpi_remove,
+       .driver = {
+               .name   = "ideapad_acpi",
+               .owner  = THIS_MODULE,
+               .pm     = &ideapad_pm,
+               .acpi_match_table = ACPI_PTR(ideapad_device_ids),
+       },
+};
+
+module_platform_driver(ideapad_acpi_driver);
 
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
 MODULE_DESCRIPTION("IdeaPad ACPI Extras");
index 41b740cb28bca8743ea282bbf2fb5639a2f4266c..a2083a9e5662d660fd95cbb297815aa0570c8268 100644 (file)
@@ -29,24 +29,16 @@ static ssize_t irst_show_wakeup_events(struct device *dev,
                                       char *buf)
 {
        struct acpi_device *acpi;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *result;
+       unsigned long long value;
        acpi_status status;
 
        acpi = to_acpi_device(dev);
 
-       status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output);
+       status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
 
-       result = output.pointer;
-
-       if (result->type != ACPI_TYPE_INTEGER) {
-               kfree(result);
-               return -EINVAL;
-       }
-
-       return sprintf(buf, "%lld\n", result->integer.value);
+       return sprintf(buf, "%lld\n", value);
 }
 
 static ssize_t irst_store_wakeup_events(struct device *dev,
@@ -54,8 +46,6 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
                                        const char *buf, size_t count)
 {
        struct acpi_device *acpi;
-       struct acpi_object_list input;
-       union acpi_object param;
        acpi_status status;
        unsigned long value;
        int error;
@@ -67,13 +57,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
        if (error)
                return error;
 
-       param.type = ACPI_TYPE_INTEGER;
-       param.integer.value = value;
-
-       input.count = 1;
-       input.pointer = &param;
-
-       status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL);
+       status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
 
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
@@ -91,24 +75,16 @@ static ssize_t irst_show_wakeup_time(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
        struct acpi_device *acpi;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *result;
+       unsigned long long value;
        acpi_status status;
 
        acpi = to_acpi_device(dev);
 
-       status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output);
+       status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
 
-       result = output.pointer;
-
-       if (result->type != ACPI_TYPE_INTEGER) {
-               kfree(result);
-               return -EINVAL;
-       }
-
-       return sprintf(buf, "%lld\n", result->integer.value);
+       return sprintf(buf, "%lld\n", value);
 }
 
 static ssize_t irst_store_wakeup_time(struct device *dev,
@@ -116,8 +92,6 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct acpi_device *acpi;
-       struct acpi_object_list input;
-       union acpi_object param;
        acpi_status status;
        unsigned long value;
        int error;
@@ -129,13 +103,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
        if (error)
                return error;
 
-       param.type = ACPI_TYPE_INTEGER;
-       param.integer.value = value;
-
-       input.count = 1;
-       input.pointer = &param;
-
-       status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL);
+       status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
 
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
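This conversion, like the similar ones below, replaces open-coded acpi_evaluate_object() buffer handling with the acpi_evaluate_integer(), acpi_execute_simple_method() and acpi_has_method() helpers. A compact sketch of the resulting pattern, reusing the GFFS/SFFS method names from the hunks above (example_read_and_set() itself is illustrative only):

    static int example_read_and_set(acpi_handle handle, u64 arg)
    {
            unsigned long long value;
            acpi_status status;

            /* probe for a method before relying on it */
            if (!acpi_has_method(handle, "GFFS"))
                    return -ENODEV;

            /* fetch an integer result without managing an output buffer */
            status = acpi_evaluate_integer(handle, "GFFS", NULL, &value);
            if (ACPI_FAILURE(status))
                    return -EINVAL;

            /* call a method taking a single integer argument */
            status = acpi_execute_simple_method(handle, "SFFS", arg);
            return ACPI_FAILURE(status) ? -EINVAL : 0;
    }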
index 52259dcabecb8251edad2a0e0dc7eccb9b174500..1838400dc0360615f799fac64a82b51f7efb9c17 100644 (file)
@@ -25,37 +25,18 @@ MODULE_LICENSE("GPL");
 
 static int smartconnect_acpi_init(struct acpi_device *acpi)
 {
-       struct acpi_object_list input;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *result;
-       union acpi_object param;
+       unsigned long long value;
        acpi_status status;
 
-       status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output);
+       status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
 
-       result = output.pointer;
-
-       if (result->type != ACPI_TYPE_INTEGER) {
-               kfree(result);
-               return -EINVAL;
-       }
-
-       if (result->integer.value & 0x1) {
-               param.type = ACPI_TYPE_INTEGER;
-               param.integer.value = 0;
-
-               input.count = 1;
-               input.pointer = &param;
-
+       if (value & 0x1) {
                dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
-               status = acpi_evaluate_object(acpi->handle, "SAOS", &input,
-                                             NULL);
+               status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
        }
 
-       kfree(result);
-
        return 0;
 }
 
index d6cfc1558c2f1e067d8fd72edb1dc7ce3ecd00e3..11244f8703c402c876fb34b0e107383781661416 100644 (file)
@@ -156,19 +156,15 @@ static struct thermal_cooling_device_ops memory_cooling_ops = {
 static int intel_menlow_memory_add(struct acpi_device *device)
 {
        int result = -ENODEV;
-       acpi_status status = AE_OK;
-       acpi_handle dummy;
        struct thermal_cooling_device *cdev;
 
        if (!device)
                return -EINVAL;
 
-       status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH))
                goto end;
 
-       status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH))
                goto end;
 
        cdev = thermal_cooling_device_register("Memory controller", device,
index d3fd52036fd6e041592c7060c987ed5508684a0e..47caab0ea7a14faa1051b35eafe1ff8e53703ee0 100644 (file)
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
                 "default is -1 (automatic)");
 #endif
 
-static int kbd_backlight = 1;
+static int kbd_backlight = -1;
 module_param(kbd_backlight, int, 0444);
 MODULE_PARM_DESC(kbd_backlight,
                 "set this to 0 to disable keyboard backlight, "
-                "1 to enable it (default: 0)");
+                "1 to enable it (default: no change from current value)");
 
-static int kbd_backlight_timeout;      /* = 0 */
+static int kbd_backlight_timeout = -1;
 module_param(kbd_backlight_timeout, int, 0444);
 MODULE_PARM_DESC(kbd_backlight_timeout,
-                "set this to 0 to set the default 10 seconds timeout, "
-                "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
-                "(default: 0)");
+                "meaningful values vary from 0 to 3 and their meaning depends "
+                "on the model (default: no change from current value)");
 
 #ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void);
@@ -1509,7 +1508,6 @@ static void sony_nc_function_resume(void)
 static int sony_nc_resume(struct device *dev)
 {
        struct sony_nc_value *item;
-       acpi_handle handle;
 
        for (item = sony_nc_values; item->name; item++) {
                int ret;
@@ -1524,15 +1522,13 @@ static int sony_nc_resume(struct device *dev)
                }
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
-                                        &handle))) {
+       if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
                int arg = 1;
                if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-                                        &handle)))
+       if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
                sony_nc_function_resume();
 
        return 0;
@@ -1844,6 +1840,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
        if (!kbdbl_ctl)
                return -ENOMEM;
 
+       kbdbl_ctl->mode = kbd_backlight;
+       kbdbl_ctl->timeout = kbd_backlight_timeout;
        kbdbl_ctl->handle = handle;
        if (handle == 0x0137)
                kbdbl_ctl->base = 0x0C00;
@@ -1870,8 +1868,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
        if (ret)
                goto outmode;
 
-       __sony_nc_kbd_backlight_mode_set(kbd_backlight);
-       __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout);
+       __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
+       __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
 
        return 0;
 
@@ -1886,17 +1884,8 @@ outkzalloc:
 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
        if (kbdbl_ctl) {
-               int result;
-
                device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
                device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
-
-               /* restore the default hw behaviour */
-               sony_call_snc_handle(kbdbl_ctl->handle,
-                               kbdbl_ctl->base | 0x10000, &result);
-               sony_call_snc_handle(kbdbl_ctl->handle,
-                               kbdbl_ctl->base + 0x200, &result);
-
                kfree(kbdbl_ctl);
                kbdbl_ctl = NULL;
        }
@@ -2690,7 +2679,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
 
 static void sony_nc_backlight_setup(void)
 {
-       acpi_handle unused;
        int max_brightness = 0;
        const struct backlight_ops *ops = NULL;
        struct backlight_properties props;
@@ -2725,8 +2713,7 @@ static void sony_nc_backlight_setup(void)
                sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
-       } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
-                                               &unused))) {
+       } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
                ops = &sony_backlight_ops;
                max_brightness = SONY_MAX_BRIGHTNESS - 1;
 
@@ -2758,7 +2745,6 @@ static int sony_nc_add(struct acpi_device *device)
 {
        acpi_status status;
        int result = 0;
-       acpi_handle handle;
        struct sony_nc_value *item;
 
        pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
@@ -2798,15 +2784,13 @@ static int sony_nc_add(struct acpi_device *device)
                goto outplatform;
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
-                                        &handle))) {
+       if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
                int arg = 1;
                if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-                                        &handle))) {
+       if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
                dprintk("Doing SNC setup\n");
                /* retrieve the available handles */
                result = sony_nc_handles_setup(sony_pf_device);
@@ -2829,9 +2813,8 @@ static int sony_nc_add(struct acpi_device *device)
 
                /* find the available acpiget as described in the DSDT */
                for (; item->acpiget && *item->acpiget; ++item->acpiget) {
-                       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-                                                        *item->acpiget,
-                                                        &handle))) {
+                       if (acpi_has_method(sony_nc_acpi_handle,
+                                                       *item->acpiget)) {
                                dprintk("Found %s getter: %s\n",
                                                item->name, *item->acpiget);
                                item->devattr.attr.mode |= S_IRUGO;
@@ -2841,9 +2824,8 @@ static int sony_nc_add(struct acpi_device *device)
 
                /* find the available acpiset as described in the DSDT */
                for (; item->acpiset && *item->acpiset; ++item->acpiset) {
-                       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-                                                        *item->acpiset,
-                                                        &handle))) {
+                       if (acpi_has_method(sony_nc_acpi_handle,
+                                                       *item->acpiset)) {
                                dprintk("Found %s setter: %s\n",
                                                item->name, *item->acpiset);
                                item->devattr.attr.mode |= S_IWUSR;
index 03ca6c139f1a5c9dd0cc821045bad7b5cb409d51..170f2788ee671491725fe6403b96a4b5d6cca9bc 100644 (file)
@@ -700,6 +700,14 @@ static void __init drv_acpi_handle_init(const char *name,
 static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
                        u32 level, void *context, void **return_value)
 {
+       struct acpi_device *dev;
+       if (!strcmp(context, "video")) {
+               if (acpi_bus_get_device(handle, &dev))
+                       return AE_OK;
+               if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+                       return AE_OK;
+       }
+
        *(acpi_handle *)return_value = handle;
 
        return AE_CTRL_TERMINATE;
@@ -712,10 +720,10 @@ static void __init tpacpi_acpi_handle_locate(const char *name,
        acpi_status status;
        acpi_handle device_found;
 
-       BUG_ON(!name || !hid || !handle);
+       BUG_ON(!name || !handle);
        vdbg_printk(TPACPI_DBG_INIT,
                        "trying to locate ACPI handle for %s, using HID %s\n",
-                       name, hid);
+                       name, hid ? hid : "NULL");
 
        memset(&device_found, 0, sizeof(device_found));
        status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
@@ -6090,19 +6098,28 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
+       struct acpi_device *device, *child;
        int rc;
 
-       if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
+       if (acpi_bus_get_device(handle, &device))
+               return 0;
+
+       rc = 0;
+       list_for_each_entry(child, &device->children, node) {
+               acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
+                                                         NULL, &buffer);
+               if (ACPI_FAILURE(status))
+                       continue;
+
                obj = (union acpi_object *)buffer.pointer;
                if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
                        pr_err("Unknown _BCL data, please report this to %s\n",
-                              TPACPI_MAIL);
+                               TPACPI_MAIL);
                        rc = 0;
                } else {
                        rc = obj->package.count;
                }
-       } else {
-               return 0;
+               break;
        }
 
        kfree(buffer.pointer);
@@ -6118,7 +6135,7 @@ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
        acpi_handle video_device;
        int bcl_levels = 0;
 
-       tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+       tpacpi_acpi_handle_locate("video", NULL, &video_device);
        if (video_device)
                bcl_levels = tpacpi_query_bcl_levels(video_device);
 
index 4ab618c63b457e0bbf5a613274ab10a3e76f26cc..67897c8740ba58ea3c93cf54b0a2a3e74a1e3a84 100644 (file)
@@ -80,13 +80,9 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event)
 static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
 {
        acpi_status status;
-       union acpi_object fncx_params[1] = {
-               { .type = ACPI_TYPE_INTEGER }
-       };
-       struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
 
-       fncx_params[0].integer.value = state ? 0x86 : 0x87;
-       status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+       status = acpi_execute_simple_method(device->handle, "FNCX",
+                                               state ? 0x86 : 0x87);
        if (ACPI_FAILURE(status)) {
                pr_err("Unable to switch FNCX notifications\n");
                return -ENODEV;
index eb3467ea6d860e6c6961f3294922804e0e5776a2..0cfadb65f7c639597abce1d1bcb81ba3a04ff1d3 100644 (file)
@@ -191,16 +191,9 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value)
 
 static int write_acpi_int(const char *methodName, int val)
 {
-       struct acpi_object_list params;
-       union acpi_object in_objs[1];
        acpi_status status;
 
-       params.count = ARRAY_SIZE(in_objs);
-       params.pointer = in_objs;
-       in_objs[0].type = ACPI_TYPE_INTEGER;
-       in_objs[0].integer.value = val;
-
-       status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+       status = acpi_execute_simple_method(NULL, (char *)methodName, val);
        return (status == AE_OK) ? 0 : -EIO;
 }
 
@@ -947,21 +940,17 @@ static void toshiba_acpi_hotkey_work(struct work_struct *work)
  */
 static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
 {
-       struct acpi_buffer buf;
-       union acpi_object out_obj;
+       unsigned long long value;
        acpi_status status;
 
-       buf.pointer = &out_obj;
-       buf.length = sizeof(out_obj);
-
-       status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
-                                     NULL, &buf);
-       if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
+       status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+                                     NULL, &value);
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI INFO method execution failed\n");
                return -EIO;
        }
 
-       return out_obj.integer.value;
+       return value;
 }
 
 static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
@@ -981,7 +970,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
 static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
 {
        acpi_status status;
-       acpi_handle ec_handle, handle;
+       acpi_handle ec_handle;
        int error;
        u32 hci_result;
 
@@ -1008,10 +997,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
         */
        status = AE_ERROR;
        ec_handle = ec_get_handle();
-       if (ec_handle)
-               status = acpi_get_handle(ec_handle, "NTFY", &handle);
-
-       if (ACPI_SUCCESS(status)) {
+       if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
                INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
 
                error = i8042_install_filter(toshiba_acpi_i8042_filter);
@@ -1027,10 +1013,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
         * Determine hotkey query interface. Prefer using the INFO
         * method when it is available.
         */
-       status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
-       if (ACPI_SUCCESS(status)) {
+       if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
                dev->info_supported = 1;
-       } else {
+       else {
                hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
                if (hci_result == HCI_SUCCESS)
                        dev->system_event_supported = 1;
@@ -1155,15 +1140,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
 
 static const char *find_hci_method(acpi_handle handle)
 {
-       acpi_status status;
-       acpi_handle hci_handle;
-
-       status = acpi_get_handle(handle, "GHCI", &hci_handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(handle, "GHCI"))
                return "GHCI";
 
-       status = acpi_get_handle(handle, "SPFC", &hci_handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(handle, "SPFC"))
                return "SPFC";
 
        return NULL;
index 601ea951224201a87719f0acc7de5ee7e4635649..62e8c221d01ea10a5f105a0a44b2d2d8277d7878 100644 (file)
@@ -252,8 +252,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
 {
        struct guid_block *block = NULL;
        char method[5];
-       struct acpi_object_list input;
-       union acpi_object params[1];
        acpi_status status;
        acpi_handle handle;
 
@@ -263,13 +261,9 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
        if (!block)
                return AE_NOT_EXIST;
 
-       input.count = 1;
-       input.pointer = params;
-       params[0].type = ACPI_TYPE_INTEGER;
-       params[0].integer.value = enable;
 
        snprintf(method, 5, "WE%02X", block->notify_id);
-       status = acpi_evaluate_object(handle, method, &input, NULL);
+       status = acpi_execute_simple_method(handle, method, enable);
 
        if (status != AE_OK && status != AE_NOT_FOUND)
                return status;
@@ -353,10 +347,10 @@ struct acpi_buffer *out)
 {
        struct guid_block *block = NULL;
        struct wmi_block *wblock = NULL;
-       acpi_handle handle, wc_handle;
+       acpi_handle handle;
        acpi_status status, wc_status = AE_ERROR;
-       struct acpi_object_list input, wc_input;
-       union acpi_object wc_params[1], wq_params[1];
+       struct acpi_object_list input;
+       union acpi_object wq_params[1];
        char method[5];
        char wc_method[5] = "WC";
 
@@ -386,11 +380,6 @@ struct acpi_buffer *out)
         * enable collection.
         */
        if (block->flags & ACPI_WMI_EXPENSIVE) {
-               wc_input.count = 1;
-               wc_input.pointer = wc_params;
-               wc_params[0].type = ACPI_TYPE_INTEGER;
-               wc_params[0].integer.value = 1;
-
                strncat(wc_method, block->object_id, 2);
 
                /*
@@ -398,10 +387,9 @@ struct acpi_buffer *out)
                 * expensive, but have no corresponding WCxx method. So we
                 * should not fail if this happens.
                 */
-               wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
-               if (ACPI_SUCCESS(wc_status))
-                       wc_status = acpi_evaluate_object(handle, wc_method,
-                               &wc_input, NULL);
+               if (acpi_has_method(handle, wc_method))
+                       wc_status = acpi_execute_simple_method(handle,
+                                                               wc_method, 1);
        }
 
        strcpy(method, "WQ");
@@ -414,9 +402,7 @@ struct acpi_buffer *out)
         * the WQxx method failed - we should disable collection anyway.
         */
        if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
-               wc_params[0].integer.value = 0;
-               status = acpi_evaluate_object(handle,
-               wc_method, &wc_input, NULL);
+               status = acpi_execute_simple_method(handle, wc_method, 0);
        }
 
        return status;
index 34049b0b4c731e352ae48f62c7fd3edcfc71b517..747826d99059955f8941d592ef2f7734cd38fd08 100644 (file)
@@ -239,8 +239,6 @@ static char *__init pnpacpi_get_id(struct acpi_device *device)
 
 static int __init pnpacpi_add_device(struct acpi_device *device)
 {
-       acpi_handle temp = NULL;
-       acpi_status status;
        struct pnp_dev *dev;
        char *pnpid;
        struct acpi_hardware_id *id;
@@ -253,8 +251,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
         * If a PnPacpi device is not present , the device
         * driver should not be loaded.
         */
-       status = acpi_get_handle(device->handle, "_CRS", &temp);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, "_CRS"))
                return 0;
 
        pnpid = pnpacpi_get_id(device);
@@ -271,16 +268,14 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
        dev->data = device;
        /* .enabled means the device can decode the resources */
        dev->active = device->status.enabled;
-       status = acpi_get_handle(device->handle, "_SRS", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_SRS"))
                dev->capabilities |= PNP_CONFIGURABLE;
        dev->capabilities |= PNP_READ;
        if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
                dev->capabilities |= PNP_WRITE;
        if (device->flags.removable)
                dev->capabilities |= PNP_REMOVABLE;
-       status = acpi_get_handle(device->handle, "_DIS", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_DIS"))
                dev->capabilities |= PNP_DISABLE;
 
        if (strlen(acpi_device_name(device)))
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
new file mode 100644 (file)
index 0000000..a7c81b5
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Generic power capping sysfs interface configuration
+#
+
+menuconfig POWERCAP
+       bool "Generic powercap sysfs driver"
+       help
+         The power capping sysfs interface allows kernel subsystems to expose power
+         capping settings to user space in a consistent way.  Usually, it consists
+         of multiple control types that determine which settings may be exposed and
+         power zones representing parts of the system that can be subject to power
+         capping.
+
+         If you want this code to be compiled in, say Y here.
+
+if POWERCAP
+# Client driver configurations go here.
+config INTEL_RAPL
+       tristate "Intel RAPL Support"
+       depends on X86
+       default n
+       ---help---
+         This enables support for the Intel Running Average Power Limit (RAPL)
+         technology which allows power limits to be enforced and monitored on
+         modern Intel processors (Sandy Bridge and later).
+
+         In RAPL, the platform level settings are divided into domains for
+         fine grained control. These domains include processor package, DRAM
+         controller, CPU core (Power Plane 0), graphics uncore (Power Plane
+         1), etc.
+
+endif
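For reference, a control type registered through this interface appears under /sys/class/powercap/ with its power zones nested beneath it. Roughly, for the RAPL client added below (layout illustrative; attribute names follow the powercap sysfs ABI introduced by this series):

    /sys/class/powercap/intel-rapl/
            intel-rapl:0/                           <- package zone
                    name, enabled, energy_uj, max_energy_range_uj
                    constraint_0_name               ("long_term")
                    constraint_0_power_limit_uw
                    constraint_0_time_window_us
                    intel-rapl:0:0/                 <- sub-zone per power plane (core, uncore, dram)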
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
new file mode 100644 (file)
index 0000000..0a21ef3
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWERCAP) += powercap_sys.o
+obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
new file mode 100644 (file)
index 0000000..2a786c5
--- /dev/null
@@ -0,0 +1,1395 @@
+/*
+ * Intel Running Average Power Limit (RAPL) Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/powercap.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+/* bitmasks for RAPL MSRs, used by primitive access functions */
+#define ENERGY_STATUS_MASK      0xffffffff
+
+#define POWER_LIMIT1_MASK       0x7FFF
+#define POWER_LIMIT1_ENABLE     BIT(15)
+#define POWER_LIMIT1_CLAMP      BIT(16)
+
+#define POWER_LIMIT2_MASK       (0x7FFFULL<<32)
+#define POWER_LIMIT2_ENABLE     BIT_ULL(47)
+#define POWER_LIMIT2_CLAMP      BIT_ULL(48)
+#define POWER_PACKAGE_LOCK      BIT_ULL(63)
+#define POWER_PP_LOCK           BIT(31)
+
+#define TIME_WINDOW1_MASK       (0x7FULL<<17)
+#define TIME_WINDOW2_MASK       (0x7FULL<<49)
+
+#define POWER_UNIT_OFFSET      0
+#define POWER_UNIT_MASK                0x0F
+
+#define ENERGY_UNIT_OFFSET     0x08
+#define ENERGY_UNIT_MASK       0x1F00
+
+#define TIME_UNIT_OFFSET       0x10
+#define TIME_UNIT_MASK         0xF0000
+
+#define POWER_INFO_MAX_MASK     (0x7fffULL<<32)
+#define POWER_INFO_MIN_MASK     (0x7fffULL<<16)
+#define POWER_INFO_MAX_TIME_WIN_MASK     (0x3fULL<<48)
+#define POWER_INFO_THERMAL_SPEC_MASK     0x7fff
+
+#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
+#define PP_POLICY_MASK         0x1F
+
+/* Non HW constants */
+#define RAPL_PRIMITIVE_DERIVED       BIT(1) /* not from raw data */
+#define RAPL_PRIMITIVE_DUMMY         BIT(2)
+
+/* scale RAPL units to avoid floating point math inside kernel */
+#define POWER_UNIT_SCALE     (1000000)
+#define ENERGY_UNIT_SCALE    (1000000)
+#define TIME_UNIT_SCALE      (1000000)
+
+#define TIME_WINDOW_MAX_MSEC 40000
+#define TIME_WINDOW_MIN_MSEC 250
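The *_UNIT_SCALE constants keep unit conversion in integer math: RAPL reports values in 1/2^N units, with N read from MSR_RAPL_POWER_UNIT via the masks above, so scaling by 1,000,000 before dividing yields micro-units without floating point. A hedged sketch of the idea (the driver's real rapl_unit_xlate() caches per-package divisors; this helper is illustrative only):

    /* convert a raw energy-status reading to microjoules */
    static u64 example_energy_to_uj(u64 raw, u64 unit_msr)
    {
            unsigned int esu = (unit_msr & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
            u64 divisor = 1ULL << esu;                      /* counts per joule */

            return raw * ENERGY_UNIT_SCALE / divisor;       /* microjoules */
    }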
+
+enum unit_type {
+       ARBITRARY_UNIT, /* no translation */
+       POWER_UNIT,
+       ENERGY_UNIT,
+       TIME_UNIT,
+};
+
+enum rapl_domain_type {
+       RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+       RAPL_DOMAIN_PP0, /* core power plane */
+       RAPL_DOMAIN_PP1, /* graphics uncore */
+       RAPL_DOMAIN_DRAM,/* DRAM control_type */
+       RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_msr_id {
+       RAPL_DOMAIN_MSR_LIMIT,
+       RAPL_DOMAIN_MSR_STATUS,
+       RAPL_DOMAIN_MSR_PERF,
+       RAPL_DOMAIN_MSR_POLICY,
+       RAPL_DOMAIN_MSR_INFO,
+       RAPL_DOMAIN_MSR_MAX,
+};
+
+/* per domain data, some are optional */
+enum rapl_primitives {
+       ENERGY_COUNTER,
+       POWER_LIMIT1,
+       POWER_LIMIT2,
+       FW_LOCK,
+
+       PL1_ENABLE,  /* power limit 1, aka long term */
+       PL1_CLAMP,   /* allow frequency to go below OS request */
+       PL2_ENABLE,  /* power limit 2, aka short term, instantaneous */
+       PL2_CLAMP,
+
+       TIME_WINDOW1, /* long term */
+       TIME_WINDOW2, /* short term */
+       THERMAL_SPEC_POWER,
+       MAX_POWER,
+
+       MIN_POWER,
+       MAX_TIME_WINDOW,
+       THROTTLED_TIME,
+       PRIORITY_LEVEL,
+
+       /* below are not raw primitive data */
+       AVERAGE_POWER,
+       NR_RAPL_PRIMITIVES,
+};
+
+#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
+
+/* Can be expanded to include events, etc. */
+struct rapl_domain_data {
+       u64 primitives[NR_RAPL_PRIMITIVES];
+       unsigned long timestamp;
+};
+
+
+#define        DOMAIN_STATE_INACTIVE           BIT(0)
+#define        DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
+#define DOMAIN_STATE_BIOS_LOCKED        BIT(2)
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+       struct powercap_zone_constraint *constraint;
+       int prim_id; /* primitive ID used to enable */
+       struct rapl_domain *domain;
+       const char *name;
+};
+
+static const char pl1_name[] = "long_term";
+static const char pl2_name[] = "short_term";
+
+struct rapl_domain {
+       const char *name;
+       enum rapl_domain_type id;
+       int msrs[RAPL_DOMAIN_MSR_MAX];
+       struct powercap_zone power_zone;
+       struct rapl_domain_data rdd;
+       struct rapl_power_limit rpl[NR_POWER_LIMITS];
+       u64 attr_map; /* track capabilities */
+       unsigned int state;
+       int package_id;
+};
+#define power_zone_to_rapl_domain(_zone) \
+       container_of(_zone, struct rapl_domain, power_zone)
+
+
+/* Each physical package contains multiple domains; these are the common
+ * data across RAPL domains within a package.
+ */
+struct rapl_package {
+       unsigned int id; /* physical package/socket id */
+       unsigned int nr_domains;
+       unsigned long domain_map; /* bit map of active domains */
+       unsigned int power_unit_divisor;
+       unsigned int energy_unit_divisor;
+       unsigned int time_unit_divisor;
+       struct rapl_domain *domains; /* array of domains, sized at runtime */
+       struct powercap_zone *power_zone; /* keep track of parent zone */
+       int nr_cpus; /* active cpus on the package, topology info is lost during
+                     * cpu hotplug, so we have to track it ourselves.
+                     */
+       unsigned long power_limit_irq; /* keep track of package power limit
+                                       * notify interrupt enable status.
+                                       */
+       struct list_head plist;
+};
+#define PACKAGE_PLN_INT_SAVED   BIT(0)
+#define MAX_PRIM_NAME (32)
+
+/* Per-domain data, used to describe individual knobs so that the access
+ * function can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+       const char *name;
+       u64 mask;
+       int shift;
+       enum rapl_domain_msr_id id;
+       enum unit_type unit;
+       u32 flag;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) {        \
+               .name = #p,                     \
+               .mask = m,                      \
+               .shift = s,                     \
+               .id = i,                        \
+               .unit = u,                      \
+               .flag = f                       \
+       }
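PRIMITIVE_INFO_INIT() is a designated-initializer shorthand; further down the file (outside this excerpt) it builds a table of rapl_primitive_info entries so one generic read/write routine can locate any field. Illustrative entries using the masks defined above (the actual table may differ in detail):

    static struct rapl_primitive_info example_rpi[] = {
            /* name, mask, shift, msr id, unit, flag */
            PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
                                RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
            PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
                                RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
            PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
                                RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
    };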
+
+static void rapl_init_domains(struct rapl_package *rp);
+static int rapl_read_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       bool xlate, u64 *data);
+static int rapl_write_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       unsigned long long value);
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+                       int to_raw);
+static void package_power_limit_irq_save(int package_id);
+
+static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
+
+static const char * const rapl_domain_names[] = {
+       "package",
+       "core",
+       "uncore",
+       "dram",
+};
+
+static struct powercap_control_type *control_type; /* PowerCap Controller */
+
+/* caller to ensure CPU hotplug lock is held */
+static struct rapl_package *find_package_by_id(int id)
+{
+       struct rapl_package *rp;
+
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               if (rp->id == id)
+                       return rp;
+       }
+
+       return NULL;
+}
+
+/* caller to ensure CPU hotplug lock is held */
+static int find_active_cpu_on_package(int package_id)
+{
+       int i;
+
+       for_each_online_cpu(i) {
+               if (topology_physical_package_id(i) == package_id)
+                       return i;
+       }
+       /* all CPUs on this package are offline */
+
+       return -ENODEV;
+}
+
+/* caller must hold cpu hotplug lock */
+static void rapl_cleanup_data(void)
+{
+       struct rapl_package *p, *tmp;
+
+       list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
+               kfree(p->domains);
+               list_del(&p->plist);
+               kfree(p);
+       }
+}
+
+static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+{
+       struct rapl_domain *rd;
+       u64 energy_now;
+
+       /* prevent CPU hotplug so that the RAPL domain does not go
+        * away while reading the counter.
+        */
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+
+       if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
+               *energy_raw = energy_now;
+               put_online_cpus();
+
+               return 0;
+       }
+       put_online_cpus();
+
+       return -EIO;
+}
+
+static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+{
+       *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+       return 0;
+}
+
+static int release_zone(struct powercap_zone *power_zone)
+{
+       struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+       struct rapl_package *rp;
+
+       /* package zone is the last zone of a package, so we can free
+        * memory here since all children have been unregistered.
+        */
+       if (rd->id == RAPL_DOMAIN_PACKAGE) {
+               rp = find_package_by_id(rd->package_id);
+               if (!rp) {
+                       dev_warn(&power_zone->dev, "no package id %s\n",
+                               rd->name);
+                       return -ENODEV;
+               }
+               kfree(rd);
+               rp->domains = NULL;
+       }
+
+       return 0;
+
+}
+
+static int find_nr_power_limit(struct rapl_domain *rd)
+{
+       int i;
+
+       for (i = 0; i < NR_POWER_LIMITS; i++) {
+               if (rd->rpl[i].name == NULL)
+                       break;
+       }
+
+       return i;
+}
+
+static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+{
+       struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+       int nr_powerlimit;
+
+       if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
+               return -EACCES;
+       get_online_cpus();
+       nr_powerlimit = find_nr_power_limit(rd);
+       /* here we activate/deactivate the hardware for power limiting */
+       rapl_write_data_raw(rd, PL1_ENABLE, mode);
+       /* always enable clamp such that p-state can go below OS requested
+        * range; power capping takes priority over guaranteed frequency.
+        */
+       rapl_write_data_raw(rd, PL1_CLAMP, mode);
+       /* some domains have pl2 */
+       if (nr_powerlimit > 1) {
+               rapl_write_data_raw(rd, PL2_ENABLE, mode);
+               rapl_write_data_raw(rd, PL2_CLAMP, mode);
+       }
+       put_online_cpus();
+
+       return 0;
+}
+
+static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
+{
+       struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+       u64 val;
+
+       if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+               *mode = false;
+               return 0;
+       }
+       get_online_cpus();
+       if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
+               put_online_cpus();
+               return -EIO;
+       }
+       *mode = val;
+       put_online_cpus();
+
+       return 0;
+}
+
+/* per RAPL domain ops, in the order of rapl_domain_type */
+static struct powercap_zone_ops zone_ops[] = {
+       /* RAPL_DOMAIN_PACKAGE */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+       /* RAPL_DOMAIN_PP0 */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+       /* RAPL_DOMAIN_PP1 */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+       /* RAPL_DOMAIN_DRAM */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+};
+
+static int set_power_limit(struct powercap_zone *power_zone, int id,
+                       u64 power_limit)
+{
+       struct rapl_domain *rd;
+       struct rapl_package *rp;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       rp = find_package_by_id(rd->package_id);
+       if (!rp) {
+               ret = -ENODEV;
+               goto set_exit;
+       }
+
+       if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+               dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
+                       rd->name);
+               ret = -EACCES;
+               goto set_exit;
+       }
+
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
+               break;
+       case PL2_ENABLE:
+               rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       if (!ret)
+               package_power_limit_irq_save(rd->package_id);
+set_exit:
+       put_online_cpus();
+       return ret;
+}
+
+static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+                                       u64 *data)
+{
+       struct rapl_domain *rd;
+       u64 val;
+       int prim;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               prim = POWER_LIMIT1;
+               break;
+       case PL2_ENABLE:
+               prim = POWER_LIMIT2;
+               break;
+       default:
+               put_online_cpus();
+               return -EINVAL;
+       }
+       if (rapl_read_data_raw(rd, prim, true, &val))
+               ret = -EIO;
+       else
+               *data = val;
+
+       put_online_cpus();
+
+       return ret;
+}
+
+static int set_time_window(struct powercap_zone *power_zone, int id,
+                                                               u64 window)
+{
+       struct rapl_domain *rd;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               rapl_write_data_raw(rd, TIME_WINDOW1, window);
+               break;
+       case PL2_ENABLE:
+               rapl_write_data_raw(rd, TIME_WINDOW2, window);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       put_online_cpus();
+       return ret;
+}
+
+static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+{
+       struct rapl_domain *rd;
+       u64 val;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
+               break;
+       case PL2_ENABLE:
+               ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
+               break;
+       default:
+               put_online_cpus();
+               return -EINVAL;
+       }
+       if (!ret)
+               *data = val;
+       put_online_cpus();
+
+       return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+{
+       struct rapl_power_limit *rpl;
+       struct rapl_domain *rd;
+
+       rd = power_zone_to_rapl_domain(power_zone);
+       rpl = (struct rapl_power_limit *) &rd->rpl[id];
+
+       return rpl->name;
+}
+
+static int get_max_power(struct powercap_zone *power_zone, int id,
+                                       u64 *data)
+{
+       struct rapl_domain *rd;
+       u64 val;
+       int prim;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               prim = THERMAL_SPEC_POWER;
+               break;
+       case PL2_ENABLE:
+               prim = MAX_POWER;
+               break;
+       default:
+               put_online_cpus();
+               return -EINVAL;
+       }
+       if (rapl_read_data_raw(rd, prim, true, &val))
+               ret = -EIO;
+       else
+               *data = val;
+
+       put_online_cpus();
+
+       return ret;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+       .set_power_limit_uw = set_power_limit,
+       .get_power_limit_uw = get_current_power_limit,
+       .set_time_window_us = set_time_window,
+       .get_time_window_us = get_time_window,
+       .get_max_power_uw = get_max_power,
+       .get_name = get_constraint_name,
+};
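+
+/* As an illustration (cross-reference, not part of the original comment):
+ * in the powercap sysfs layout implemented in powercap_sys.c below, these
+ * callbacks back the per-zone constraint files, e.g. set_power_limit_uw
+ * handles writes to constraint_<n>_power_limit_uw and get_time_window_us
+ * handles reads of constraint_<n>_time_window_us.
+ */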
+
+/* called after domain detection and package level data are set */
+static void rapl_init_domains(struct rapl_package *rp)
+{
+       int i;
+       struct rapl_domain *rd = rp->domains;
+
+       for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+               unsigned int mask = rp->domain_map & (1 << i);
+               switch (mask) {
+               case BIT(RAPL_DOMAIN_PACKAGE):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
+                       rd->id = RAPL_DOMAIN_PACKAGE;
+                       rd->msrs[0] = MSR_PKG_POWER_LIMIT;
+                       rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
+                       rd->msrs[2] = MSR_PKG_PERF_STATUS;
+                       rd->msrs[3] = 0;
+                       rd->msrs[4] = MSR_PKG_POWER_INFO;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       rd->rpl[1].prim_id = PL2_ENABLE;
+                       rd->rpl[1].name = pl2_name;
+                       break;
+               case BIT(RAPL_DOMAIN_PP0):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
+                       rd->id = RAPL_DOMAIN_PP0;
+                       rd->msrs[0] = MSR_PP0_POWER_LIMIT;
+                       rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
+                       rd->msrs[2] = 0;
+                       rd->msrs[3] = MSR_PP0_POLICY;
+                       rd->msrs[4] = 0;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       break;
+               case BIT(RAPL_DOMAIN_PP1):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
+                       rd->id = RAPL_DOMAIN_PP1;
+                       rd->msrs[0] = MSR_PP1_POWER_LIMIT;
+                       rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
+                       rd->msrs[2] = 0;
+                       rd->msrs[3] = MSR_PP1_POLICY;
+                       rd->msrs[4] = 0;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       break;
+               case BIT(RAPL_DOMAIN_DRAM):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
+                       rd->id = RAPL_DOMAIN_DRAM;
+                       rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
+                       rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
+                       rd->msrs[2] = MSR_DRAM_PERF_STATUS;
+                       rd->msrs[3] = 0;
+                       rd->msrs[4] = MSR_DRAM_POWER_INFO;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       break;
+               }
+               if (mask) {
+                       rd->package_id = rp->id;
+                       rd++;
+               }
+       }
+}
+
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+                       int to_raw)
+{
+       u64 divisor = 1;
+       int scale = 1; /* scale to user-friendly data without floating point */
+       u64 f, y; /* fraction and exp. used for time unit */
+       struct rapl_package *rp;
+
+       rp = find_package_by_id(package);
+       if (!rp)
+               return value;
+
+       switch (type) {
+       case POWER_UNIT:
+               divisor = rp->power_unit_divisor;
+               scale = POWER_UNIT_SCALE;
+               break;
+       case ENERGY_UNIT:
+               scale = ENERGY_UNIT_SCALE;
+               divisor = rp->energy_unit_divisor;
+               break;
+       case TIME_UNIT:
+               divisor = rp->time_unit_divisor;
+               scale = TIME_UNIT_SCALE;
+               /* Special processing based on 2^Y * (1 + F/4) = val / divisor,
+                * refer to the Intel Software Developer's Manual Vol. 3a,
+                * CH 14.7.4.
+                */
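+               /* Worked example (field and unit values assumed for
+                * illustration): a raw field of 0x21 gives F = 1, Y = 1,
+                * i.e. 2^1 * (1 + 1/4) = 2.5 time units; with a
+                * time_unit_divisor of 1024 and a microsecond
+                * TIME_UNIT_SCALE this is roughly 2441 us.
+                */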
+               if (!to_raw) {
+                       f = (value & 0x60) >> 5;
+                       y = value & 0x1f;
+                       value = (1 << y) * (4 + f) * scale / 4;
+                       return div64_u64(value, divisor);
+               } else {
+                       do_div(value, scale);
+                       value *= divisor;
+                       y = ilog2(value);
+                       f = div64_u64(4 * (value - (1 << y)), 1 << y);
+                       value = (y & 0x1f) | ((f & 0x3) << 5);
+                       return value;
+               }
+               break;
+       case ARBITRARY_UNIT:
+       default:
+               return value;
+       }
+
+       if (to_raw)
+               return div64_u64(value * divisor, scale);
+       else
+               return div64_u64(value * scale, divisor);
+}
+
+/* in the order of enum rapl_primitives */
+static struct rapl_primitive_info rpi[] = {
+       /* name, mask, shift, msr index, unit divisor */
+       PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+                               RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+                               RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+                               RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+                               RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+                               RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+                               0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+                               RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+                               RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+                               RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+                               RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+                               RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
+       /* non-hardware */
+       PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+                               RAPL_PRIMITIVE_DERIVED),
+       {NULL, 0, 0, 0},
+};
+
+/* Read primitive data based on its related struct rapl_primitive_info.
+ * If the xlate flag is set, return the data translated into its unit,
+ * i.e. time, energy, or power.
+ * RAPL MSRs are non-architectural and are not laid out consistently across
+ * domains. Here we use the primitive info to write consolidated access
+ * functions.
+ * For a given primitive, the raw field is extracted using the MSR mask and
+ * shift. Unit conversion is pre-assigned based on the RAPL unit MSRs read
+ * at init time.
+ * 63-------------------------- 31--------------------------- 0
+ * |                           xxxxx (mask)                   |
+ * |                                |<- shift ----------------|
+ * 63-------------------------- 31--------------------------- 0
+ */
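+/* As an illustration of the table above: POWER_LIMIT2 has shift 32 and a mask
+ * selecting the upper power-limit field of the limit MSR, so its raw field is
+ * (msr_value & POWER_LIMIT2_MASK) >> 32, which is then converted with the
+ * package's POWER unit divisor.
+ */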
+static int rapl_read_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       bool xlate, u64 *data)
+{
+       u64 value, final;
+       u32 msr;
+       struct rapl_primitive_info *rp = &rpi[prim];
+       int cpu;
+
+       if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+               return -EINVAL;
+
+       msr = rd->msrs[rp->id];
+       if (!msr)
+               return -EINVAL;
+       /* use physical package id to look up active cpus */
+       cpu = find_active_cpu_on_package(rd->package_id);
+       if (cpu < 0)
+               return cpu;
+
+       /* special-case package domain, which uses a different bit */
+       if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
+               rp->mask = POWER_PACKAGE_LOCK;
+               rp->shift = 63;
+       }
+       /* non-hardware data are collected by the polling thread */
+       if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+               *data = rd->rdd.primitives[prim];
+               return 0;
+       }
+
+       if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
+               pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+               return -EIO;
+       }
+
+       final = value & rp->mask;
+       final = final >> rp->shift;
+       if (xlate)
+               *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+       else
+               *data = final;
+
+       return 0;
+}
+
+/* Similar use of primitive info as in the read counterpart */
+static int rapl_write_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       unsigned long long value)
+{
+       u64 msr_val;
+       u32 msr;
+       struct rapl_primitive_info *rp = &rpi[prim];
+       int cpu;
+
+       cpu = find_active_cpu_on_package(rd->package_id);
+       if (cpu < 0)
+               return cpu;
+       msr = rd->msrs[rp->id];
+       if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
+               dev_dbg(&rd->power_zone.dev,
+                       "failed to read msr 0x%x on cpu %d\n", msr, cpu);
+               return -EIO;
+       }
+       value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+       msr_val &= ~rp->mask;
+       msr_val |= value << rp->shift;
+       if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+               dev_dbg(&rd->power_zone.dev,
+                       "failed to write msr 0x%x on cpu %d\n", msr, cpu);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int rapl_check_unit(struct rapl_package *rp, int cpu)
+{
+       u64 msr_val;
+       u32 value;
+
+       if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
+               pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
+                       MSR_RAPL_POWER_UNIT, cpu);
+               return -ENODEV;
+       }
+
+       /* Raw RAPL data stored in MSRs are expressed in hardware-specific
+        * scales. We need to convert them into standard units based on the
+        * divisors reported in the RAPL unit MSRs, i.e.:
+        * energy unit: 1/energy_unit_divisor Joules
+        * power unit: 1/power_unit_divisor Watts
+        * time unit: 1/time_unit_divisor Seconds
+        */
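+       /* For example (register field values assumed for illustration): an
+        * energy-unit field of 16 gives energy_unit_divisor = 2^16 = 65536,
+        * i.e. one raw energy count is roughly 15.3 uJ, and a power-unit
+        * field of 3 gives power_unit_divisor = 8, i.e. 1/8 W granularity.
+        */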
+       value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+       rp->energy_unit_divisor = 1 << value;
+
+       value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+       rp->power_unit_divisor = 1 << value;
+
+       value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+       rp->time_unit_divisor = 1 << value;
+
+       pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n",
+               rp->id,
+               rp->energy_unit_divisor,
+               rp->time_unit_divisor,
+               rp->power_unit_divisor);
+
+       return 0;
+}
+
+/* REVISIT:
+ * When the package power limit is set artificially low by RAPL, the LVT
+ * thermal interrupt for the package power limit should be ignored
+ * since we are not really exceeding the real limit. The intention
+ * is to avoid excessive interrupts while we are trying to save power.
+ * A useful feature might be routing the package_power_limit interrupt
+ * to userspace via eventfd. Once we have a use case, this is simple
+ * to do by adding an atomic notifier.
+ */
+
+static void package_power_limit_irq_save(int package_id)
+{
+       u32 l, h = 0;
+       int cpu;
+       struct rapl_package *rp;
+
+       rp = find_package_by_id(package_id);
+       if (!rp)
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+               return;
+
+       cpu = find_active_cpu_on_package(package_id);
+       if (cpu < 0)
+               return;
+       /* save the state of PLN irq mask bit before disabling it */
+       rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+       if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+               rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+               rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+       }
+       l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+       wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(int package_id)
+{
+       u32 l, h;
+       int cpu;
+       struct rapl_package *rp;
+
+       rp = find_package_by_id(package_id);
+       if (!rp)
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+               return;
+
+       cpu = find_active_cpu_on_package(package_id);
+       if (cpu < 0)
+               return;
+
+       /* irq enable state not saved, nothing to restore */
+       if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+               return;
+       rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+       if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+               l |= PACKAGE_THERM_INT_PLN_ENABLE;
+       else
+               l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+       wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static const struct x86_cpu_id rapl_ids[] = {
+       { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
+       { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+       { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
+       { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+       /* TODO: Add more CPU IDs after testing */
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
+
+/* Read all raw primitive data once, for all packages and domains */
+static void rapl_update_domain_data(void)
+{
+       int dmn, prim;
+       u64 val;
+       struct rapl_package *rp;
+
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+                       pr_debug("update package %d domain %s data\n", rp->id,
+                               rp->domains[dmn].name);
+                       /* exclude non-raw primitives */
+                       for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
+                               if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+                                                               rpi[prim].unit,
+                                                               &val))
+                                       rp->domains[dmn].rdd.primitives[prim] =
+                                                                       val;
+               }
+       }
+}
+
+static int rapl_unregister_powercap(void)
+{
+       struct rapl_package *rp;
+       struct rapl_domain *rd, *rd_package = NULL;
+
+       /* unregister all active rapl packages from the powercap layer,
+        * hotplug lock held
+        */
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               package_power_limit_irq_restore(rp->id);
+
+               for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+                    rd++) {
+                       pr_debug("remove package, undo power limit on %d: %s\n",
+                               rp->id, rd->name);
+                       rapl_write_data_raw(rd, PL1_ENABLE, 0);
+                       rapl_write_data_raw(rd, PL2_ENABLE, 0);
+                       rapl_write_data_raw(rd, PL1_CLAMP, 0);
+                       rapl_write_data_raw(rd, PL2_CLAMP, 0);
+                       if (rd->id == RAPL_DOMAIN_PACKAGE) {
+                               rd_package = rd;
+                               continue;
+                       }
+                       powercap_unregister_zone(control_type, &rd->power_zone);
+               }
+               /* do the package zone last */
+               if (rd_package)
+                       powercap_unregister_zone(control_type,
+                                               &rd_package->power_zone);
+       }
+       powercap_unregister_control_type(control_type);
+
+       return 0;
+}
+
+static int rapl_package_register_powercap(struct rapl_package *rp)
+{
+       struct rapl_domain *rd;
+       int ret = 0;
+       char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
+       struct powercap_zone *power_zone = NULL;
+       int nr_pl;
+
+       /* First we register the package domain as the parent zone */
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               if (rd->id == RAPL_DOMAIN_PACKAGE) {
+                       nr_pl = find_nr_power_limit(rd);
+                       pr_debug("register socket %d package domain %s\n",
+                               rp->id, rd->name);
+                       memset(dev_name, 0, sizeof(dev_name));
+                       snprintf(dev_name, sizeof(dev_name), "%s-%d",
+                               rd->name, rp->id);
+                       power_zone = powercap_register_zone(&rd->power_zone,
+                                                       control_type,
+                                                       dev_name, NULL,
+                                                       &zone_ops[rd->id],
+                                                       nr_pl,
+                                                       &constraint_ops);
+                       if (IS_ERR(power_zone)) {
+                               pr_debug("failed to register package, %d\n",
+                                       rp->id);
+                               ret = PTR_ERR(power_zone);
+                               goto exit_package;
+                       }
+                       /* track parent zone in per package/socket data */
+                       rp->power_zone = power_zone;
+                       /* done, only one package domain per socket */
+                       break;
+               }
+       }
+       if (!power_zone) {
+               pr_err("no package domain found, unknown topology!\n");
+               ret = -ENODEV;
+               goto exit_package;
+       }
+       /* Now register domains as children of the socket/package zone */
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               if (rd->id == RAPL_DOMAIN_PACKAGE)
+                       continue;
+               /* number of power limits per domain varies */
+               nr_pl = find_nr_power_limit(rd);
+               power_zone = powercap_register_zone(&rd->power_zone,
+                                               control_type, rd->name,
+                                               rp->power_zone,
+                                               &zone_ops[rd->id], nr_pl,
+                                               &constraint_ops);
+
+               if (IS_ERR(power_zone)) {
+                       pr_debug("failed to register power_zone, %d:%s:%s\n",
+                               rp->id, rd->name, dev_name);
+                       ret = PTR_ERR(power_zone);
+                       goto err_cleanup;
+               }
+       }
+
+exit_package:
+       return ret;
+err_cleanup:
+       /* clean up previously initialized domains within the package if we
+        * failed after the first domain setup.
+        */
+       while (--rd >= rp->domains) {
+               pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+               powercap_unregister_zone(control_type, &rd->power_zone);
+       }
+
+       return ret;
+}
+
+static int rapl_register_powercap(void)
+{
+       struct rapl_domain *rd;
+       struct rapl_package *rp;
+       int ret = 0;
+
+       control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+       if (IS_ERR(control_type)) {
+               pr_debug("failed to register powercap control_type.\n");
+               return PTR_ERR(control_type);
+       }
+       /* read the initial data */
+       rapl_update_domain_data();
+       list_for_each_entry(rp, &rapl_packages, plist)
+               if (rapl_package_register_powercap(rp))
+                       goto err_cleanup_package;
+       return ret;
+
+err_cleanup_package:
+       /* clean up previously initialized packages */
+       list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
+               for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+                    rd++) {
+                       pr_debug("unregister zone/package %d, %s domain\n",
+                               rp->id, rd->name);
+                       powercap_unregister_zone(control_type, &rd->power_zone);
+               }
+       }
+
+       return ret;
+}
+
+static int rapl_check_domain(int cpu, int domain)
+{
+       unsigned msr;
+       u64 val1, val2 = 0;
+       int retry = 0;
+
+       switch (domain) {
+       case RAPL_DOMAIN_PACKAGE:
+               msr = MSR_PKG_ENERGY_STATUS;
+               break;
+       case RAPL_DOMAIN_PP0:
+               msr = MSR_PP0_ENERGY_STATUS;
+               break;
+       case RAPL_DOMAIN_PP1:
+               msr = MSR_PP1_ENERGY_STATUS;
+               break;
+       case RAPL_DOMAIN_DRAM:
+               msr = MSR_DRAM_ENERGY_STATUS;
+               break;
+       default:
+               pr_err("invalid domain id %d\n", domain);
+               return -EINVAL;
+       }
+       if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+               return -ENODEV;
+
+       /* energy counters roll slowly on some domains */
+       while (++retry < 10) {
+               usleep_range(10000, 15000);
+               rdmsrl_safe_on_cpu(cpu, msr, &val2);
+               if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
+                       return 0;
+       }
+       /* if energy counter does not change, report as bad domain */
+       pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
+               rapl_domain_names[domain], val1, val2);
+
+       return -ENODEV;
+}
+
+/* Detect active and valid domains for the given CPU; the caller must
+ * ensure the CPU belongs to the targeted package and that CPU hotplug is
+ * disabled.
+ */
+static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+{
+       int i;
+       int ret = 0;
+       struct rapl_domain *rd;
+       u64 locked;
+
+       for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+               /* use physical package id to read counters */
+               if (!rapl_check_domain(cpu, i))
+                       rp->domain_map |= 1 << i;
+       }
+       rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
+       if (!rp->nr_domains) {
+               pr_err("no valid rapl domains found in package %d\n", rp->id);
+               ret = -ENODEV;
+               goto done;
+       }
+       pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+
+       rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+                       GFP_KERNEL);
+       if (!rp->domains) {
+               ret = -ENOMEM;
+               goto done;
+       }
+       rapl_init_domains(rp);
+
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               /* check if the domain is locked by BIOS */
+               if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
+                       pr_info("RAPL package %d domain %s locked by BIOS\n",
+                               rp->id, rd->name);
+                       rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+               }
+       }
+
+done:
+       return ret;
+}
+
+static bool is_package_new(int package)
+{
+       struct rapl_package *rp;
+
+       /* Caller prevents CPU hotplug, so no packages can be added or removed
+        * while traversing the package list; no locking is needed.
+        */
+       list_for_each_entry(rp, &rapl_packages, plist)
+               if (package == rp->id)
+                       return false;
+
+       return true;
+}
+
+/* The RAPL interface can be made of a two-level hierarchy: package level and
+ * domain level. We first detect the number of packages, then the domains of
+ * each package. We have to consider the possibility of CPUs going
+ * online/offline due to hotplug and other scenarios.
+ */
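+/* For example, on a two-socket system this typically yields one parent zone
+ * per package (registered as "<domain name>-<package id>" below) plus one
+ * child zone per detected domain (PP0, PP1 and/or DRAM), depending on which
+ * energy counters the part implements.
+ */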
+static int rapl_detect_topology(void)
+{
+       int i;
+       int phy_package_id;
+       struct rapl_package *new_package, *rp;
+
+       for_each_online_cpu(i) {
+               phy_package_id = topology_physical_package_id(i);
+               if (is_package_new(phy_package_id)) {
+                       new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
+                       if (!new_package) {
+                               rapl_cleanup_data();
+                               return -ENOMEM;
+                       }
+                       /* add the new package to the list */
+                       new_package->id = phy_package_id;
+                       new_package->nr_cpus = 1;
+
+                       /* check if the package contains valid domains */
+                       if (rapl_detect_domains(new_package, i) ||
+                               rapl_check_unit(new_package, i)) {
+                               kfree(new_package->domains);
+                               kfree(new_package);
+                               /* free up the packages already initialized */
+                               rapl_cleanup_data();
+                               return -ENODEV;
+                       }
+                       INIT_LIST_HEAD(&new_package->plist);
+                       list_add(&new_package->plist, &rapl_packages);
+               } else {
+                       rp = find_package_by_id(phy_package_id);
+                       if (rp)
+                               ++rp->nr_cpus;
+               }
+       }
+
+       return 0;
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static void rapl_remove_package(struct rapl_package *rp)
+{
+       struct rapl_domain *rd, *rd_package = NULL;
+
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               if (rd->id == RAPL_DOMAIN_PACKAGE) {
+                       rd_package = rd;
+                       continue;
+               }
+               pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+               powercap_unregister_zone(control_type, &rd->power_zone);
+       }
+       /* do parent zone last */
+       powercap_unregister_zone(control_type, &rd_package->power_zone);
+       list_del(&rp->plist);
+       kfree(rp);
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static int rapl_add_package(int cpu)
+{
+       int ret = 0;
+       int phy_package_id;
+       struct rapl_package *rp;
+
+       phy_package_id = topology_physical_package_id(cpu);
+       rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
+       if (!rp)
+               return -ENOMEM;
+
+       /* add the new package to the list */
+       rp->id = phy_package_id;
+       rp->nr_cpus = 1;
+       /* check if the package contains valid domains */
+       if (rapl_detect_domains(rp, cpu) ||
+               rapl_check_unit(rp, cpu)) {
+               ret = -ENODEV;
+               goto err_free_package;
+       }
+       if (!rapl_package_register_powercap(rp)) {
+               INIT_LIST_HEAD(&rp->plist);
+               list_add(&rp->plist, &rapl_packages);
+               return ret;
+       }
+
+err_free_package:
+       kfree(rp->domains);
+       kfree(rp);
+
+       return ret;
+}
+
+/* Handles CPU hotplug on multi-socket systems.
+ * If a CPU goes online as the first CPU of the physical package
+ * we add the RAPL package to the system. Similarly, when the last
+ * CPU of the package is removed, we remove the RAPL package and its
+ * associated domains. Cooling devices are handled accordingly at
+ * per-domain level.
+ */
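+/* For example, when the last online CPU of a package goes down,
+ * CPU_DOWN_PREPARE sees rp->nr_cpus drop to zero and calls
+ * rapl_remove_package(); when the first CPU of a new package comes online,
+ * rapl_add_package() registers its domains with the powercap layer.
+ */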
+static int rapl_cpu_callback(struct notifier_block *nfb,
+                               unsigned long action, void *hcpu)
+{
+       unsigned long cpu = (unsigned long)hcpu;
+       int phy_package_id;
+       struct rapl_package *rp;
+
+       phy_package_id = topology_physical_package_id(cpu);
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               rp = find_package_by_id(phy_package_id);
+               if (rp)
+                       ++rp->nr_cpus;
+               else
+                       rapl_add_package(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               rp = find_package_by_id(phy_package_id);
+               if (!rp)
+                       break;
+               if (--rp->nr_cpus == 0)
+                       rapl_remove_package(rp);
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_cpu_notifier = {
+       .notifier_call = rapl_cpu_callback,
+};
+
+static int __init rapl_init(void)
+{
+       int ret = 0;
+
+       if (!x86_match_cpu(rapl_ids)) {
+               pr_err("driver does not support CPU family %d model %d\n",
+                       boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+               return -ENODEV;
+       }
+       /* prevent CPU hotplug during detection */
+       get_online_cpus();
+       ret = rapl_detect_topology();
+       if (ret)
+               goto done;
+
+       if (rapl_register_powercap()) {
+               rapl_cleanup_data();
+               ret = -ENODEV;
+               goto done;
+       }
+       register_hotcpu_notifier(&rapl_cpu_notifier);
+done:
+       put_online_cpus();
+
+       return ret;
+}
+
+static void __exit rapl_exit(void)
+{
+       get_online_cpus();
+       unregister_hotcpu_notifier(&rapl_cpu_notifier);
+       rapl_unregister_powercap();
+       rapl_cleanup_data();
+       put_online_cpus();
+}
+
+module_init(rapl_init);
+module_exit(rapl_exit);
+
+MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
new file mode 100644 (file)
index 0000000..c22fa4c
--- /dev/null
@@ -0,0 +1,683 @@
+/*
+ * Power capping class
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/powercap.h>
+
+#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
+#define to_powercap_control_type(n) \
+                       container_of(n, struct powercap_control_type, dev)
+
+/* Power zone show function */
+#define define_power_zone_show(_attr)          \
+static ssize_t _attr##_show(struct device *dev, \
+                                       struct device_attribute *dev_attr,\
+                                       char *buf) \
+{ \
+       u64 value; \
+       ssize_t len = -EINVAL; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       \
+       if (power_zone->ops->get_##_attr) { \
+               if (!power_zone->ops->get_##_attr(power_zone, &value)) \
+                       len = sprintf(buf, "%lld\n", value); \
+       } \
+       \
+       return len; \
+}
+
+/* The only meaningful input is 0 (reset), others are silently ignored */
+#define define_power_zone_store(_attr)         \
+static ssize_t _attr##_store(struct device *dev,\
+                               struct device_attribute *dev_attr, \
+                               const char *buf, size_t count) \
+{ \
+       int err; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       u64 value; \
+       \
+       err = kstrtoull(buf, 10, &value); \
+       if (err) \
+               return -EINVAL; \
+       if (value) \
+               return count; \
+       if (power_zone->ops->reset_##_attr) { \
+               if (!power_zone->ops->reset_##_attr(power_zone)) \
+                       return count; \
+       } \
+       \
+       return -EINVAL; \
+}
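+
+/* As an illustration (a sketch of the generated code, not a verbatim
+ * expansion): define_power_zone_show(energy_uj) produces energy_uj_show(),
+ * which calls power_zone->ops->get_energy_uj() and prints the value; paired
+ * with DEVICE_ATTR_RW(energy_uj) below, it backs the per-zone sysfs file
+ * "energy_uj".
+ */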
+
+/* Power zone constraint show function */
+#define define_power_zone_constraint_show(_attr) \
+static ssize_t show_constraint_##_attr(struct device *dev, \
+                               struct device_attribute *dev_attr,\
+                               char *buf) \
+{ \
+       u64 value; \
+       ssize_t len = -ENODATA; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       int id; \
+       struct powercap_zone_constraint *pconst;\
+       \
+       if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+               return -EINVAL; \
+       if (id >= power_zone->const_id_cnt)     \
+               return -EINVAL; \
+       pconst = &power_zone->constraints[id]; \
+       if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
+               if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
+                       len = sprintf(buf, "%lld\n", value); \
+       } \
+       \
+       return len; \
+}
+
+/* Power zone constraint store function */
+#define define_power_zone_constraint_store(_attr) \
+static ssize_t store_constraint_##_attr(struct device *dev,\
+                               struct device_attribute *dev_attr, \
+                               const char *buf, size_t count) \
+{ \
+       int err; \
+       u64 value; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       int id; \
+       struct powercap_zone_constraint *pconst;\
+       \
+       if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+               return -EINVAL; \
+       if (id >= power_zone->const_id_cnt)     \
+               return -EINVAL; \
+       pconst = &power_zone->constraints[id]; \
+       err = kstrtoull(buf, 10, &value); \
+       if (err) \
+               return -EINVAL; \
+       if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
+               if (!pconst->ops->set_##_attr(power_zone, id, value)) \
+                       return count; \
+       } \
+       \
+       return -ENODATA; \
+}
+
+/* Power zone information callbacks */
+define_power_zone_show(power_uw);
+define_power_zone_show(max_power_range_uw);
+define_power_zone_show(energy_uj);
+define_power_zone_store(energy_uj);
+define_power_zone_show(max_energy_range_uj);
+
+/* Power zone attributes */
+static DEVICE_ATTR_RO(max_power_range_uw);
+static DEVICE_ATTR_RO(power_uw);
+static DEVICE_ATTR_RO(max_energy_range_uj);
+static DEVICE_ATTR_RW(energy_uj);
+
+/* Power zone constraint attributes callbacks */
+define_power_zone_constraint_show(power_limit_uw);
+define_power_zone_constraint_store(power_limit_uw);
+define_power_zone_constraint_show(time_window_us);
+define_power_zone_constraint_store(time_window_us);
+define_power_zone_constraint_show(max_power_uw);
+define_power_zone_constraint_show(min_power_uw);
+define_power_zone_constraint_show(max_time_window_us);
+define_power_zone_constraint_show(min_time_window_us);
+
+/* For one-time seeding of constraint device attributes */
+struct powercap_constraint_attr {
+       struct device_attribute power_limit_attr;
+       struct device_attribute time_window_attr;
+       struct device_attribute max_power_attr;
+       struct device_attribute min_power_attr;
+       struct device_attribute max_time_window_attr;
+       struct device_attribute min_time_window_attr;
+       struct device_attribute name_attr;
+};
+
+static struct powercap_constraint_attr
+                               constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+
+/* A list of powercap control_types */
+static LIST_HEAD(powercap_cntrl_list);
+/* Mutex to protect list of powercap control_types */
+static DEFINE_MUTEX(powercap_cntrl_list_lock);
+
+#define POWERCAP_CONSTRAINT_NAME_LEN   30 /* Some limit to avoid overflow */
+static ssize_t show_constraint_name(struct device *dev,
+                               struct device_attribute *dev_attr,
+                               char *buf)
+{
+       const char *name;
+       struct powercap_zone *power_zone = to_powercap_zone(dev);
+       int id;
+       ssize_t len = -ENODATA;
+       struct powercap_zone_constraint *pconst;
+
+       if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+               return -EINVAL;
+       if (id >= power_zone->const_id_cnt)
+               return -EINVAL;
+       pconst = &power_zone->constraints[id];
+
+       if (pconst && pconst->ops && pconst->ops->get_name) {
+               name = pconst->ops->get_name(power_zone, id);
+               if (name) {
+                       snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN,
+                                                               "%s\n", name);
+                       buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0';
+                       len = strlen(buf);
+               }
+       }
+
+       return len;
+}
+
+static int create_constraint_attribute(int id, const char *name,
+                               int mode,
+                               struct device_attribute *dev_attr,
+                               ssize_t (*show)(struct device *,
+                                       struct device_attribute *, char *),
+                               ssize_t (*store)(struct device *,
+                                       struct device_attribute *,
+                               const char *, size_t)
+                               )
+{
+
+       dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+                                                               id, name);
+       if (!dev_attr->attr.name)
+               return -ENOMEM;
+       dev_attr->attr.mode = mode;
+       dev_attr->show = show;
+       dev_attr->store = store;
+
+       return 0;
+}
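+
+/* For instance, create_constraint_attribute(0, "power_limit_uw", ...) yields
+ * an attribute named "constraint_0_power_limit_uw"; seed_constraint_attributes()
+ * below builds the full constraint_<id>_* set (power_limit_uw, time_window_us,
+ * name, max/min power and time window) for each possible constraint id.
+ */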
+
+static void free_constraint_attributes(void)
+{
+       int i;
+
+       for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+               kfree(constraint_attrs[i].power_limit_attr.attr.name);
+               kfree(constraint_attrs[i].time_window_attr.attr.name);
+               kfree(constraint_attrs[i].name_attr.attr.name);
+               kfree(constraint_attrs[i].max_power_attr.attr.name);
+               kfree(constraint_attrs[i].min_power_attr.attr.name);
+               kfree(constraint_attrs[i].max_time_window_attr.attr.name);
+               kfree(constraint_attrs[i].min_time_window_attr.attr.name);
+       }
+}
+
+static int seed_constraint_attributes(void)
+{
+       int i;
+       int ret;
+
+       for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+               ret = create_constraint_attribute(i, "power_limit_uw",
+                                       S_IWUSR | S_IRUGO,
+                                       &constraint_attrs[i].power_limit_attr,
+                                       show_constraint_power_limit_uw,
+                                       store_constraint_power_limit_uw);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "time_window_us",
+                                       S_IWUSR | S_IRUGO,
+                                       &constraint_attrs[i].time_window_attr,
+                                       show_constraint_time_window_us,
+                                       store_constraint_time_window_us);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "name", S_IRUGO,
+                               &constraint_attrs[i].name_attr,
+                               show_constraint_name,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+                               &constraint_attrs[i].max_power_attr,
+                               show_constraint_max_power_uw,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+                               &constraint_attrs[i].min_power_attr,
+                               show_constraint_min_power_uw,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "max_time_window_us",
+                               S_IRUGO,
+                               &constraint_attrs[i].max_time_window_attr,
+                               show_constraint_max_time_window_us,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "min_time_window_us",
+                               S_IRUGO,
+                               &constraint_attrs[i].min_time_window_attr,
+                               show_constraint_min_time_window_us,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+
+       }
+
+       return 0;
+
+err_alloc:
+       free_constraint_attributes();
+
+       return ret;
+}
+
+static int create_constraints(struct powercap_zone *power_zone,
+                               int nr_constraints,
+                               struct powercap_zone_constraint_ops *const_ops)
+{
+       int i;
+       int ret = 0;
+       int count;
+       struct powercap_zone_constraint *pconst;
+
+       if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
+                                       !const_ops->set_power_limit_uw ||
+                                       !const_ops->get_time_window_us ||
+                                       !const_ops->set_time_window_us)
+               return -EINVAL;
+
+       count = power_zone->zone_attr_count;
+       for (i = 0; i < nr_constraints; ++i) {
+               pconst = &power_zone->constraints[i];
+               pconst->ops = const_ops;
+               pconst->id = power_zone->const_id_cnt;
+               power_zone->const_id_cnt++;
+               power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].power_limit_attr.attr;
+               power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].time_window_attr.attr;
+               if (pconst->ops->get_name)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].name_attr.attr;
+               if (pconst->ops->get_max_power_uw)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].max_power_attr.attr;
+               if (pconst->ops->get_min_power_uw)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].min_power_attr.attr;
+               if (pconst->ops->get_max_time_window_us)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].max_time_window_attr.attr;
+               if (pconst->ops->get_min_time_window_us)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].min_time_window_attr.attr;
+       }
+       power_zone->zone_attr_count = count;
+
+       return ret;
+}
+
+static bool control_type_valid(void *control_type)
+{
+       struct powercap_control_type *pos = NULL;
+       bool found = false;
+
+       mutex_lock(&powercap_cntrl_list_lock);
+
+       list_for_each_entry(pos, &powercap_cntrl_list, node) {
+               if (pos == control_type) {
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&powercap_cntrl_list_lock);
+
+       return found;
+}
+
+static ssize_t name_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+       return sprintf(buf, "%s\n", power_zone->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+/* Collect the common zone attributes to be created in sysfs */
+static void create_power_zone_common_attributes(
+                                       struct powercap_zone *power_zone)
+{
+       int count = 0;
+
+       power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
+       if (power_zone->ops->get_max_energy_range_uj)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_max_energy_range_uj.attr;
+       if (power_zone->ops->get_energy_uj)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_energy_uj.attr;
+       if (power_zone->ops->get_power_uw)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_power_uw.attr;
+       if (power_zone->ops->get_max_power_range_uw)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_max_power_range_uw.attr;
+       power_zone->zone_dev_attrs[count] = NULL;
+       power_zone->zone_attr_count = count;
+}
+
+static void powercap_release(struct device *dev)
+{
+       bool allocated;
+
+       if (dev->parent) {
+               struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+               /* Store flag as the release() may free memory */
+               allocated = power_zone->allocated;
+               /* Remove id from parent idr struct */
+               idr_remove(power_zone->parent_idr, power_zone->id);
+               /* Destroy idrs allocated for this zone */
+               idr_destroy(&power_zone->idr);
+               kfree(power_zone->name);
+               kfree(power_zone->zone_dev_attrs);
+               kfree(power_zone->constraints);
+               if (power_zone->ops->release)
+                       power_zone->ops->release(power_zone);
+               if (allocated)
+                       kfree(power_zone);
+       } else {
+               struct powercap_control_type *control_type =
+                                               to_powercap_control_type(dev);
+
+               /* Store flag as the release() may free memory */
+               allocated = control_type->allocated;
+               idr_destroy(&control_type->idr);
+               mutex_destroy(&control_type->lock);
+               if (control_type->ops && control_type->ops->release)
+                       control_type->ops->release(control_type);
+               if (allocated)
+                       kfree(control_type);
+       }
+}
+
+static ssize_t enabled_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       bool mode = true;
+
+       /* Default is enabled */
+       if (dev->parent) {
+               struct powercap_zone *power_zone = to_powercap_zone(dev);
+               if (power_zone->ops->get_enable)
+                       if (power_zone->ops->get_enable(power_zone, &mode))
+                               mode = false;
+       } else {
+               struct powercap_control_type *control_type =
+                                               to_powercap_control_type(dev);
+               if (control_type->ops && control_type->ops->get_enable)
+                       if (control_type->ops->get_enable(control_type, &mode))
+                               mode = false;
+       }
+
+       return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t enabled_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf,  size_t len)
+{
+       bool mode;
+
+       if (strtobool(buf, &mode))
+               return -EINVAL;
+       if (dev->parent) {
+               struct powercap_zone *power_zone = to_powercap_zone(dev);
+               if (power_zone->ops->set_enable)
+                       if (!power_zone->ops->set_enable(power_zone, mode))
+                               return len;
+       } else {
+               struct powercap_control_type *control_type =
+                                               to_powercap_control_type(dev);
+               if (control_type->ops && control_type->ops->set_enable)
+                       if (!control_type->ops->set_enable(control_type, mode))
+                               return len;
+       }
+
+       return -ENOSYS;
+}
+
+static struct device_attribute powercap_def_attrs[] = {
+               __ATTR(enabled, S_IWUSR | S_IRUGO, enabled_show,
+                                                       enabled_store),
+               __ATTR_NULL
+};
+
+static struct class powercap_class = {
+       .name = "powercap",
+       .dev_release = powercap_release,
+       .dev_attrs = powercap_def_attrs,
+};
+
+struct powercap_zone *powercap_register_zone(
+                               struct powercap_zone *power_zone,
+                               struct powercap_control_type *control_type,
+                               const char *name,
+                               struct powercap_zone *parent,
+                               const struct powercap_zone_ops *ops,
+                               int nr_constraints,
+                               struct powercap_zone_constraint_ops *const_ops)
+{
+       int result;
+       int nr_attrs;
+
+       if (!name || !control_type || !ops ||
+                       nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
+                       (!ops->get_energy_uj && !ops->get_power_uw) ||
+                       !control_type_valid(control_type))
+               return ERR_PTR(-EINVAL);
+
+       if (power_zone) {
+               if (!ops->release)
+                       return ERR_PTR(-EINVAL);
+               memset(power_zone, 0, sizeof(*power_zone));
+       } else {
+               power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
+               if (!power_zone)
+                       return ERR_PTR(-ENOMEM);
+               power_zone->allocated = true;
+       }
+       power_zone->ops = ops;
+       power_zone->control_type_inst = control_type;
+       if (!parent) {
+               power_zone->dev.parent = &control_type->dev;
+               power_zone->parent_idr = &control_type->idr;
+       } else {
+               power_zone->dev.parent = &parent->dev;
+               power_zone->parent_idr = &parent->idr;
+       }
+       power_zone->dev.class = &powercap_class;
+
+       mutex_lock(&control_type->lock);
+       /* Using idr to get the unique id */
+       result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
+       if (result < 0)
+               goto err_idr_alloc;
+
+       power_zone->id = result;
+       idr_init(&power_zone->idr);
+       power_zone->name = kstrdup(name, GFP_KERNEL);
+       if (!power_zone->name) {
+               result = -ENOMEM;
+               goto err_name_alloc;
+       }
+       dev_set_name(&power_zone->dev, "%s:%x",
+                                       dev_name(power_zone->dev.parent),
+                                       power_zone->id);
+       power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
+                                        nr_constraints, GFP_KERNEL);
+       if (!power_zone->constraints) {
+               result = -ENOMEM;
+               goto err_const_alloc;
+       }
+
+       nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
+                                               POWERCAP_ZONE_MAX_ATTRS + 1;
+       power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
+                                               nr_attrs, GFP_KERNEL);
+       if (!power_zone->zone_dev_attrs) {
+               result = -ENOMEM;
+               goto err_attr_alloc;
+       }
+       create_power_zone_common_attributes(power_zone);
+       result = create_constraints(power_zone, nr_constraints, const_ops);
+       if (result)
+               goto err_dev_ret;
+
+       power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
+       power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
+       power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+       power_zone->dev_attr_groups[1] = NULL;
+       power_zone->dev.groups = power_zone->dev_attr_groups;
+       result = device_register(&power_zone->dev);
+       if (result)
+               goto err_dev_ret;
+
+       control_type->nr_zones++;
+       mutex_unlock(&control_type->lock);
+
+       return power_zone;
+
+err_dev_ret:
+       kfree(power_zone->zone_dev_attrs);
+err_attr_alloc:
+       kfree(power_zone->constraints);
+err_const_alloc:
+       kfree(power_zone->name);
+err_name_alloc:
+       idr_remove(power_zone->parent_idr, power_zone->id);
+err_idr_alloc:
+       if (power_zone->allocated)
+               kfree(power_zone);
+       mutex_unlock(&control_type->lock);
+
+       return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(powercap_register_zone);
+
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+                               struct powercap_zone *power_zone)
+{
+       if (!power_zone || !control_type)
+               return -EINVAL;
+
+       mutex_lock(&control_type->lock);
+       control_type->nr_zones--;
+       mutex_unlock(&control_type->lock);
+
+       device_unregister(&power_zone->dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_zone);
+
+struct powercap_control_type *powercap_register_control_type(
+                               struct powercap_control_type *control_type,
+                               const char *name,
+                               const struct powercap_control_type_ops *ops)
+{
+       int result;
+
+       if (!name)
+               return ERR_PTR(-EINVAL);
+       if (control_type) {
+               if (!ops || !ops->release)
+                       return ERR_PTR(-EINVAL);
+               memset(control_type, 0, sizeof(*control_type));
+       } else {
+               control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
+               if (!control_type)
+                       return ERR_PTR(-ENOMEM);
+               control_type->allocated = true;
+       }
+       mutex_init(&control_type->lock);
+       control_type->ops = ops;
+       INIT_LIST_HEAD(&control_type->node);
+       control_type->dev.class = &powercap_class;
+       dev_set_name(&control_type->dev, "%s", name);
+       result = device_register(&control_type->dev);
+       if (result) {
+               if (control_type->allocated)
+                       kfree(control_type);
+               return ERR_PTR(result);
+       }
+       idr_init(&control_type->idr);
+
+       mutex_lock(&powercap_cntrl_list_lock);
+       list_add_tail(&control_type->node, &powercap_cntrl_list);
+       mutex_unlock(&powercap_cntrl_list_lock);
+
+       return control_type;
+}
+EXPORT_SYMBOL_GPL(powercap_register_control_type);
+
+int powercap_unregister_control_type(struct powercap_control_type *control_type)
+{
+       struct powercap_control_type *pos = NULL;
+
+       if (control_type->nr_zones) {
+               dev_err(&control_type->dev, "Zones of this type still not freed\n");
+               return -EINVAL;
+       }
+       mutex_lock(&powercap_cntrl_list_lock);
+       list_for_each_entry(pos, &powercap_cntrl_list, node) {
+               if (pos == control_type) {
+                       list_del(&control_type->node);
+                       mutex_unlock(&powercap_cntrl_list_lock);
+                       device_unregister(&control_type->dev);
+                       return 0;
+               }
+       }
+       mutex_unlock(&powercap_cntrl_list_lock);
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
+
+static int __init powercap_init(void)
+{
+       int result = 0;
+
+       result = seed_constraint_attributes();
+       if (result)
+               return result;
+
+       result = class_register(&powercap_class);
+
+       return result;
+}
+
+device_initcall(powercap_init);
+
+MODULE_DESCRIPTION("PowerCap sysfs Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
index 1a78163907734be9e1e49bd3bca6e2fc19e7b6c7..b9f2653e4ef90f315de25752fe24821a98497848 100644 (file)
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
                struct of_regulator_match **da9063_reg_matches)
 {
        da9063_reg_matches = NULL;
-       return PTR_ERR(-ENODEV);
+       return ERR_PTR(-ENODEV);
 }
 #endif
 
index 488dfe7ce9a6c048a751029451df59d6f6f2c074..7e2b165972e6edd6e3eee6b26478cb263db5d048 100644 (file)
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
 #define SMPS_CTRL_MODE_ECO             0x02
 #define SMPS_CTRL_MODE_PWM             0x03
 
-/* These values are derived from the data sheet. And are the number of steps
- * where there is a voltage change, the ranges at beginning and end of register
- * max/min values where there are no change are ommitted.
- *
- * So they are basically (maxV-minV)/stepV
- */
-#define PALMAS_SMPS_NUM_VOLTAGES       117
+#define PALMAS_SMPS_NUM_VOLTAGES       122
 #define PALMAS_SMPS10_NUM_VOLTAGES     2
 #define PALMAS_LDO_NUM_VOLTAGES                50
 
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        pmic->desc[id].min_uV = 900000;
                        pmic->desc[id].uV_step = 50000;
                        pmic->desc[id].linear_min_sel = 1;
+                       pmic->desc[id].enable_time = 500;
                        pmic->desc[id].vsel_reg =
                                        PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
                                                palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                                pmic->desc[id].min_uV = 450000;
                                pmic->desc[id].uV_step = 25000;
                        }
+
+                       /* LDO6 in vibrator mode will have an enable time of 2000 us */
+                       if (pdata && pdata->ldo6_vibrator &&
+                               (id == PALMAS_REG_LDO6))
+                               pmic->desc[id].enable_time = 2000;
                } else {
                        pmic->desc[id].n_voltages = 1;
                        pmic->desc[id].ops = &palmas_ops_extreg;
index d8e3e1262bc2960b4eebf89b5c552ef6e04df305..20c271d49dcbbd358e03d5140d05e9da4ca54952 100644 (file)
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
        ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
                   abb->base);
 
-       /* program LDO VBB vset override if needed */
-       if (abb->ldo_base)
+       /*
+        * program LDO VBB vset override if needed for !bypass mode
+        * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
+        * be performed *before* switch to bias mode else VBB glitches.
+        */
+       if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
                ti_abb_program_ldovbb(dev, abb, info);
 
        /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
        if (ret)
                goto out;
 
+       /*
+        * Reset LDO VBB vset override bypass mode
+        * XXX: Do not switch sequence - for bypass, LDO override reset *must*
+        * be performed *after* switch to bypass else VBB glitches.
+        */
+       if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
+               ti_abb_program_ldovbb(dev, abb, info);
+
 out:
        return ret;
 }
index 1432b26ef2e97b0830a2ecf973789ee20b7991cc..2205fbc2c37b4cf9b917bafa5d1f583f7e767b49 100644 (file)
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
  */
 
 static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
-       { .min_uV =  900000, .max_uV = 1650000, .min_sel =  0, .max_sel = 14,
+       { .min_uV =  900000, .max_uV = 1600000, .min_sel =  0, .max_sel = 14,
          .uV_step =  50000 },
        { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
          .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
  */
 
 static const struct regulator_linear_range wm831x_aldo_ranges[] = {
-       { .min_uV = 1000000, .max_uV = 1650000, .min_sel =  0, .max_sel = 12,
+       { .min_uV = 1000000, .max_uV = 1600000, .min_sel =  0, .max_sel = 12,
          .uV_step =  50000 },
        { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
          .uV_step = 100000 },
index 835b5f0f344ed2537115b1ed486d14860f88434e..61ca9292a42944229f9f52bc6f5b8943b6414b22 100644 (file)
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
 }
 
 static const struct regulator_linear_range wm8350_ldo_ranges[] = {
-       { .min_uV =  900000, .max_uV = 1750000, .min_sel =  0, .max_sel = 15,
+       { .min_uV =  900000, .max_uV = 1650000, .min_sel =  0, .max_sel = 15,
          .uV_step =  50000 },
        { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
          .uV_step = 100000 },
index 5adb2042e824fc30815ea891c54b89526680b4eb..cee7e2708a1fe35359eb81cc458d939e50ad1906 100644 (file)
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        int intensity = 0;
        int r0_perm;
        int nr_tracks;
+       int use_prefix;
 
        startdev = dasd_alias_get_start_dev(base);
        if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
                intensity = fdata->intensity;
        }
 
+       use_prefix = base_priv->features.feature[8] & 0x01;
+
        switch (intensity) {
        case 0x00:      /* Normal format */
        case 0x08:      /* Normal format, use cdl. */
                cplength = 2 + (rpt*nr_tracks);
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       rpt * nr_tracks * sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
                break;
        case 0x01:      /* Write record zero and format track. */
        case 0x09:      /* Write record zero and format track, use cdl. */
                cplength = 2 + rpt * nr_tracks;
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       sizeof(struct eckd_count) +
-                       rpt * nr_tracks * sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
                break;
        case 0x04:      /* Invalidate track. */
        case 0x0c:      /* Invalidate track, use cdl. */
                cplength = 3;
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count);
                break;
        default:
                dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
 
        switch (intensity & ~0x08) {
        case 0x00: /* Normal format. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-               /* grant subsystem permission to format R0 */
-               if (r0_perm)
-                       ((struct PFX_eckd_data *)data)
-                               ->define_extent.ga_extended |= 0x04;
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                       /* grant subsystem permission to format R0 */
+                       if (r0_perm)
+                               ((struct PFX_eckd_data *)data)
+                                       ->define_extent.ga_extended |= 0x04;
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                                     fdata->start_unit, fdata->stop_unit,
+                                     DASD_ECKD_CCW_WRITE_CKD, startdev);
+                       /* grant subsystem permission to format R0 */
+                       if (r0_perm)
+                               ((struct DE_eckd_data *) data)
+                                       ->ga_extended |= 0x04;
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
                data += sizeof(struct LO_eckd_data);
                break;
        case 0x01: /* Write record zero + format track. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
-                      base, startdev);
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+                              base, startdev);
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
                data += sizeof(struct LO_eckd_data);
                break;
        case 0x04: /* Invalidate track. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, startdev);
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, 1,
index 8b387b32fd622b981a19e951eca1b3fd30c783bf..e59331e6c2e5ef682dae02f7f5fbebe1bae1b5c8 100644 (file)
@@ -107,7 +107,7 @@ extern debug_info_t *scm_debug;
 
 static inline void SCM_LOG_HEX(int level, void *data, int length)
 {
-       if (level > scm_debug->level)
+       if (!debug_level_enabled(scm_debug, level))
                return;
        while (length > 0) {
                debug_event(scm_debug, level, data, length);
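/*
 * Editor's note (assumption, not from this merge): several hunks in this
 * series replace open-coded "level > dbf->level" checks with the s390
 * debug_level_enabled() helper.  The helper is expected to reduce to the
 * same comparison the call sites used to spell out, roughly:
 */
#include <asm/debug.h>

static inline bool sketch_debug_level_enabled(debug_info_t *id, int level)
{
	/* same test the call sites used to open-code */
	return level <= id->level;
}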
index 4600aa10a1c6c5824f36168fb83b414d2e4e915b..668b32b0dc1dc39317ebbc682f55bedbc86537c3 100644 (file)
@@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
        struct appldata_product_id id;
        int rc;
 
-       strcpy(id.prod_nr, "LNXAPPL");
+       strncpy(id.prod_nr, "LNXAPPL", 7);
        id.prod_fn = myhdr->applid;
        id.record_nr = myhdr->record_num;
        id.version_nr = myhdr->version;
index 24a08e8f19e1b9ed3366b5f035d4a0fd214bf97e..2cdec21e8924ea7b0403aafd1b0bae172a76416e 100644 (file)
@@ -615,10 +615,10 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
 
        if (rp->state != RAW3270_STATE_RESET)
                return;
-       if (rq && rq->rc) {
+       if (rq->rc) {
                /* Reset command failed. */
                rp->state = RAW3270_STATE_INIT;
-       } else if (0 && MACHINE_IS_VM) {
+       } else if (MACHINE_IS_VM) {
                raw3270_size_device_vm(rp);
                raw3270_size_device_done(rp);
        } else
index a3aa374799dcf99bd334b6e49c8d7753838fbdc3..1fe264379e0d10b57ddca456ce16b72c23535ccb 100644 (file)
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
-               timeout = get_tod_clock() +
+               timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
-                   get_tod_clock() > timeout &&
+                   get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
index 8cd34bf644b3ee4b34065814fab4300774c8ad23..77df9cb00688feeda6cad1fbfd4623fc5a26134e 100644 (file)
@@ -145,9 +145,11 @@ bool __init sclp_has_linemode(void)
 
        if (sccb->header.response_code != 0x20)
                return 0;
-       if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
-               return 1;
-       return 0;
+       if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
+               return 0;
+       if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+               return 0;
+       return 1;
 }
 
 bool __init sclp_has_vt220(void)
index a0f47c83fd62f94205a987a728edf207f86c3bc6..3f4ca4e09a4ccc4d49fba39d64094fafe1f1fdc7 100644 (file)
@@ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work)
        struct winsize ws;
 
        screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
-       if (!screen)
+       if (IS_ERR(screen))
                return;
        /* Switch to new output size */
        spin_lock_bh(&tp->view.lock);
index 9b3a24e8d3a0e0dfec9ffcbf1757ddd37f66329e..cf31d3321dab86b889b16fa5e3b9b2e433c3992d 100644 (file)
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
        int ret;
 
        dev_num = iminor(inode);
-       if (dev_num > MAXMINOR)
+       if (dev_num >= MAXMINOR)
                return -ENODEV;
        logptr = &sys_ser[dev_num];
 
index 794820a123d0c83c07b1b2e546efc84c25daa14a..ffb1fcf0bf5bed9928f53e1205f8b6b01635f6b6 100644 (file)
@@ -151,7 +151,7 @@ static int __init init_cpu_info(enum arch_id arch)
 
        /* get info for boot cpu from lowcore, stored in the HSA */
 
-       sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+       sa = dump_save_area_create(0);
        if (!sa)
                return -ENOMEM;
        if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
@@ -159,7 +159,6 @@ static int __init init_cpu_info(enum arch_id arch)
                kfree(sa);
                return -EIO;
        }
-       zfcpdump_save_areas[0] = sa;
        return 0;
 }
 
@@ -246,24 +245,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
 static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
 {
        unsigned long end;
-       int i = 0;
+       int i;
 
        if (count == 0)
                return 0;
 
        end = start + count;
-       while (zfcpdump_save_areas[i]) {
+       for (i = 0; i < dump_save_areas.count; i++) {
                unsigned long cp_start, cp_end; /* copy range */
                unsigned long sa_start, sa_end; /* save area range */
                unsigned long prefix;
                unsigned long sa_off, len, buf_off;
+               struct save_area *save_area = dump_save_areas.areas[i];
 
-               prefix = zfcpdump_save_areas[i]->pref_reg;
+               prefix = save_area->pref_reg;
                sa_start = prefix + sys_info.sa_base;
                sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
 
                if ((end < sa_start) || (start > sa_end))
-                       goto next;
+                       continue;
                cp_start = max(start, sa_start);
                cp_end = min(end, sa_end);
 
@@ -272,10 +272,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
                len = cp_end - cp_start;
 
                TRACE("copy_lc for: %lx\n", start);
-               if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+               if (copy_lc(buf + buf_off, save_area, sa_off, len))
                        return -EFAULT;
-next:
-               i++;
        }
        return 0;
 }
@@ -637,8 +635,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
        hdr->num_pages = mem_size / PAGE_SIZE;
        hdr->tod = get_tod_clock();
        get_cpu_id(&hdr->cpu_id);
-       for (i = 0; zfcpdump_save_areas[i]; i++) {
-               prefix = zfcpdump_save_areas[i]->pref_reg;
+       for (i = 0; i < dump_save_areas.count; i++) {
+               prefix = dump_save_areas.areas[i]->pref_reg;
                hdr->real_cpu_cnt++;
                if (!prefix)
                        continue;
index d028fd800c9c6afd7f5b5627475c4bfe6552f04b..f055df0b167fc83e1f2f6f17e0aef47e129007b4 100644 (file)
@@ -194,15 +194,14 @@ EXPORT_SYMBOL(airq_iv_release);
  */
 unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
 {
-       const unsigned long be_to_le = BITS_PER_LONG - 1;
        unsigned long bit;
 
        if (!iv->avail)
                return -1UL;
        spin_lock(&iv->lock);
-       bit = find_first_bit_left(iv->avail, iv->bits);
+       bit = find_first_bit_inv(iv->avail, iv->bits);
        if (bit < iv->bits) {
-               clear_bit(bit ^ be_to_le, iv->avail);
+               clear_bit_inv(bit, iv->avail);
                if (bit >= iv->end)
                        iv->end = bit + 1;
        } else
@@ -220,19 +219,17 @@ EXPORT_SYMBOL(airq_iv_alloc_bit);
  */
 void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
 {
-       const unsigned long be_to_le = BITS_PER_LONG - 1;
-
        if (!iv->avail)
                return;
        spin_lock(&iv->lock);
        /* Clear (possibly left over) interrupt bit */
-       clear_bit(bit ^ be_to_le, iv->vector);
+       clear_bit_inv(bit, iv->vector);
        /* Make the bit position available again */
-       set_bit(bit ^ be_to_le, iv->avail);
+       set_bit_inv(bit, iv->avail);
        if (bit == iv->end - 1) {
                /* Find new end of bit-field */
                while (--iv->end > 0)
-                       if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+                       if (!test_bit_inv(iv->end - 1, iv->avail))
                                break;
        }
        spin_unlock(&iv->lock);
@@ -251,15 +248,13 @@ EXPORT_SYMBOL(airq_iv_free_bit);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
                           unsigned long end)
 {
-       const unsigned long be_to_le = BITS_PER_LONG - 1;
        unsigned long bit;
 
        /* Find non-zero bit starting from 'ivs->next'. */
-       bit = find_next_bit_left(iv->vector, end, start);
+       bit = find_next_bit_inv(iv->vector, end, start);
        if (bit >= end)
                return -1UL;
-       /* Clear interrupt bit (find left uses big-endian bit numbers) */
-       clear_bit(bit ^ be_to_le, iv->vector);
+       clear_bit_inv(bit, iv->vector);
        return bit;
 }
 EXPORT_SYMBOL(airq_iv_scan);
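/*
 * Editor's sketch (assumption): the *_inv bit helpers used above are the
 * s390 big-endian bit-number variants; conceptually they fold in the
 * "bit ^ (BITS_PER_LONG - 1)" translation that airq.c used to open-code,
 * along the lines of:
 */
#include <linux/bitops.h>

static inline void sketch_set_bit_inv(unsigned long nr,
				      volatile unsigned long *ptr)
{
	set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void sketch_clear_bit_inv(unsigned long nr,
					volatile unsigned long *ptr)
{
	clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}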
index d7da67a31c77f606ef68445f9da446b521a4abf1..88e35d85d205f7c21de1f81860865386a7845289 100644 (file)
@@ -878,9 +878,9 @@ static void css_reset(void)
                        atomic_inc(&chpid_reset_count);
        }
        /* Wait for machine check for all channel paths. */
-       timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
+       timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
        while (atomic_read(&chpid_reset_count) != 0) {
-               if (get_tod_clock() > timeout)
+               if (get_tod_clock_fast() > timeout)
                        break;
                cpu_relax();
        }
index d9eddcba7e884d788a9d8c1f3dad14bc3d71cf77..aca7bfc113aaeb4067043cd10bdc0662dd8ec286 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/kernel_stat.h>
+#include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;
 
 static void EADM_LOG_HEX(int level, void *data, int length)
 {
-       if (level > eadm_debug->level)
+       if (!debug_level_enabled(eadm_debug, level))
                return;
        while (length > 0) {
                debug_event(eadm_debug, level, data, length);
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
        }
        scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
        private->state = EADM_IDLE;
+
+       if (private->completion)
+               complete(private->completion);
 }
 
 static struct subchannel *eadm_get_idle_sch(void)
@@ -255,13 +259,32 @@ out:
 
 static void eadm_quiesce(struct subchannel *sch)
 {
+       struct eadm_private *private = get_eadm_private(sch);
+       DECLARE_COMPLETION_ONSTACK(completion);
        int ret;
 
+       spin_lock_irq(sch->lock);
+       if (private->state != EADM_BUSY)
+               goto disable;
+
+       if (eadm_subchannel_clear(sch))
+               goto disable;
+
+       private->completion = &completion;
+       spin_unlock_irq(sch->lock);
+
+       wait_for_completion_io(&completion);
+
+       spin_lock_irq(sch->lock);
+       private->completion = NULL;
+
+disable:
+       eadm_subchannel_set_timeout(sch, 0);
        do {
-               spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
-               spin_unlock_irq(sch->lock);
        } while (ret == -EBUSY);
+
+       spin_unlock_irq(sch->lock);
 }
 
 static int eadm_subchannel_remove(struct subchannel *sch)
index 2779be093982ee1c219f4c9c6c24ab8edebe8ce3..9664e4653f9861416a78f4193cbe57d8d3105864 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef EADM_SCH_H
 #define EADM_SCH_H
 
+#include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/timer.h>
 #include <linux/list.h>
@@ -9,9 +10,10 @@
 struct eadm_private {
        union orb orb;
        enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+       struct completion *completion;
+       struct subchannel *sch;
        struct timer_list timer;
        struct list_head head;
-       struct subchannel *sch;
 } __aligned(8);
 
 #define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
index 647b422bb22a594b760d0a5590c962a7683192b6..dfac9bfefea3f11a9f61069006281a750551f39c 100644 (file)
 extern debug_info_t *qdio_dbf_setup;
 extern debug_info_t *qdio_dbf_error;
 
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR                3       /* error conditions     */
 #define DBF_WARN       4       /* warning conditions   */
 #define DBF_INFO       6       /* informational        */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
 #define DBF_DEV_EVENT(level, device, text...) \
        do { \
                char debug_buffer[QDIO_DBF_LEN]; \
-               if (qdio_dbf_passes(device->debug_area, level)) { \
+               if (debug_level_enabled(device->debug_area, level)) { \
                        snprintf(debug_buffer, QDIO_DBF_LEN, text); \
                        debug_text_event(device->debug_area, level, debug_buffer); \
                } \
index 8ed52aa4912269405158e300f1c983577b29caec..3e602e8affa78cba97282e41c5f9e3a18315e45b 100644 (file)
@@ -338,10 +338,10 @@ again:
                retries++;
 
                if (!start_time) {
-                       start_time = get_tod_clock();
+                       start_time = get_tod_clock_fast();
                        goto again;
                }
-               if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+               if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        int count, stop;
        unsigned char state = 0;
 
-       q->timestamp = get_tod_clock();
+       q->timestamp = get_tod_clock_fast();
 
        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -528,7 +528,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
-               if (atomic_sub(count, &q->nr_buf_used) == 0)
+               if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
         * At this point we know, that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
-       if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+       if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
        int count, stop;
        unsigned char state = 0;
 
-       q->timestamp = get_tod_clock();
+       q->timestamp = get_tod_clock_fast();
 
        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
index 841ea72e4a4e93a12d397070530191d4b02adb5d..28d9349de1adf77cf20dbe5f938d6a27afd3eaeb 100644 (file)
 /* that gives us 15 characters in the text event views */
 #define ZCRYPT_DBF_LEN 16
 
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR                3       /* error conditions     */
 #define DBF_WARN       4       /* warning conditions   */
 #define DBF_INFO       6       /* informational        */
@@ -25,7 +19,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_COMMON(level, text...) \
        do { \
-               if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+               if (debug_level_enabled(zcrypt_dbf_common, level)) { \
                        char debug_buffer[ZCRYPT_DBF_LEN]; \
                        snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
                        debug_text_event(zcrypt_dbf_common, level, \
@@ -35,7 +29,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_DEVICES(level, text...) \
        do { \
-               if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+               if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
                        char debug_buffer[ZCRYPT_DBF_LEN]; \
                        snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
                        debug_text_event(zcrypt_dbf_devices, level, \
@@ -45,7 +39,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_DEV(level, device, text...) \
        do { \
-               if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+               if (debug_level_enabled(device->dbf_area, level)) { \
                        char debug_buffer[ZCRYPT_DBF_LEN]; \
                        snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
                        debug_text_event(device->dbf_area, level, \
index 1bc5904df19ff550d3094ed939faab28f5cac44c..3339b9b607b3aa6ca5ad5ba60c20cd5c3144df55 100644 (file)
@@ -114,15 +114,9 @@ do { \
        debug_event(claw_dbf_##name,level,(void*)(addr),len); \
 } while (0)
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define CLAW_DBF_TEXT_(level,name,text...) \
        do { \
-               if (claw_dbf_passes(claw_dbf_##name, level)) { \
+               if (debug_level_enabled(claw_dbf_##name, level)) { \
                        sprintf(debug_buffer, text); \
                        debug_text_event(claw_dbf_##name, level, \
                                                debug_buffer); \
index 6514e1cb3f1cf1b21cc7bd2d4422ca48fe2b7d47..8363f1c966ef7e5a2e55d6f2a861ae011f9601d7 100644 (file)
@@ -66,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
        char dbf_txt_buf[64];
        va_list args;
 
-       if (level > (ctcm_dbf[dbf_nix].id)->level)
+       if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
                return;
        va_start(args, fmt);
        vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
index 8c03392ac833f2a284c7f422f296b52fc610e285..150fcb4cebc3510e5a4e45dfb61f63728cd5ed7d 100644 (file)
@@ -16,15 +16,9 @@ do { \
        debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
 } while (0)
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define LCS_DBF_TEXT_(level,name,text...) \
        do { \
-               if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+               if (debug_level_enabled(lcs_dbf_##name, level)) { \
                        sprintf(debug_buffer, text); \
                        debug_text_event(lcs_dbf_##name, level, debug_buffer); \
                } \
index 279ad504ec3c85e3e410d231fac82e812a75b5d2..9b333fcf1a4c38ea45d352f253aaa47838d78c91 100644 (file)
@@ -105,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
 
 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define IUCV_DBF_TEXT_(name, level, text...) \
        do { \
-               if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+               if (debug_level_enabled(iucv_dbf_##name, level)) { \
                        char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
                        sprintf(__buf, text); \
                        debug_text_event(iucv_dbf_##name, level, __buf); \
index 0a328d0d11bebaac40ccbaf2239ca999dd736373..d7b66a28fe75577dd6ccaef41619eb19b327df34 100644 (file)
@@ -5096,7 +5096,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
        char dbf_txt_buf[32];
        va_list args;
 
-       if (level > id->level)
+       if (!debug_level_enabled(id, level))
                return;
        va_start(args, fmt);
        vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
index 3ac7a4b30dd910ef6f59c4ada70966d66694c90e..0be3d48681aead94466a71ab7b34c6ae7ca098ff 100644 (file)
@@ -278,7 +278,7 @@ struct zfcp_dbf {
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
-       if (level <= req->adapter->dbf->hba->level)
+       if (debug_level_enabled(req->adapter->dbf->hba, level))
                zfcp_dbf_hba_fsf_res(tag, req);
 }
 
@@ -317,7 +317,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
        struct zfcp_adapter *adapter = (struct zfcp_adapter *)
                                        scmd->device->host->hostdata[0];
 
-       if (level <= adapter->dbf->scsi->level)
+       if (debug_level_enabled(adapter->dbf->scsi, level))
                zfcp_dbf_scsi(tag, scmd, req);
 }
 
index feab3a5e50b5ec6f546b102b6550ff7652424cac..757eb0716d45d4f273519ca2e2dd2ed03486a4a8 100644 (file)
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
        while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
                                        PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
                                        pci_device)) != NULL) {
-               struct blogic_adapter *adapter = adapter;
+               struct blogic_adapter *host_adapter = adapter;
                struct blogic_adapter_info adapter_info;
                enum blogic_isa_ioport mod_ioaddr_req;
                unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                   known and enabled, note that the particular Standard ISA I/O
                   Address should not be probed.
                 */
-               adapter->io_addr = io_addr;
-               blogic_intreset(adapter);
-               if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+               host_adapter->io_addr = io_addr;
+               blogic_intreset(host_adapter);
+               if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
                                &adapter_info, sizeof(adapter_info)) ==
                                sizeof(adapter_info)) {
                        if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                   I/O Address assigned at system initialization.
                 */
                mod_ioaddr_req = BLOGIC_IO_DISABLE;
-               blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+               blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
                                sizeof(mod_ioaddr_req), NULL, 0);
                /*
                   For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 
                        fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
                        fetch_localram.count = sizeof(autoscsi_byte45);
-                       blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
+                       blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
                                        &fetch_localram, sizeof(fetch_localram),
                                        &autoscsi_byte45,
                                        sizeof(autoscsi_byte45));
-                       blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
-                                       sizeof(id));
+                       blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+                                       &id, sizeof(id));
                        if (id.fw_ver_digit1 == '5')
                                force_scan_order =
                                        autoscsi_byte45.force_scan_order;
index f8ca7becacca15477dd04912acf4ccd11df948fc..7591fa4e28bb2be8ed6e6a64e972568429457f20 100644 (file)
@@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
        bfad->pcidev = pdev;
 
        /* Adjust PCIe Maximum Read Request Size */
-       if (pcie_max_read_reqsz > 0) {
-               int pcie_cap_reg;
-               u16 pcie_dev_ctl;
-               u16 mask = 0xffff;
-
-               switch (pcie_max_read_reqsz) {
-               case 128:
-                       mask = 0x0;
-                       break;
-               case 256:
-                       mask = 0x1000;
-                       break;
-               case 512:
-                       mask = 0x2000;
-                       break;
-               case 1024:
-                       mask = 0x3000;
-                       break;
-               case 2048:
-                       mask = 0x4000;
-                       break;
-               case 4096:
-                       mask = 0x5000;
-                       break;
-               default:
-                       break;
-               }
-
-               pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-               if (mask != 0xffff && pcie_cap_reg) {
-                       pcie_cap_reg += 0x08;
-                       pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
-                       if ((pcie_dev_ctl & 0x7000) != mask) {
-                               printk(KERN_WARNING "BFA[%s]: "
+       if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+               if (pcie_max_read_reqsz >= 128 &&
+                   pcie_max_read_reqsz <= 4096 &&
+                   is_power_of_2(pcie_max_read_reqsz)) {
+                       int max_rq = pcie_get_readrq(pdev);
+                       printk(KERN_WARNING "BFA[%s]: "
                                "pcie_max_read_request_size is %d, "
-                               "reset to %d\n", bfad->pci_name,
-                               (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+                               "reset to %d\n", bfad->pci_name, max_rq,
                                pcie_max_read_reqsz);
-
-                               pcie_dev_ctl &= ~0x7000;
-                               pci_write_config_word(pdev, pcie_cap_reg,
-                                               pcie_dev_ctl | mask);
-                       }
+                       pcie_set_readrq(pdev, pcie_max_read_reqsz);
+               } else {
+                       printk(KERN_WARNING "BFA[%s]: invalid "
+                              "pcie_max_read_request_size %d ignored\n",
+                              bfad->pci_name, pcie_max_read_reqsz);
                }
        }
 
index 0eb35b9b37843852e140e1bb3a601c5fa3e0f3a4..0eaec474895788a6437e12c9907e053c9f74b048 100644 (file)
@@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw)
        return 0;
 }
 
-static void
-csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
-{
-       uint16_t val;
-       int pcie_cap;
-
-       if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
-               pci_read_config_word(hw->pdev,
-                                    pcie_cap + PCI_EXP_DEVCTL2, &val);
-               val &= 0xfff0;
-               val |= range ;
-               pci_write_config_word(hw->pdev,
-                                     pcie_cap + PCI_EXP_DEVCTL2, val);
-       }
-}
-
 /*****************************************************************************/
 /* HW State machine assists                                                  */
 /*****************************************************************************/
@@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw)
                goto out;
        }
 
-       /* Set pci completion timeout value to 4 seconds. */
-       csio_set_pcie_completion_timeout(hw, 0xd);
+       /* Set PCIe completion timeout to 4 seconds */
+       if (pci_is_pcie(hw->pdev))
+               pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+                               PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 
        hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
 
index 2ef497ebadc06500a2aa95090f26d188cf8bccb7..ee5c1833eb731cb1e0e731448c287ca1b8f5b2ad 100644 (file)
@@ -20,7 +20,7 @@
  * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3058       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3059       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
  * |                              |                    | 0x302d,0x3033  |
index df1b30ba938cc0578d409880711d2afa5b2a8898..ff9c86b1a0d896a870dd62b22515ee5dfc629284 100644 (file)
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        que = MSW(sts->handle);
        req = ha->req_q_map[que];
 
+       /* Check for invalid queue pointer */
+       if (req == NULL ||
+           que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+               ql_dbg(ql_dbg_io, vha, 0x3059,
+                   "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+                   "que=%u.\n", sts->handle, req, que);
+               return;
+       }
+
        /* Validate handle. */
        if (handle < req->num_outstanding_cmds)
                sp = req->outstanding_cmds[handle];
index 62ee7131b20420a7d1d363682dde7192ed152758..30d20e74e48a393b3560a44bb354fb1fd058e60e 100644 (file)
@@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
        pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 
        /* PCIe -- adjust Maximum Read Request Size (2048). */
-       if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+       if (pci_is_pcie(ha->pdev))
                pcie_set_readrq(ha->pdev, 2048);
 
        ha->chip_revision = ha->pdev->revision;
@@ -660,10 +660,8 @@ char *
 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
        struct qla_hw_data *ha = vha->hw;
-       int pcie_reg;
 
-       pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
-       if (pcie_reg) {
+       if (pci_is_pcie(ha->pdev)) {
                strcpy(str, "PCIe iSA");
                return str;
        }
index 9f01bbbf3a26c9138a9f457fa9af999fa6572b09..bcd57f699ebbcc7dad7d597044c7bc54da22cf52 100644 (file)
@@ -494,18 +494,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
        static char *pci_bus_modes[] = { "33", "66", "100", "133", };
        struct qla_hw_data *ha = vha->hw;
        uint32_t pci_bus;
-       int pcie_reg;
 
-       pcie_reg = pci_pcie_cap(ha->pdev);
-       if (pcie_reg) {
+       if (pci_is_pcie(ha->pdev)) {
                char lwstr[6];
-               uint16_t pcie_lstat, lspeed, lwidth;
+               uint32_t lstat, lspeed, lwidth;
 
-               pcie_reg += PCI_EXP_LNKCAP;
-               pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
-               lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
-               lwidth = (pcie_lstat &
-                   (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
+               pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+               lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+               lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
 
                strcpy(str, "PCIe (");
                switch (lspeed) {
index d1549b74e2d1b91eeb4f8ebf459ec4f13db73275..7bd7f0d5f050a2ece3f176b1f05ca2f8936270a3 100644 (file)
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 
        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
-               bounce_limit = *host_dev->dma_mask;
+               bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
 
        return bounce_limit;
 }
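/*
 * Editor's note (assumption): dma_max_pfn() used above is expected to
 * default to the mask-derived value the old code computed directly, while
 * allowing architectures with a DMA pfn offset (e.g. ARM) to override it:
 */
#include <linux/dma-mapping.h>

static inline unsigned long sketch_dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}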
index e62d17d41d4e7b84ddf7469194aa0c63d541d59d..5693f6d7eddb8b20cd9b3e0374a4e73e60ab7aa2 100644 (file)
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
                gd->events |= DISK_EVENT_MEDIA_CHANGE;
        }
 
+       blk_pm_runtime_init(sdp->request_queue, dev);
        add_disk(gd);
        if (sdkp->capacity)
                sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
-       blk_pm_runtime_init(sdp->request_queue, dev);
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
 }
index fd7cc566095a40cb22d27519726c4faa14a5f37d..d4ac60b4a56e8a6f4615fcac50015a06cccd53fc 100644 (file)
@@ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
        /* Initialize the hardware */
        ret = clk_prepare_enable(clk);
        if (ret)
-               goto out_unmap_regs;
+               goto out_free_irq;
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        if (as->caps.has_wdrbt) {
@@ -1614,6 +1614,7 @@ out_free_dma:
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        clk_disable_unprepare(clk);
+out_free_irq:
        free_irq(irq, master);
 out_unmap_regs:
        iounmap(as->regs);
index 5655acf55bfe35a7d4f0fcb853e56435b4e847c9..6416798828e72bc9f5194282f55cf49a37f6809c 100644 (file)
@@ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
                               dev_name(&pdev->dev), hw);
        if (ret) {
                dev_err(&pdev->dev, "Can't request IRQ\n");
-               clk_put(hw->spi_clk);
                goto clk_out;
        }
 
@@ -247,7 +246,6 @@ err_out:
                        gpio_free(hw->chipselect[i]);
 
        spi_master_put(master);
-       kfree(master);
 
        return ret;
 }
@@ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
                        gpio_free(hw->chipselect[i]);
 
        spi_unregister_master(master);
-       kfree(master);
 
        return 0;
 }
index 6cd07d13ecab3b5fc78c263cb6220a3b0ae4ddb5..4e44575bd87a9674c72338cafe25c1b5006ef529 100644 (file)
@@ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev)
        master->bus_num = bus_num;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get platform resource\n");
-               ret = -EINVAL;
-               goto out_master_put;
-       }
-
        dspi->base = devm_ioremap_resource(&pdev->dev, res);
-       if (!dspi->base) {
-               ret = -EINVAL;
+       if (IS_ERR(dspi->base)) {
+               ret = PTR_ERR(dspi->base);
                goto out_master_put;
        }
 
index dbc5e999a1f5689c1a526ace9f3dc4061f924223..6adf4e35816d76c1c7f120cd924420c4c8bf71a1 100644 (file)
@@ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
        psc_num = master->bus_num;
        snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
        clk = devm_clk_get(dev, clk_name);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
                goto free_irq;
+       }
        ret = clk_prepare_enable(clk);
        if (ret)
                goto free_irq;
index 2eb06ee0b3264020d040c2b1ea8bd6745656c372..c1a50674c1e359deb3e83fc2a04c9acf2324f58d 100644 (file)
@@ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
        if (pm_runtime_suspended(&drv_data->pdev->dev))
                return IRQ_NONE;
 
-       sccr1_reg = read_SSCR1(reg);
+       /*
+        * If the device is not yet in RPM suspended state and we get an
+        * interrupt that is meant for another device, check if status bits
+        * are all set to one. That means that the device is already
+        * powered off.
+        */
        status = read_SSSR(reg);
+       if (status == ~0)
+               return IRQ_NONE;
+
+       sccr1_reg = read_SSCR1(reg);
 
        /* Ignore possible writes if we don't need to write */
        if (!(sccr1_reg & SSCR1_TIE))
index 512b8893893bd3d8349506fde7785e29591236e2..a80376dc3a102d04684f27934a42412be3e851d7 100644 (file)
@@ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
               S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
               sdd->regs + S3C64XX_SPI_INT_EN);
 
+       pm_runtime_enable(&pdev->dev);
+
        if (spi_register_master(master)) {
                dev_err(&pdev->dev, "cannot register SPI master\n");
                ret = -EBUSY;
@@ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
                                        mem_res,
                                        sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
-       pm_runtime_enable(&pdev->dev);
-
        return 0;
 
 err3:
index 0b68cb592fa4d022bb6d3b3bc70296bcb224284b..e488a90a98b8acbfff5b1fd72dd92b54db2ae602 100644 (file)
@@ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev)
                goto error1;
        }
 
+       pm_runtime_enable(&pdev->dev);
+
        master->num_chipselect  = 1;
        master->bus_num         = pdev->id;
        master->setup           = hspi_setup;
@@ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev)
                goto error1;
        }
 
-       pm_runtime_enable(&pdev->dev);
-
        return 0;
 
  error1:
index 9e039c60c0680ae761e2be41f0de0a171368f3c6..740f9ddda227d55f15042e1e7a8d2cadb58aae71 100644 (file)
@@ -240,15 +240,27 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
 static int spi_drv_probe(struct device *dev)
 {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+       struct spi_device               *spi = to_spi_device(dev);
+       int ret;
+
+       acpi_dev_pm_attach(&spi->dev, true);
+       ret = sdrv->probe(spi);
+       if (ret)
+               acpi_dev_pm_detach(&spi->dev, true);
 
-       return sdrv->probe(to_spi_device(dev));
+       return ret;
 }
 
 static int spi_drv_remove(struct device *dev)
 {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+       struct spi_device               *spi = to_spi_device(dev);
+       int ret;
+
+       ret = sdrv->remove(spi);
+       acpi_dev_pm_detach(&spi->dev, true);
 
-       return sdrv->remove(to_spi_device(dev));
+       return ret;
 }
 
 static void spi_drv_shutdown(struct device *dev)
@@ -1025,8 +1037,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                return AE_OK;
        }
 
+       adev->power.flags.ignore_parent = true;
        strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
        if (spi_add_device(spi)) {
+               adev->power.flags.ignore_parent = false;
                dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
                        dev_name(&adev->dev));
                spi_dev_put(spi);
index a84aab47a11315b137d370a9337a8f5d722bb644..f73287eab373a92b7f1a9876c3e53eb7f2cee327 100644 (file)
@@ -96,6 +96,15 @@ config COMEDI_SKEL
          To compile this driver as a module, choose M here: the module will be
          called skel.
 
+config COMEDI_SSV_DNP
+       tristate "SSV Embedded Systems DIL/Net-PC support"
+       depends on X86_32 || COMPILE_TEST
+       ---help---
+         Enable support for SSV Embedded Systems DIL/Net-PC
+
+         To compile this driver as a module, choose M here: the module will be
+         called ssv_dnp.
+
 endif # COMEDI_MISC_DRIVERS
 
 menuconfig COMEDI_ISA_DRIVERS
@@ -386,6 +395,14 @@ config COMEDI_DMM32AT
          To compile this driver as a module, choose M here: the module will be
          called dmm32at.
 
+config COMEDI_UNIOXX5
+       tristate "Fastwel UNIOxx-5 analog and digital io board support"
+       ---help---
+         Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
+
+         To compile this driver as a module, choose M here: the module will be
+         called unioxx5.
+
 config COMEDI_FL512
        tristate "FL512 ISA card support"
        ---help---
@@ -855,14 +872,6 @@ config COMEDI_DYNA_PCI10XX
          To compile this driver as a module, choose M here: the module will be
          called dyna_pci10xx.
 
-config COMEDI_UNIOXX5
-       tristate "Fastwel UNIOxx-5 analog and digital io board support"
-       ---help---
-         Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
-
-         To compile this driver as a module, choose M here: the module will be
-         called unioxx5.
-
 config COMEDI_GSC_HPDI
        tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
        select COMEDI_FC
@@ -1085,14 +1094,6 @@ config COMEDI_S626
          To compile this driver as a module, choose M here: the module will be
          called s626.
 
-config COMEDI_SSV_DNP
-       tristate "SSV Embedded Systems DIL/Net-PC support"
-       ---help---
-         Enable support for SSV Embedded Systems DIL/Net-PC
-
-         To compile this driver as a module, choose M here: the module will be
-         called ssv_dnp.
-
 config COMEDI_MITE
        depends on HAS_DMA
        tristate
index 3ba4c5712dffe5c9277bab3a0a59933e2b5a7c59..853f62b2b1a982c70ba229f94b4697c9d1e869d3 100644 (file)
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
 {
        const struct ni_65xx_board *board = comedi_board(dev);
        struct ni_65xx_private *devpriv = dev->private;
-       unsigned base_bitfield_channel;
-       const unsigned max_ports_per_bitfield = 5;
+       int base_bitfield_channel;
        unsigned read_bits = 0;
-       unsigned j;
+       int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
+       int port_offset;
 
        base_bitfield_channel = CR_CHAN(insn->chanspec);
-       for (j = 0; j < max_ports_per_bitfield; ++j) {
-               const unsigned port_offset =
-                       ni_65xx_port_by_channel(base_bitfield_channel) + j;
-               const unsigned port =
-                       sprivate(s)->base_port + port_offset;
-               unsigned base_port_channel;
+       for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
+            port_offset <= last_port_offset; port_offset++) {
+               unsigned port = sprivate(s)->base_port + port_offset;
+               int base_port_channel = port_offset * ni_65xx_channels_per_port;
                unsigned port_mask, port_data, port_read_bits;
-               int bitshift;
-               if (port >= ni_65xx_total_num_ports(board))
+               int bitshift = base_port_channel - base_bitfield_channel;
+
+               if (bitshift >= 32)
                        break;
-               base_port_channel = port_offset * ni_65xx_channels_per_port;
                port_mask = data[0];
                port_data = data[1];
-               bitshift = base_port_channel - base_bitfield_channel;
-               if (bitshift >= 32 || bitshift <= -32)
-                       break;
                if (bitshift > 0) {
                        port_mask >>= bitshift;
                        port_data >>= bitshift;
index 724a685753dd8671fba84c9f3e14174164696e64..40ef785a04284d9a4c79633f2877a957b8c405dc 100644 (file)
@@ -474,7 +474,7 @@ static void dgap_cleanup_board(struct board_t *brd)
 
                 DGAP_LOCK(dgap_global_lock, flags);
                 brd->msgbuf = NULL;
-                printk(brd->msgbuf_head);
+                printk("%s", brd->msgbuf_head);
                 kfree(brd->msgbuf_head);
                 brd->msgbuf_head = NULL;
                 DGAP_UNLOCK(dgap_global_lock, flags);
@@ -628,7 +628,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
        DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
        DGAP_LOCK(dgap_global_lock, flags);
        brd->msgbuf = NULL;
-       printk(brd->msgbuf_head);
+       printk("%s", brd->msgbuf_head);
        kfree(brd->msgbuf_head);
        brd->msgbuf_head = NULL;
        DGAP_UNLOCK(dgap_global_lock, flags);
@@ -955,25 +955,28 @@ static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
        char            buf[1024];
        int             i;
        unsigned long   flags;
+       size_t          length;
 
        DGAP_LOCK(dgap_global_lock, flags);
 
        /* Format buf using fmt and arguments contained in ap. */
        va_start(ap, fmt);
-       i = vsprintf(buf, fmt,  ap);
+       i = vsnprintf(buf, sizeof(buf), fmt,  ap);
        va_end(ap);
 
        DPR((buf));
 
        if (!brd || !brd->msgbuf) {
-               printk(buf);
+               printk("%s", buf);
                DGAP_UNLOCK(dgap_global_lock, flags);
                return;
        }
 
-       memcpy(brd->msgbuf, buf, strlen(buf));
-       brd->msgbuf += strlen(buf);
-       *brd->msgbuf = 0;
+       length = strlen(buf) + 1;
+       if (brd->msgbuf - brd->msgbuf_head < length)
+               length = brd->msgbuf - brd->msgbuf_head;
+       memcpy(brd->msgbuf, buf, length);
+       brd->msgbuf += length;
 
        DGAP_UNLOCK(dgap_global_lock, flags);
 }
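
The dgap_mbuf() and dgap_cleanup_board() hunks above stop passing a driver-built message buffer directly as the printk() format string; the matching dgnc hunks just below make the same change. If the buffer ever contains a '%' sequence, printk() will try to consume arguments that were never passed. A minimal sketch of the unsafe and safe forms (function and variable names are illustrative); the same rule motivates the "%s" additions to the kthread_run() and device_create() calls later in this merge:

#include <linux/kernel.h>

static void report_state(int id, const char *state)
{
        char msg[64];

        snprintf(msg, sizeof(msg), "board %d: %s\n", id, state);

        printk(msg);            /* unsafe: any '%' in msg is parsed as a format */
        printk("%s", msg);      /* safe: msg is treated as plain data */
}
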
index f8c1e22585d6cf81f3a4a52262b73608ab68f333..71d2b83cc3a12e365823df2e40fd176ac26120ef 100644 (file)
@@ -454,7 +454,7 @@ static void dgnc_cleanup_board(struct board_t *brd)
 
                DGNC_LOCK(dgnc_global_lock, flags);
                brd->msgbuf = NULL;
-               printk(brd->msgbuf_head);
+               printk("%s", brd->msgbuf_head);
                kfree(brd->msgbuf_head);
                brd->msgbuf_head = NULL;
                DGNC_UNLOCK(dgnc_global_lock, flags);
@@ -710,7 +710,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
        DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i));
        DGNC_LOCK(dgnc_global_lock, flags);
        brd->msgbuf = NULL;
-       printk(brd->msgbuf_head);
+       printk("%s", brd->msgbuf_head);
        kfree(brd->msgbuf_head);
        brd->msgbuf_head = NULL;
        DGNC_UNLOCK(dgnc_global_lock, flags);
index 44cce2fa6361194a36ac72dff5825575fd324bf5..1d68c49afabea493e6f426f99e7235acc738abbb 100644 (file)
@@ -100,8 +100,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
         */
        if (!dev->dev.dma_mask)
                dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-       if (!dev->dev.coherent_dma_mask)
-               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               return retval;
 
        irq = platform_get_irq(dev, 0);
        if (irq < 0) {
index f73e58f5ef8d0390f8b4068db2030d9d1b42592c..61da7ee36e458ccd73a634839f5feda5a3d00808 100644 (file)
@@ -4797,21 +4797,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        pci_set_master(pdev);
 
        /* Check the DMA addressing support of this device */
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (rc < 0) {
-                       dev_err(&pdev->dev,
-                         "Unable to obtain 64 bit DMA for consistent allocations\n");
-                       goto err_release_res;
-               }
-       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-               rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (rc < 0) {
-                       dev_err(&pdev->dev,
-                         "Unable to obtain 32 bit DMA for consistent allocations\n");
-                       goto err_release_res;
-               }
-       } else {
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "No usable DMA addressing method\n");
                rc = -EIO;
                goto err_release_res;
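
The et131x hunk above collapses the open-coded 64-bit-then-32-bit DMA mask negotiation into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. A minimal sketch of the resulting probe-time pattern, assuming a PCI device (the function name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* Prefer 64-bit DMA; fall back to 32-bit.  Each call sets both
         * the streaming and the coherent mask together. */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "no usable DMA addressing method\n");
                return -EIO;
        }

        return 0;
}
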
index db4d6dc032432cf0d92ff71d6c38e52dff17ce13..b36feb080cba886f8ebf9a456c519247376dbdcb 100644 (file)
@@ -37,7 +37,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
 
 config IIO_SIMPLE_DUMMY_BUFFER
        boolean "Buffered capture support"
-       depends on IIO_KFIFO_BUF
+       select IIO_KFIFO_BUF
        help
          Add buffered data capture to the simple dummy driver.
 
index 351936c3efd698e8c5cbe76d7878eb8ad16c311f..e4998e4d4434b2af9365aacc1a8c4137a4c70544 100644 (file)
@@ -563,6 +563,7 @@ static int isl29018_probe(struct i2c_client *client,
        mutex_init(&chip->lock);
 
        chip->lux_scale = 1;
+       chip->lux_uscale = 0;
        chip->range = 1000;
        chip->adc_bit = 16;
        chip->suspended = false;
index d2748c329eae6728b9c88c6b3a63065982d78555..c3f3f539e787e926b54f793958d8e9dac13aa6d7 100644 (file)
@@ -229,7 +229,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
        if (result < 0)
                return -EINVAL;
 
-       *val = result;
+       *val = sign_extend32(result, 15);
        return IIO_VAL_INT;
 }
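
The hmc5843 hunk above runs the raw 16-bit register value through sign_extend32() so negative readings keep their sign when widened to an int. A small illustrative sketch (the helper name is an assumption, not driver code):

#include <linux/bitops.h>

/* A raw value of 0xFFFE on a signed 16-bit channel means -2, not 65534;
 * bit 15 is the sign bit. */
static int decode_sample(u16 raw)
{
        return sign_extend32(raw, 15);
}
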
 
index a802cf2491d61a6d55f95c6e6825792905fa118a..4c6d2041260bed259f7cba174c64ceb9a9c1a3f8 100644 (file)
@@ -299,7 +299,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
        if (ret)
                iio_device_free(indio_dev);
 
-       return 0;
+       return ret;
 }
 
 static int ade7854_spi_remove(struct spi_device *spi)
index 47c5888461ffbeb3f4aa994af5b2418df6506e60..a475b3e07c9c71481f78214812053d93f62bff83 100644 (file)
@@ -41,7 +41,6 @@ struct imx_drm_device {
        struct list_head                        encoder_list;
        struct list_head                        connector_list;
        struct mutex                            mutex;
-       int                                     references;
        int                                     pipes;
        struct drm_fbdev_cma                    *fbhelper;
 };
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
                }
        }
 
-       imxdrm->references++;
-
        return imxdrm->drm;
 
 unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
        list_for_each_entry(enc, &imxdrm->encoder_list, list)
                module_put(enc->owner);
 
-       imxdrm->references--;
-
        mutex_unlock(&imxdrm->mutex);
 }
 EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 
        mutex_lock(&imxdrm->mutex);
 
-       if (imxdrm->references) {
+       if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
 
        mutex_lock(&imxdrm->mutex);
 
-       if (imxdrm->references) {
+       if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
 
        mutex_lock(&imxdrm->mutex);
 
-       if (imxdrm->references) {
+       if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@ -805,6 +800,12 @@ static struct drm_driver imx_drm_driver = {
 
 static int imx_drm_platform_probe(struct platform_device *pdev)
 {
+       int ret;
+
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        imx_drm_device->dev = &pdev->dev;
 
        return drm_platform_init(&imx_drm_driver, pdev);
@@ -847,8 +848,6 @@ static int __init imx_drm_init(void)
                goto err_pdev;
        }
 
-       imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
-
        ret = platform_driver_register(&imx_drm_pdrv);
        if (ret)
                goto err_pdrv;
index 6fd37a7453e9691ac573c414d396e69357e7b38f..9e73e8d8c9aaa350267ac25f0926a5cd8d7c6416 100644 (file)
@@ -523,7 +523,9 @@ static int ipu_drm_probe(struct platform_device *pdev)
        if (!pdata)
                return -EINVAL;
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
        if (!ipu_crtc)
index 2f44d56700af1eabe13625777b3cc3ce8c94cf20..776d3632dc7d52988738b105d648161b2e99b9a2 100644 (file)
@@ -244,13 +244,17 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
        struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
        struct usb_line6_toneport *toneport =
            (struct usb_line6_toneport *)line6pcm->line6;
+       unsigned int source;
 
-       if (ucontrol->value.enumerated.item[0] == toneport->source)
+       source = ucontrol->value.enumerated.item[0];
+       if (source >= ARRAY_SIZE(toneport_source_info))
+               return -EINVAL;
+       if (source == toneport->source)
                return 0;
 
-       toneport->source = ucontrol->value.enumerated.item[0];
+       toneport->source = source;
        toneport_send_cmd(toneport->line6.usbdev,
-                         toneport_source_info[toneport->source].code, 0x0000);
+                         toneport_source_info[source].code, 0x0000);
        return 1;
 }
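
The toneport hunk above bounds-checks the enum item coming from user space before it is used to index toneport_source_info[]. The pattern generalizes to any control put handler that indexes a static table. A sketch under the assumption of a small static name table (table and function names are illustrative):

#include <linux/kernel.h>
#include <linux/errno.h>

static const char * const source_names[] = { "line", "instrument", "mic" };

/* Returns 0 if unchanged, 1 if changed, -EINVAL for an out-of-range item. */
static int example_source_put(unsigned int item, unsigned int *cur)
{
        if (item >= ARRAY_SIZE(source_names))
                return -EINVAL;         /* index originates in user space */
        if (item == *cur)
                return 0;

        *cur = item;
        return 1;
}
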
 
index 086ca3d7241b2b756a650bc647ab9450461f1a4a..26b49a24b3dfe32997e7d37cb479f2d98b91ce82 100644 (file)
@@ -1802,7 +1802,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 int
 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       struct task_struct *task = kthread_run(fn, arg, name);
+       struct task_struct *task = kthread_run(fn, arg, "%s", name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index 2c581b7fa8adee363d19791007f4696e9e439d06..68a4f52ec998c14795d6f356e807b798c2dfa794 100644 (file)
@@ -1005,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 int
 ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       struct task_struct *task = kthread_run(fn, arg, name);
+       struct task_struct *task = kthread_run(fn, arg, "%s", name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index 4e898e4918605bf058554d6cae15b6a5e31e550a..2156a44d07409c0501cba726ccb33ee99d1d9c92 100644 (file)
@@ -1,6 +1,6 @@
 config LUSTRE_FS
        tristate "Lustre file system client support"
-       depends on INET && m
+       depends on INET && m && !MIPS && !XTENSA && !SUPERH
        select LNET
        select CRYPTO
        select CRYPTO_CRC32
@@ -52,7 +52,7 @@ config LUSTRE_DEBUG_EXPENSIVE_CHECK
 config LUSTRE_TRANSLATE_ERRNOS
        bool
        depends on LUSTRE_FS && !X86
-       default true
+       default y
 
 config LUSTRE_LLITE_LLOOP
        bool "Lustre virtual block device"
index 3916bda3004cf23c7fa58c7e51de5e621909cfc5..a100a0b96381d6ef0a55242c969ab41b7adb05be 100644 (file)
@@ -800,9 +800,9 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
 
        init_completion(&bltd.bltd_comp);
        bltd.bltd_num = atomic_read(&blp->blp_num_threads);
-       snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1,
+       snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
                "ldlm_bl_%02d", bltd.bltd_num);
-       task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name);
+       task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
        if (IS_ERR(task)) {
                CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
                       atomic_read(&blp->blp_num_threads), PTR_ERR(task));
index 462172d1a7569a74c1ffbae3dd0c0f133574aa2e..1a55c81892e0e8f692c167d17f0296814ad806dc 100644 (file)
@@ -397,7 +397,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                                 sched->ws_name, sched->ws_nthreads);
                }
 
-               task = kthread_run(cfs_wi_scheduler, sched, name);
+               task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
                if (!IS_ERR(task)) {
                        nthrs--;
                        continue;
index 2644edf438c1e175b016a54e96b6d93bd5852cc7..c8b43442dc74be8c8cf1ee3f5b38470f63617e71 100644 (file)
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
        if (nob > ulsm_nob)
                return (-EINVAL);
 
-       if (copy_to_user (ulsm, lsm, sizeof(ulsm)))
+       if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
                return (-EFAULT);
 
        for (i = 0; i < lsm->lsm_stripe_count; i++) {
index 227a0ae9593bc987d6bfbcf1757bc0a9eacf2288..5dec771d70eee8c08a6bc0b787f7f116ddd6906d 100644 (file)
@@ -383,8 +383,8 @@ int ptlrpc_start_pinger(void)
 
        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
-       rc = PTR_ERR(kthread_run(ptlrpc_pinger_main,
-                                &pinger_thread, pinger_thread.t_name));
+       rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
+                                "%s", pinger_thread.t_name));
        if (IS_ERR_VALUE(rc)) {
                CERROR("cannot start thread: %d\n", rc);
                return rc;
index fbdeff65d059df66f057cec95f729a5d3ad6ad63..89c9be96f454a57c4dd0839cf92f378883dce91e 100644 (file)
@@ -615,7 +615,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
-       strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
+       strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
@@ -638,7 +638,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
                                GOTO(out, rc);
                }
 
-               task = kthread_run(ptlrpcd, pc, pc->pc_name);
+               task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
                if (IS_ERR(task))
                        GOTO(out, rc = PTR_ERR(task));
 
@@ -745,7 +745,7 @@ static int ptlrpcd_init(void)
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);
 
-       snprintf(name, 15, "ptlrpcd_rcv");
+       snprintf(name, sizeof(name), "ptlrpcd_rcv");
        set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
@@ -764,7 +764,7 @@ static int ptlrpcd_init(void)
         *      unnecessary dependency. But how to distribute async RPCs load
         *      among all the ptlrpc daemons becomes another trouble. */
        for (i = 0; i < nthreads; i++) {
-               snprintf(name, 15, "ptlrpcd_%d", i);
+               snprintf(name, sizeof(name), "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
index e90c8fb7da6a73643531c1cd1914d5826a280c2d..6547f46a7729f2f95c16f9bdff5e334a43806eea 100644 (file)
@@ -59,8 +59,8 @@
  ****************************************/
 
 
-#define PTRS_PER_PAGE   (PAGE_CACHE_SIZE / sizeof(void *))
-#define PAGES_PER_POOL  (PTRS_PER_PAGE)
+#define POINTERS_PER_PAGE      (PAGE_CACHE_SIZE / sizeof(void *))
+#define PAGES_PER_POOL         (POINTERS_PER_PAGE)
 
 #define IDLE_IDX_MAX       (100)
 #define IDLE_IDX_WEIGHT         (3)
index ac8b5fd2300b5720e322137079895bdda5f71b52..acf75f3873d1ffb0ab9cc4258150b3a9a1b2e3ba 100644 (file)
@@ -2718,15 +2718,15 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        spin_unlock(&svcpt->scp_lock);
 
        if (svcpt->scp_cpt >= 0) {
-               snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
+               snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
                         svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
        } else {
-               snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d",
+               snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
                         svc->srv_thread_name, thread->t_id);
        }
 
        CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
-       rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
+       rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
        if (IS_ERR_VALUE(rc)) {
                CERROR("cannot start thread '%s': rc %d\n",
                       thread->t_name, rc);
index 90d6ac46935587fb905e910e39a7a52002888f43..081407be33ab5b6cdec1a01dbfb561bd53056c78 100644 (file)
@@ -901,10 +901,7 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int err;
        struct dt3155_priv *pd;
 
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-       if (err)
-               return -ENODEV;
-       err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                return -ENODEV;
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
index b94a95a597d63ec6428f2b92d460fe208465d351..76d5bbd4d93c38f3e25274f8d7b337eb808f1b7d 100644 (file)
@@ -1,3 +1,4 @@
 config USB_MSI3101
        tristate "Mirics MSi3101 SDR Dongle"
        depends on USB && VIDEO_DEV && VIDEO_V4L2
+        select VIDEOBUF2_VMALLOC
index 24c7b70a6cbf401696ee3faba21047d68a75f6e2..4c3bf776bb207b755c9a5bde50aa32138a6ff09a 100644 (file)
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
        /* Absolute min and max number of buffers available for mmap() */
        *nbuffers = 32;
        *nplanes = 1;
-       sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */
+       /*
+        *   3, wMaxPacketSize 3x 1024 bytes
+        * 504, max IQ sample pairs per 1024 frame
+        *   2, two samples, I and Q
+        *   4, 32-bit float
+        */
+       sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
        dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
                        __func__, *nbuffers, sizes[0]);
        return 0;
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
                        f->frequency * 625UL / 10UL);
 }
 
-const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
+static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
        .vidioc_querycap          = msi3101_querycap,
 
        .vidioc_enum_input        = msi3101_enum_input,
index a4c589604b028d7d74e1a7f895b6ab3aecbeac75..9a6d5c0b13396c2ad8715508139d49ac1f0952b7 100644 (file)
@@ -346,7 +346,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma,
 /* Build a descriptor queue out of an SG list and send it to the P2M for
  * processing. */
 static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
-                         struct vb2_dma_sg_desc *vbuf, int off, int size,
+                         struct sg_table *vbuf, int off, int size,
                          unsigned int base, unsigned int base_size)
 {
        struct solo_dev *solo_dev = solo_enc->solo_dev;
@@ -359,7 +359,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
 
        solo_enc->desc_count = 1;
 
-       for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) {
+       for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
                struct solo_p2m_desc *desc;
                dma_addr_t dma;
                int len;
@@ -434,7 +434,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
                struct vb2_buffer *vb, struct vop_header *vh)
 {
        struct solo_dev *solo_dev = solo_enc->solo_dev;
-       struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
        int frame_size;
        int ret;
 
@@ -443,7 +443,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
        if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len)
                return -EIO;
 
-       sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
+       sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
                        solo_enc->jpeg_header,
                        solo_enc->jpeg_len);
 
@@ -451,12 +451,12 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
                & ~(DMA_ALIGN - 1);
        vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len);
 
-       dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off,
                        frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
                        SOLO_JPEG_EXT_SIZE(solo_dev));
-       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        return ret;
 }
@@ -465,7 +465,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
                struct vb2_buffer *vb, struct vop_header *vh)
 {
        struct solo_dev *solo_dev = solo_enc->solo_dev;
-       struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
        int frame_off, frame_size;
        int skip = 0;
        int ret;
@@ -475,7 +475,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
 
        /* If this is a key frame, add extra header */
        if (!vh->vop_type) {
-               sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
+               sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
                                solo_enc->vop,
                                solo_enc->vop_len);
 
@@ -494,12 +494,12 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
        frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1))
                & ~(DMA_ALIGN - 1);
 
-       dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
                        SOLO_MP4E_EXT_ADDR(solo_dev),
                        SOLO_MP4E_EXT_SIZE(solo_dev));
-       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        return ret;
 }
index d7b3c82b5ead023bcc4ba85acdd0e307b0eecdc9..45dfe94199ae4ecd951a8142fb6f9140d226942c 100644 (file)
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
                        }
        }
 
-       memset(usb, 0, sizeof(usb));
+       memset(usb, 0, sizeof(*usb));
        usb->init_flags = flags;
 
        /* Initialize the USB state structure */
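
The cvmx-usb hunk above fixes the classic sizeof-of-a-pointer bug: sizeof(usb) is only the pointer size, so the memset() cleared just the first 4 or 8 bytes of the state structure. Several other hunks in this merge (the echo_copyout_lsm() copy_to_user() and the rtw _rtw_memset() changes) address the same class of mistake. A minimal illustration with a made-up structure:

#include <linux/string.h>
#include <linux/types.h>

struct usb_state {
        u32 init_flags;
        u8 fifo[256];
};

static void reset_state(struct usb_state *s)
{
        /* sizeof(s) is the pointer size (4 or 8 bytes); sizeof(*s) is the
         * whole structure, which is what memset() must clear. */
        memset(s, 0, sizeof(*s));
}
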
index 3605c5da822d953daa6275dda23b675e13b60d5c..6fc77428e83a0c389c51e6cca52ceaa6a8ff7dd9 100644 (file)
@@ -157,8 +157,8 @@ _func_enter_;
 
        *frlen = *frlen + (len + 2);
 
-       return pbuf + len + 2;
 _func_exit_;
+       return pbuf + len + 2;
 }
 
 inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
index 8b2ba26ba38d9a44c090167cc9bd862367e92ade..4b2eb8e9b5620ab4529ec9ce8af252ef502a65fc 100644 (file)
@@ -1827,13 +1827,13 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
 
 #ifdef CONFIG_88EU_P2P
 
-static int get_reg_classes_full_count(struct p2p_channels channel_list)
+static int get_reg_classes_full_count(struct p2p_channels *channel_list)
 {
        int cnt = 0;
        int i;
 
-       for (i = 0; i < channel_list.reg_classes; i++) {
-               cnt += channel_list.reg_class[i].channels;
+       for (i = 0; i < channel_list->reg_classes; i++) {
+               cnt += channel_list->reg_class[i].channels;
        }
 
        return cnt;
@@ -2065,7 +2065,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
        /*  + number of channels in all classes */
        len_channellist_attr = 3
           + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
-          + get_reg_classes_full_count(pmlmeext->channel_list);
+          + get_reg_classes_full_count(&pmlmeext->channel_list);
 
        *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
        p2pielen += 2;
@@ -2437,7 +2437,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
        /*  + number of channels in all classes */
        len_channellist_attr = 3
           + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
-          + get_reg_classes_full_count(pmlmeext->channel_list);
+          + get_reg_classes_full_count(&pmlmeext->channel_list);
 
        *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
 
@@ -2859,7 +2859,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
        /*  + number of channels in all classes */
        len_channellist_attr = 3
           + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
-          + get_reg_classes_full_count(pmlmeext->channel_list);
+          + get_reg_classes_full_count(&pmlmeext->channel_list);
 
        *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
 
@@ -3120,7 +3120,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
                /*  + number of channels in all classes */
                len_channellist_attr = 3
                        + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
-                       + get_reg_classes_full_count(pmlmeext->channel_list);
+                       + get_reg_classes_full_count(&pmlmeext->channel_list);
 
                *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
                p2pielen += 2;
index c7ff2e4d1f23fcc4567c747bf97beff945886862..9832dcbbd07fa621467dfc1c216411e627db7e66 100644 (file)
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
                sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
        }
 
-       _rtw_memset(data, '\0', sizeof(data));
+       _rtw_memset(data, '\0', sizeof(*data));
 
        i = psd_start;
        while (i < psd_stop) {
index 013ea487e7acc2e57726b82e6e016d4aebf60b34..8018edd3d42e19fcc0875c7324584d94e6fb02ad 100644 (file)
@@ -631,7 +631,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
        inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
 
        if (pregpriv->wifi_spec == 1) {
-               u32     j, tmp, change_inx;
+               u32     j, tmp, change_inx = false;
 
                /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
                for (i = 0; i < 4; i++) {
index 9c2e7a20c09ea1ced218d303db5e1213837be751..ec0028d4e61a495cf577efe82fd7bffa0455fbbd 100644 (file)
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
        u8 cut_ver, fab_ver;
 
        /*  Init Value */
-       _rtw_memset(dm_odm, 0, sizeof(dm_odm));
+       _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
 
        dm_odm->Adapter = Adapter;
 
index 2bfe72841921bd8fc76ec684130f691e5466e50a..4787bacdcad892da62bf66c05cadb851e090e09d 100644 (file)
@@ -1010,7 +1010,7 @@ enum dm_dig_op {
 #define                DM_false_ALARM_THRESH_LOW       400
 #define                DM_false_ALARM_THRESH_HIGH      1000
 
-#define                DM_DIG_MAX_NIC                  0x3e
+#define                DM_DIG_MAX_NIC                  0x4e
 #define                DM_DIG_MIN_NIC                  0x1e /* 0x22/0x1c */
 
 #define                DM_DIG_MAX_AP                   0x32
index 52b280165a926da8acccb981e539c787c6c422b9..555c801d2ded7e434a5a86335449d5be377796ee 100644 (file)
@@ -188,7 +188,7 @@ enum ChannelPlan {
 
 struct txpowerinfo24g {
        u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
-       u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G-1];
+       u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
        /* If only one tx, only BW20 and OFDM are used. */
        s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
        s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
index a96b018e5e6a3d7208867684983a58ede36c8e82..853ab80a2b860a0a95bfab8d85ba064ca4eef982 100644 (file)
@@ -870,6 +870,7 @@ static struct fwevent wlanevents[] = {
        {0, NULL},
        {0, NULL},
        {0, &rtw_cpwm_event_callback},
+       {0, NULL},
 };
 
 #endif/* _RTL_MLME_EXT_C_ */
index cd4100fb3645ffd1cc2fcb8ec1013c1fa76d30da..95953ebc027922e66ffd3168a965dee33f6a30be 100644 (file)
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
        stop = strncmp(extra, "stop", 4);
        sscanf(extra, "count =%d, pkt", &count);
 
-       _rtw_memset(extra, '\0', sizeof(extra));
+       _rtw_memset(extra, '\0', sizeof(*extra));
 
        if (stop == 0) {
                bStartTest = 0; /*  To set Stop */
index d3078d200e505f4c4f24237eb202d80ae6e9cd69..9ca3180ebaa0e70fbb188c84748971fd4f34922d 100644 (file)
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
        /*=== Customer ID ===*/
        /****** 8188EUS ********/
        {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+       {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {}      /* Terminating entry */
 };
 
index 5bc361b16d4ca05e924bd3a326824c852a67b8f0..56144014b7c9ba15126f97b073fb6eea4777937a 100644 (file)
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
        /* Get TCB and local buffer from common pool.
           (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
        skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
+       if (!skb)
+               return RT_STATUS_FAILURE;
        memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
        tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
        tcb_desc->queue_index = TXCMD_QUEUE;
index dbf11ecb794ec60dd576578f9f5172d0f4448f10..19d3cf451b880c736c82cce806fdfb14d98b6379 100644 (file)
@@ -172,8 +172,8 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
        if (!CARDbIsOFDMinBasicRate(pDevice)) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
                        "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
-       if (wRateIdx > RATE_24M)
-               wRateIdx = RATE_24M;
+               if (wRateIdx > RATE_24M)
+                       wRateIdx = RATE_24M;
                return wRateIdx;
        }
 
index d0cf7d8a20e5640fe507f3ff1b1dc64c170f9408..8872e0f84f40e825efec5dd7f7614f6c22f43562 100644 (file)
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
        if (pMgmt == NULL)
                return -EFAULT;
 
+       if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
+               return -ENODEV;
+
        buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index 536971786ae8e4cb87c27b4705d006df0d5cbee6..6f9d28182445ce99b2d37bcd37aaae3c3d2bdc03 100644 (file)
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
     memset(pMgmt->abyCurrBSSID, 0, 6);
     pMgmt->eCurrState = WMAC_STATE_IDLE;
 
+       pDevice->flags &= ~DEVICE_FLAGS_OPENED;
+
     device_free_tx_bufs(pDevice);
     device_free_rx_bufs(pDevice);
     device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
     usb_free_urb(pDevice->pInterruptURB);
 
     BSSvClearNodeDBTable(pDevice, 0);
-    pDevice->flags &=(~DEVICE_FLAGS_OPENED);
 
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
 
index fb743a8811bbc9c0c8c37d23bde37a96b863d735..14f3e852215da5fb27702b4b91687047b9a290c2 100644 (file)
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
 
     for (ii = 0; ii < pDevice->cbTD; ii++) {
+       if (!pDevice->apTD[ii])
+               return NULL;
         pContext = pDevice->apTD[ii];
         if (pContext->bBoolInUse == false) {
             pContext->bBoolInUse = true;
index efc56987a60b53a0a409277da639a95b128f5304..7db6f03a00540cd8c2a131ab7039fec6251b72cc 100644 (file)
@@ -2054,7 +2054,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
                                       NULL,
                                       MKDEV(major, i),
                                       NULL,
-                                      devname);
+                                      "%s", devname);
 
                if (IS_ERR(device)) {
                        pr_warn("xillybus: Failed to create %s "
index 91d94b564433ad2aa3f8b6b5091ca26d03459477..2c4ed52ca849d45b385cbeed0d88a545d4a5fa5d 100644 (file)
@@ -981,4 +981,3 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
 MODULE_DESCRIPTION("Compressed RAM Block Device");
-MODULE_ALIAS("devname:zram");
index 35b61f7d6c6305f79535c3c9e7d8dbbd8bd788dc..38e44b9abf0f145eacde1e9b90dcf26c45ca2633 100644 (file)
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
 
 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
 {
-       struct iscsi_cmd *cmd;
+       LIST_HEAD(ack_list);
+       struct iscsi_cmd *cmd, *cmd_p;
 
        conn->exp_statsn = exp_statsn;
 
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
                return;
 
        spin_lock_bh(&conn->cmd_lock);
-       list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+       list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
                spin_lock(&cmd->istate_lock);
                if ((cmd->i_state == ISTATE_SENT_STATUS) &&
                    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
                        cmd->i_state = ISTATE_REMOVE;
                        spin_unlock(&cmd->istate_lock);
-                       iscsit_add_cmd_to_immediate_queue(cmd, conn,
-                                               cmd->i_state);
+                       list_move_tail(&cmd->i_conn_node, &ack_list);
                        continue;
                }
                spin_unlock(&cmd->istate_lock);
        }
        spin_unlock_bh(&conn->cmd_lock);
+
+       list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+               list_del(&cmd->i_conn_node);
+               iscsit_free_cmd(cmd, false);
+       }
 }
 
 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
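
The iscsit_ack_from_expstatsn() hunk above stops handing commands to the immediate queue while cmd_lock is held; matching entries are moved onto a local list and released after the lock is dropped. This is the usual pattern for doing heavyweight teardown on items selected under a spinlock. A minimal sketch with illustrative structure and function names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct item {
        struct list_head node;
        bool done;
};

static void reap_done_items(struct list_head *items, spinlock_t *lock)
{
        LIST_HEAD(reap_list);
        struct item *it, *tmp;

        spin_lock_bh(lock);
        list_for_each_entry_safe(it, tmp, items, node)
                if (it->done)
                        list_move_tail(&it->node, &reap_list);  /* defer */
        spin_unlock_bh(lock);

        /* Heavyweight cleanup happens outside the spinlock. */
        list_for_each_entry_safe(it, tmp, &reap_list, node) {
                list_del(&it->node);
                kfree(it);
        }
}
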
index 14d1aed5af1dbef98aa9b05ad31573b08bdf72ed..ef6d836a4d09745d57ba42e2496dd725f85c57ea 100644 (file)
@@ -1192,7 +1192,7 @@ get_target:
         */
 alloc_tags:
        tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
-       tag_num += ISCSIT_EXTRA_TAGS;
+       tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
        tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
 
        ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
index f2de28e178fd738cd93dce94ad7980422749f9d2..b0cac0c342e1e83a9da5dc7b5f01edd48cb12136 100644 (file)
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
-               rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+               rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
                        __iscsit_free_cmd(cmd, true, shutdown);
                        target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
                        se_cmd = &cmd->se_cmd;
                        __iscsit_free_cmd(cmd, true, shutdown);
 
-                       rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+                       rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                        if (!rc && shutdown && se_cmd->se_sess) {
                                __iscsit_free_cmd(cmd, true, shutdown);
                                target_put_sess_cmd(se_cmd->se_sess, se_cmd);
index 6c17295e8d7ca9db7a0d262a087ce81ffaa1091c..4714c6f8da4be5fe39e797237fa8a35de0450883 100644 (file)
@@ -349,7 +349,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+       /*
+        * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+        * within target_complete_ok_work() if the command was successfully
+        * sent to the backend driver.
+        */
+       spin_lock_irq(&cmd->t_state_lock);
+       if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+               cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+       spin_unlock_irq(&cmd->t_state_lock);
+
        /*
         * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
         * before the original READ I/O submission.
@@ -363,7 +372,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *write_sg = NULL, *sg;
-       unsigned char *buf, *addr;
+       unsigned char *buf = NULL, *addr;
        struct sg_mapping_iter m;
        unsigned int offset = 0, len;
        unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +387,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
         */
        if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
                return TCM_NO_SENSE;
+       /*
+        * Immediately exit + release dev->caw_sem if command has already
+        * been failed with a non-zero SCSI status.
+        */
+       if (cmd->scsi_status) {
+               pr_err("compare_and_write_callback: non zero scsi_status:"
+                       " 0x%02x\n", cmd->scsi_status);
+               goto out;
+       }
 
        buf = kzalloc(cmd->data_length, GFP_KERNEL);
        if (!buf) {
@@ -508,6 +526,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
                cmd->transport_complete_callback = NULL;
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
+       /*
+        * Reset cmd->data_length to individual block_size in order to not
+        * confuse backend drivers that depend on this value matching the
+        * size of the I/O being submitted.
+        */
+       cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
 
        ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
                              DMA_FROM_DEVICE);
index 84747cc1aac0aa4d001dda34bc22d09c52685362..81e945eefbbdd0572181d84e3cd9d014895e4eb7 100644 (file)
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 {
        int rc;
 
-       se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL);
+       se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
+                                       GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!se_sess->sess_cmd_map) {
-               pr_err("Unable to allocate se_sess->sess_cmd_map\n");
-               return -ENOMEM;
+               se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
+               if (!se_sess->sess_cmd_map) {
+                       pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+                       return -ENOMEM;
+               }
        }
 
        rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
        if (rc < 0) {
                pr_err("Unable to init se_sess->sess_tag_pool,"
                        " tag_num: %u\n", tag_num);
-               kfree(se_sess->sess_cmd_map);
+               if (is_vmalloc_addr(se_sess->sess_cmd_map))
+                       vfree(se_sess->sess_cmd_map);
+               else
+                       kfree(se_sess->sess_cmd_map);
                se_sess->sess_cmd_map = NULL;
                return -ENOMEM;
        }
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess)
 {
        if (se_sess->sess_cmd_map) {
                percpu_ida_destroy(&se_sess->sess_tag_pool);
-               kfree(se_sess->sess_cmd_map);
+               if (is_vmalloc_addr(se_sess->sess_cmd_map))
+                       vfree(se_sess->sess_cmd_map);
+               else
+                       kfree(se_sess->sess_cmd_map);
        }
        kmem_cache_free(se_sess_cache, se_sess);
 }
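
The transport_alloc_session_tags() and transport_free_session() hunks above let a large contiguous kzalloc() fail quietly (__GFP_NOWARN) and fall back to vzalloc(), then pick vfree() or kfree() at release time via is_vmalloc_addr(). A minimal sketch of the allocate/free pair (helper names are illustrative; newer kernels provide kvfree() for the free side):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_tag_map(size_t size)
{
        void *map;

        /* Try a physically contiguous allocation first, without warning
         * on failure, then fall back to vmalloc space. */
        map = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!map)
                map = vzalloc(size);
        return map;
}

static void free_tag_map(void *map)
{
        if (is_vmalloc_addr(map))
                vfree(map);
        else
                kfree(map);
}
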
index 4d22e7d2adcae0a048684613c3b3dc95c6f9874e..3da4fd10b9f820b6409980055ae19a2b3607f713 100644 (file)
@@ -298,8 +298,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
                (unsigned long long)xop->dst_lba);
 
        if (dc != 0) {
-               xop->dbl = (desc[29] << 16) & 0xff;
-               xop->dbl |= (desc[30] << 8) & 0xff;
+               xop->dbl = (desc[29] & 0xff) << 16;
+               xop->dbl |= (desc[30] & 0xff) << 8;
                xop->dbl |= desc[31] & 0xff;
 
                pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
index dbfc390330acf2ee8bc6af339ff22f3b6fc3ec77..f35a1f75b15b87f2f37ea0bf671894acf549c207 100644 (file)
@@ -56,7 +56,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
        select THERMAL_GOV_USER_SPACE
        help
          Select this if you want to let the user space manage the
-         lpatform thermals.
+         platform thermals.
 
 endchoice
 
@@ -69,6 +69,7 @@ config THERMAL_GOV_STEP_WISE
        bool "Step_wise thermal governor"
        help
          Enable this to manage platform thermals using a simple linear
+         governor.
 
 config THERMAL_GOV_USER_SPACE
        bool "User_space thermal governor"
@@ -78,7 +79,6 @@ config THERMAL_GOV_USER_SPACE
 config CPU_THERMAL
        bool "generic cpu cooling support"
        depends on CPU_FREQ
-       select CPU_FREQ_TABLE
        help
          This implements the generic cpu cooling mechanism through frequency
          reduction. An ACPI version of this already exists
@@ -117,14 +117,14 @@ config SPEAR_THERMAL
        depends on OF
        help
          Enable this to plug the SPEAr thermal sensor driver into the Linux
-         thermal framework
+         thermal framework.
 
 config RCAR_THERMAL
        tristate "Renesas R-Car thermal driver"
        depends on ARCH_SHMOBILE
        help
          Enable this to plug the R-Car thermal sensor driver into the Linux
-         thermal framework
+         thermal framework.
 
 config KIRKWOOD_THERMAL
        tristate "Temperature sensor on Marvell Kirkwood SoCs"
index b40b37cd25e08b7b2353b6ef6d85c321ad2e08eb..8f181b3f842b3b04b12d972da58d8f238b6a7a00 100644 (file)
@@ -675,6 +675,11 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x2e},
        { X86_VENDOR_INTEL, 6, 0x2f},
        { X86_VENDOR_INTEL, 6, 0x3a},
+       { X86_VENDOR_INTEL, 6, 0x3c},
+       { X86_VENDOR_INTEL, 6, 0x3e},
+       { X86_VENDOR_INTEL, 6, 0x3f},
+       { X86_VENDOR_INTEL, 6, 0x45},
+       { X86_VENDOR_INTEL, 6, 0x46},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -758,21 +763,39 @@ static int powerclamp_init(void)
        /* probe cpu features and ids here */
        retval = powerclamp_probe();
        if (retval)
-               return retval;
+               goto exit_free;
+
        /* set default limit, maybe adjusted during runtime based on feedback */
        window_size = 2;
        register_hotcpu_notifier(&powerclamp_cpu_notifier);
+
        powerclamp_thread = alloc_percpu(struct task_struct *);
+       if (!powerclamp_thread) {
+               retval = -ENOMEM;
+               goto exit_unregister;
+       }
+
        cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
                                                &powerclamp_cooling_ops);
-       if (IS_ERR(cooling_dev))
-               return -ENODEV;
+       if (IS_ERR(cooling_dev)) {
+               retval = -ENODEV;
+               goto exit_free_thread;
+       }
 
        if (!duration)
                duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+
        powerclamp_create_debug_files();
 
        return 0;
+
+exit_free_thread:
+       free_percpu(powerclamp_thread);
+exit_unregister:
+       unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+exit_free:
+       kfree(cpu_clamping_mask);
+       return retval;
 }
 module_init(powerclamp_init);
 
index f10a6ad37c0609fd2383061d1c4f651bb9cde36b..c2301da08ac7bb0958755b5f2b1291447f4aae6a 100644 (file)
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
        }
 
        th_zone = conf->pzone_data;
-       if (th_zone->therm_dev)
-               return;
 
        if (th_zone->bind == false) {
                for (i = 0; i < th_zone->cool_dev_size; i++) {
index b43afda8acd163085a11eaeb851fb1869403ed4b..32f38b90c4f6a6705b44e8381ee339c2382e47bb 100644 (file)
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
 
        con = readl(data->base + reg->tmu_ctrl);
 
+       if (pdata->test_mux)
+               con |= (pdata->test_mux << reg->test_mux_addr_shift);
+
        if (pdata->reference_voltage) {
                con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
                con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
        },
        {
                .compatible = "samsung,exynos4412-tmu",
-               .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+               .data = (void *)EXYNOS4412_TMU_DRV_DATA,
        },
        {
                .compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       if (pdata->type == SOC_ARCH_EXYNOS ||
-               pdata->type == SOC_ARCH_EXYNOS4210 ||
-                               pdata->type == SOC_ARCH_EXYNOS5440)
+       if (pdata->type == SOC_ARCH_EXYNOS4210 ||
+           pdata->type == SOC_ARCH_EXYNOS4412 ||
+           pdata->type == SOC_ARCH_EXYNOS5250 ||
+           pdata->type == SOC_ARCH_EXYNOS5440)
                data->soc = pdata->type;
        else {
                ret = -EINVAL;
index b364c9eee70103a622a852096295f4e3339b474f..3fb65547e64c9d9e3d3410f1c0ea729fb5a9332e 100644 (file)
@@ -41,7 +41,8 @@ enum calibration_mode {
 
 enum soc_type {
        SOC_ARCH_EXYNOS4210 = 1,
-       SOC_ARCH_EXYNOS,
+       SOC_ARCH_EXYNOS4412,
+       SOC_ARCH_EXYNOS5250,
        SOC_ARCH_EXYNOS5440,
 };
 
@@ -84,6 +85,7 @@ enum soc_type {
  * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
        reg.
  * @tmu_ctrl: TMU main controller register.
+ * @test_mux_addr_shift: shift bits of test mux address.
  * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
  * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
  * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
        u32     triminfo_reload_shift;
 
        u32     tmu_ctrl;
+       u32     test_mux_addr_shift;
        u32     buf_vref_sel_shift;
        u32     buf_vref_sel_mask;
        u32     therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
  * @first_point_trim: temp value of the first point trimming
  * @second_point_trim: temp value of the second point trimming
  * @default_temp_offset: default temperature offset in case of no trimming
+ * @test_mux: test MUX address value, if the SoC supports a test MUX
  * @cal_type: calibration type for temperature
  * @cal_mode: calibration mode for temperature
  * @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
        u8 first_point_trim;
        u8 second_point_trim;
        u8 default_temp_offset;
+       u8 test_mux;
 
        enum calibration_type cal_type;
        enum calibration_mode cal_mode;
index 9002499c1f69289350f2e7edd813716919927083..073c292baa53e12d0bc7c6aef2a8956a81ea4a6d 100644 (file)
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
 };
 #endif
 
-#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
-static const struct exynos_tmu_registers exynos5250_tmu_registers = {
+#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
+static const struct exynos_tmu_registers exynos4412_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
        .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
        .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
        .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+       .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
        .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
        .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
        .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
-#define EXYNOS5250_TMU_DATA \
+#define EXYNOS4412_TMU_DATA \
        .threshold_falling = 10, \
        .trigger_levels[0] = 85, \
        .trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
                .temp_level = 103, \
        }, \
        .freq_tab_count = 2, \
-       .type = SOC_ARCH_EXYNOS, \
-       .registers = &exynos5250_tmu_registers, \
+       .registers = &exynos4412_tmu_registers, \
        .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
                        TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
                        TMU_SUPPORT_EMUL_TIME)
+#endif
 
+#if defined(CONFIG_SOC_EXYNOS4412)
+struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
+       .tmu_data = {
+               {
+                       EXYNOS4412_TMU_DATA,
+                       .type = SOC_ARCH_EXYNOS4412,
+                       .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
+               },
+       },
+       .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
 struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
        .tmu_data = {
-               { EXYNOS5250_TMU_DATA },
+               {
+                       EXYNOS4412_TMU_DATA,
+                       .type = SOC_ARCH_EXYNOS5250,
+               },
        },
        .tmu_count = 1,
 };
index dc7feb51099b449bb680e5b9237966b8eb47295e..a1ea19d9e0a6e6bfee1a80f173dcdf4786dcc284 100644 (file)
 
 #define EXYNOS_MAX_TRIGGER_PER_REG     4
 
+/* Exynos4412 specific */
+#define EXYNOS4412_MUX_ADDR_VALUE          6
+#define EXYNOS4412_MUX_ADDR_SHIFT          20
+
 /*exynos5440 specific registers*/
 #define EXYNOS5440_TMU_S0_7_TRIM               0x000
 #define EXYNOS5440_TMU_S0_7_CTRL               0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
 #define EXYNOS4210_TMU_DRV_DATA (NULL)
 #endif
 
-#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412))
+#if defined(CONFIG_SOC_EXYNOS4412)
+extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
+#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
+#else
+#define EXYNOS4412_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
 extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
 #define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
 #else
index eeef0e2498caa39edc55cd3cf6345953a537f626..fdb07199d9c2693b8ae91af6d626f588ed21350b 100644 (file)
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&hwmon->tz_list);
        strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
-       hwmon->device = hwmon_device_register(&tz->device);
+       hwmon->device = hwmon_device_register(NULL);
        if (IS_ERR(hwmon->device)) {
                result = PTR_ERR(hwmon->device);
                goto free_mem;
index 4f8b9af54a5a75d1de884a342920ccb50f9ba869..5a47cc8c8f85ae1771c3faa1042b8f068978d570 100644 (file)
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
                } else {
                        dev_err(bgp->dev,
                                "Failed to read PCB state. Using defaults\n");
+                       ret = 0;
                }
        }
        *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
index f36950e4134f55deb02e3a6eb172e9481828ea18..7722cb9d5a8020076afe549192b198542461f352 100644 (file)
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
        int phy_id = topology_physical_package_id(cpu);
        struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
        bool notify = false;
+       unsigned long flags;
 
        if (!phdev)
                return;
 
-       spin_lock(&pkg_work_lock);
+       spin_lock_irqsave(&pkg_work_lock, flags);
        ++pkg_work_cnt;
        if (unlikely(phy_id > max_phy_id)) {
-               spin_unlock(&pkg_work_lock);
+               spin_unlock_irqrestore(&pkg_work_lock, flags);
                return;
        }
        pkg_work_scheduled[phy_id] = 0;
-       spin_unlock(&pkg_work_lock);
+       spin_unlock_irqrestore(&pkg_work_lock, flags);
 
        enable_pkg_thres_interrupt();
        rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
        int thres_count;
        u32 eax, ebx, ecx, edx;
        u8 *temp;
+       unsigned long flags;
 
        cpuid(6, &eax, &ebx, &ecx, &edx);
        thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
                goto err_ret_unlock;
        }
 
-       spin_lock(&pkg_work_lock);
+       spin_lock_irqsave(&pkg_work_lock, flags);
        if (topology_physical_package_id(cpu) > max_phy_id)
                max_phy_id = topology_physical_package_id(cpu);
        temp = krealloc(pkg_work_scheduled,
                        (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
        if (!temp) {
-               spin_unlock(&pkg_work_lock);
+               spin_unlock_irqrestore(&pkg_work_lock, flags);
                err = -ENOMEM;
                goto err_ret_free;
        }
        pkg_work_scheduled = temp;
        pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
-       spin_unlock(&pkg_work_lock);
+       spin_unlock_irqrestore(&pkg_work_lock, flags);
 
        phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
        phy_dev_entry->first_cpu = cpu;
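The x86_pkg_temp_thermal hunks above switch pkg_work_lock to the IRQ-saving lock primitives, with the saved interrupt state kept in a local flags variable at each call site. A minimal generic sketch of the idiom, using placeholder names rather than the driver's symbols:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_count;

static void example_update(void)
{
	unsigned long flags;	/* interrupt state saved per call site */

	spin_lock_irqsave(&example_lock, flags);	/* masks local IRQs */
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);	/* restores prior state */
}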
index cd69b48f6dfd390a8231f689b35f5d2588c16772..6496872e2e47c34c29ca75c79ce4d56bf7191146 100644 (file)
@@ -329,7 +329,7 @@ static void udbg_init_opal_common(void)
 void __init hvc_opal_init_early(void)
 {
        struct device_node *stdout_node = NULL;
-       const u32 *termno;
+       const __be32 *termno;
        const char *name = NULL;
        const struct hv_ops *ops;
        u32 index;
@@ -371,7 +371,7 @@ void __init hvc_opal_init_early(void)
        if (!stdout_node)
                return;
        termno = of_get_property(stdout_node, "reg", NULL);
-       index = termno ? *termno : 0;
+       index = termno ? be32_to_cpup(termno) : 0;
        if (index >= MAX_NR_HVC_CONSOLES)
                return;
        hvc_opal_privs[index] = &hvc_opal_boot_priv;
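The hvc_opal change above stops dereferencing the device-tree "reg" property directly; it types the value as __be32 and converts with be32_to_cpup(), since DT property values are stored big-endian regardless of the host CPU. A minimal sketch of the idiom; the helper name is a placeholder:

#include <linux/of.h>

static u32 example_read_reg(const struct device_node *np)
{
	const __be32 *prop;

	prop = of_get_property(np, "reg", NULL);	/* raw, big-endian cell */

	return prop ? be32_to_cpup(prop) : 0;		/* convert on use */
}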
index e61c36cbb866474841f9bdf92d934d3c5ece137a..c193af6a628f45758a7fcd59b1b0823e57327655 100644 (file)
@@ -636,6 +636,7 @@ struct console xenboot_console = {
        .name           = "xenboot",
        .write          = xenboot_write_console,
        .flags          = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
+       .index          = -1,
 };
 #endif /* CONFIG_EARLY_PRINTK */
 
index ac2767100df56d3ef071f565ddfc7f4063432ca7..347050ea414a7f37e3962869f29437129cea1c75 100644 (file)
@@ -9,7 +9,7 @@
 
 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
 {
-       packet->seqno = atomic_inc_return(&pv->seqno);
+       packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
 
        /* Assumes that always succeeds, works in practice */
        return pv->put_chars(pv->termno, (char *)packet, packet->len);
@@ -28,7 +28,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
        /* Send version query */
        q.hdr.type = VS_QUERY_PACKET_HEADER;
        q.hdr.len = sizeof(struct hvsi_query);
-       q.verb = VSV_SEND_VERSION_NUMBER;
+       q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
        hvsi_send_packet(pv, &q.hdr);
 }
 
@@ -40,7 +40,7 @@ static int hvsi_send_close(struct hvsi_priv *pv)
 
        ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
        ctrl.hdr.len = sizeof(struct hvsi_control);
-       ctrl.verb = VSV_CLOSE_PROTOCOL;
+       ctrl.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
        return hvsi_send_packet(pv, &ctrl.hdr);
 }
 
@@ -69,14 +69,14 @@ static void hvsi_got_control(struct hvsi_priv *pv)
 {
        struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
 
-       switch (pkt->verb) {
+       switch (be16_to_cpu(pkt->verb)) {
        case VSV_CLOSE_PROTOCOL:
                /* We restart the handshaking */
                hvsi_start_handshake(pv);
                break;
        case VSV_MODEM_CTL_UPDATE:
                /* Transition of carrier detect */
-               hvsi_cd_change(pv, pkt->word & HVSI_TSCD);
+               hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD);
                break;
        }
 }
@@ -87,7 +87,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
        struct hvsi_query_response r;
 
        /* We only handle version queries */
-       if (pkt->verb != VSV_SEND_VERSION_NUMBER)
+       if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER)
                return;
 
        pr_devel("HVSI@%x: Got version query, sending response...\n",
@@ -96,7 +96,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
        /* Send version response */
        r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
        r.hdr.len = sizeof(struct hvsi_query_response);
-       r.verb = VSV_SEND_VERSION_NUMBER;
+       r.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
        r.u.version = HVSI_VERSION;
        r.query_seqno = pkt->hdr.seqno;
        hvsi_send_packet(pv, &r.hdr);
@@ -112,7 +112,7 @@ static void hvsi_got_response(struct hvsi_priv *pv)
 
        switch(r->verb) {
        case VSV_SEND_MODEM_CTL_STATUS:
-               hvsi_cd_change(pv, r->u.mctrl_word & HVSI_TSCD);
+               hvsi_cd_change(pv, be32_to_cpu(r->u.mctrl_word) & HVSI_TSCD);
                pv->mctrl_update = 1;
                break;
        }
@@ -265,8 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
        pv->mctrl_update = 0;
        q.hdr.type = VS_QUERY_PACKET_HEADER;
        q.hdr.len = sizeof(struct hvsi_query);
-       q.hdr.seqno = atomic_inc_return(&pv->seqno);
-       q.verb = VSV_SEND_MODEM_CTL_STATUS;
+       q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
        rc = hvsi_send_packet(pv, &q.hdr);
        if (rc <= 0) {
                pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc);
@@ -304,9 +303,9 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
 
        ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
        ctrl.hdr.len = sizeof(struct hvsi_control);
-       ctrl.verb = VSV_SET_MODEM_CTL;
-       ctrl.mask = HVSI_TSDTR;
-       ctrl.word = dtr ? HVSI_TSDTR : 0;
+       ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+       ctrl.mask = cpu_to_be32(HVSI_TSDTR);
+       ctrl.word = cpu_to_be32(dtr ? HVSI_TSDTR : 0);
        return hvsi_send_packet(pv, &ctrl.hdr);
 }
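The hvsi_lib hunks above treat the protocol fields as big-endian on the wire, converting with cpu_to_be16()/cpu_to_be32() when building packets and be16_to_cpu()/be32_to_cpu() when parsing them, so the driver behaves identically on little-endian hosts. A minimal sketch of the convention using a made-up packet layout, not the HVSI structures:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_pkt {
	__be16 verb;		/* on-wire fields keep the __be* types */
	__be32 word;
};

static void example_fill(struct example_pkt *pkt, u16 verb, u32 word)
{
	pkt->verb = cpu_to_be16(verb);	/* convert once, at the boundary */
	pkt->word = cpu_to_be32(word);
}

static u16 example_verb(const struct example_pkt *pkt)
{
	return be16_to_cpu(pkt->verb);	/* convert back when consuming */
}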
 
index c9a9ddd1d0bc2aa5091d7e1678920df2d9292429..7a744b69c3d144516388d3e8a4bc2fc440fd4d34 100644 (file)
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
        if (canon_change) {
                bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
-               ldata->line_start = 0;
-               ldata->canon_head = ldata->read_tail;
+               ldata->line_start = ldata->canon_head = ldata->read_tail;
                ldata->erasing = 0;
                ldata->lnext = 0;
        }
@@ -2184,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
 
                if (!input_available_p(tty, 0)) {
                        if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-                               retval = -EIO;
-                               break;
-                       }
-                       if (tty_hung_up_p(file))
-                               break;
-                       if (!timeout)
-                               break;
-                       if (file->f_flags & O_NONBLOCK) {
-                               retval = -EAGAIN;
-                               break;
-                       }
-                       if (signal_pending(current)) {
-                               retval = -ERESTARTSYS;
-                               break;
-                       }
-                       n_tty_set_room(tty);
-                       up_read(&tty->termios_rwsem);
+                               up_read(&tty->termios_rwsem);
+                               tty_flush_to_ldisc(tty);
+                               down_read(&tty->termios_rwsem);
+                               if (!input_available_p(tty, 0)) {
+                                       retval = -EIO;
+                                       break;
+                               }
+                       } else {
+                               if (tty_hung_up_p(file))
+                                       break;
+                               if (!timeout)
+                                       break;
+                               if (file->f_flags & O_NONBLOCK) {
+                                       retval = -EAGAIN;
+                                       break;
+                               }
+                               if (signal_pending(current)) {
+                                       retval = -ERESTARTSYS;
+                                       break;
+                               }
+                               n_tty_set_room(tty);
+                               up_read(&tty->termios_rwsem);
 
-                       timeout = schedule_timeout(timeout);
+                               timeout = schedule_timeout(timeout);
 
-                       down_read(&tty->termios_rwsem);
-                       continue;
+                               down_read(&tty->termios_rwsem);
+                               continue;
+                       }
                }
                __set_current_state(TASK_RUNNING);
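The n_tty_read() rework above no longer returns -EIO immediately when the other end has closed: it drops termios_rwsem, flushes pending work into the line discipline, retakes the lock, and only reports EIO if input is still unavailable. The general recheck-after-relock shape, as a sketch with placeholder callbacks rather than the tty internals:

#include <linux/rwsem.h>

static bool example_data_or_eof(struct rw_semaphore *sem,
				bool (*data_ready)(void),
				void (*flush_pending)(void))
{
	if (data_ready())
		return true;

	/* Drop the lock before work that may queue more input. */
	up_read(sem);
	flush_pending();
	down_read(sem);

	/* State may have changed while unlocked: test again. */
	return data_ready();
}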
 
index d067285a2d203765d38d36535d588854988f75c2..6b0f75eac8a26de2c6a3a83eac061ae7a03295cd 100644 (file)
@@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
 /*
  * Get ip name usart or uart
  */
-static int atmel_get_ip_name(struct uart_port *port)
+static void atmel_get_ip_name(struct uart_port *port)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
        int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
                atmel_port->is_usart = false;
        } else {
                dev_err(port->dev, "Not supported ip name, set to uart\n");
-               return -EINVAL;
        }
-
-       return 0;
 }
 
 /*
@@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
        /*
         * Get port name of usart or uart
         */
-       ret = atmel_get_ip_name(&port->uart);
-       if (ret < 0)
-               goto err_add_port;
+       atmel_get_ip_name(&port->uart);
 
        return 0;
 
index a0ebbc9ce5cdbacc69cc8c240f8d9fd76e43ec12..042aa077b5b3e166a8453ac0684da1dd024f6785 100644 (file)
@@ -1912,9 +1912,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 
        sport->devdata = of_id->data;
 
-       if (of_device_is_stdout_path(np))
-               add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
-
        return 0;
 }
 #else
index 52379e56a31e7abc43a6642a799b1be7744be6c8..44077c0b7670075625650c9d00cc98a63bb90f5c 100644 (file)
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
 
 static int dma_push_rx(struct eg20t_port *priv, int size)
 {
-       struct tty_struct *tty;
        int room;
        struct uart_port *port = &priv->port;
        struct tty_port *tport = &port->state->port;
 
-       port = &priv->port;
-       tty = tty_port_tty_get(tport);
-       if (!tty) {
-               dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
-               return 0;
-       }
-
        room = tty_buffer_request_room(tport, size);
 
        if (room < size)
                dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
                         size - room);
        if (!room)
-               return room;
+               return 0;
 
        tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
 
        port->icount.rx += room;
-       tty_kref_put(tty);
 
        return room;
 }
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
        if (tty == NULL) {
                for (i = 0; error_msg[i] != NULL; i++)
                        dev_err(&priv->pdev->dev, error_msg[i]);
+       } else {
+               tty_kref_put(tty);
        }
 }
 
index d0d972f7e43e3759fb03119786a1103764c86730..0489a2bdcdf9a1aff4af5dacd1d7766f9671a523 100644 (file)
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 static void tegra_uart_stop_rx(struct uart_port *u)
 {
        struct tegra_uart_port *tup = to_tegra_uport(u);
-       struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+       struct tty_struct *tty;
        struct tty_port *port = &u->state->port;
        struct dma_tx_state state;
        unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
        if (!tup->rx_in_progress)
                return;
 
+       tty = tty_port_tty_get(&tup->uport.state->port);
+
        tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
 
        ier = tup->ier_shadow;
index 537750261aaa2fe5a5e2de8fdb3e6ee0adf8fe4d..e7e9cabb21fd2a4a6613f075632e6f16f3432a33 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 
 #ifdef CONFIG_SUPERH
 #include <asm/sh_bios.h>
@@ -1433,7 +1434,7 @@ static void work_fn_rx(struct work_struct *work)
        desc = s->desc_rx[new];
 
        if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-           DMA_SUCCESS) {
+           DMA_COMPLETE) {
                /* Handle incomplete DMA receive */
                struct dma_chan *chan = s->chan_rx;
                struct shdma_desc *sh_desc = container_of(desc,
@@ -2437,6 +2438,112 @@ static int sci_remove(struct platform_device *dev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id of_sci_match[] = {
+       { .compatible = "renesas,sci-SCI-uart",
+               .data = (void *)PORT_SCI },
+       { .compatible = "renesas,sci-SCIF-uart",
+               .data = (void *)PORT_SCIF },
+       { .compatible = "renesas,sci-IRDA-uart",
+               .data = (void *)PORT_IRDA },
+       { .compatible = "renesas,sci-SCIFA-uart",
+               .data = (void *)PORT_SCIFA },
+       { .compatible = "renesas,sci-SCIFB-uart",
+               .data = (void *)PORT_SCIFB },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_sci_match);
+
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+                                                               int *dev_id)
+{
+       struct plat_sci_port *p;
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct resource *res;
+       const __be32 *prop;
+       int i, irq, val;
+
+       match = of_match_node(of_sci_match, pdev->dev.of_node);
+       if (!match || !match->data) {
+               dev_err(&pdev->dev, "OF match error\n");
+               return NULL;
+       }
+
+       p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
+       if (!p) {
+               dev_err(&pdev->dev, "failed to allocate DT config data\n");
+               return NULL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               return NULL;
+       }
+       p->mapbase = res->start;
+
+       for (i = 0; i < SCIx_NR_IRQS; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "failed to get irq data %d\n", i);
+                       return NULL;
+               }
+               p->irqs[i] = irq;
+       }
+
+       prop = of_get_property(np, "cell-index", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop cell-index missing\n");
+               return NULL;
+       }
+       *dev_id = be32_to_cpup(prop);
+
+       prop = of_get_property(np, "renesas,scscr", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop scscr missing\n");
+               return NULL;
+       }
+       p->scscr = be32_to_cpup(prop);
+
+       prop = of_get_property(np, "renesas,scbrr-algo-id", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop scbrr-algo-id missing\n");
+               return NULL;
+       }
+       val = be32_to_cpup(prop);
+       if (val <= SCBRR_ALGO_INVALID || val >= SCBRR_NR_ALGOS) {
+               dev_err(&pdev->dev, "DT prop scbrr-algo-id out of range\n");
+               return NULL;
+       }
+       p->scbrr_algo_id = val;
+
+       p->flags = UPF_IOREMAP;
+       if (of_get_property(np, "renesas,autoconf", NULL))
+               p->flags |= UPF_BOOT_AUTOCONF;
+
+       prop = of_get_property(np, "renesas,regtype", NULL);
+       if (prop) {
+               val = be32_to_cpup(prop);
+               if (val < SCIx_PROBE_REGTYPE || val >= SCIx_NR_REGTYPES) {
+                       dev_err(&pdev->dev, "DT prop regtype out of range\n");
+                       return NULL;
+               }
+               p->regtype = val;
+       }
+
+       p->type = (unsigned int)match->data;
+
+       return p;
+}
+#else
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+                                                               int *dev_id)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 static int sci_probe_single(struct platform_device *dev,
                                      unsigned int index,
                                      struct plat_sci_port *p,
@@ -2469,9 +2576,9 @@ static int sci_probe_single(struct platform_device *dev,
 
 static int sci_probe(struct platform_device *dev)
 {
-       struct plat_sci_port *p = dev_get_platdata(&dev->dev);
-       struct sci_port *sp = &sci_ports[dev->id];
-       int ret;
+       struct plat_sci_port *p;
+       struct sci_port *sp;
+       int ret, dev_id = dev->id;
 
        /*
         * If we've come here via earlyprintk initialization, head off to
@@ -2481,9 +2588,20 @@ static int sci_probe(struct platform_device *dev)
        if (is_early_platform_device(dev))
                return sci_probe_earlyprintk(dev);
 
+       if (dev->dev.of_node)
+               p = sci_parse_dt(dev, &dev_id);
+       else
+               p = dev_get_platdata(&dev->dev);
+
+       if (!p) {
+               dev_err(&dev->dev, "no setup data supplied\n");
+               return -EINVAL;
+       }
+
+       sp = &sci_ports[dev_id];
        platform_set_drvdata(dev, sp);
 
-       ret = sci_probe_single(dev, dev->id, p, sp);
+       ret = sci_probe_single(dev, dev_id, p, sp);
        if (ret)
                return ret;
 
@@ -2535,6 +2653,7 @@ static struct platform_driver sci_driver = {
                .name   = "sh-sci",
                .owner  = THIS_MODULE,
                .pm     = &sci_dev_pm_ops,
+               .of_match_table = of_match_ptr(of_sci_match),
        },
 };
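The sh-sci DT support added above selects the port type from the matched of_device_id's .data pointer. A minimal sketch of that lookup in a probe path; the table, compatibles, and numeric values here are placeholders, not the driver's:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-a", .data = (void *)1 },
	{ .compatible = "vendor,example-b", .data = (void *)2 },
	{ },
};

static int example_port_type(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_node(example_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	/* .data carries per-compatible configuration, here a small enum */
	return (int)(unsigned long)match->data;
}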
 
index 93b697a0de658ddc6769fedbe822b1151fef4bab..15ad6fcda88b323b2f5711b753ee76126741ea04 100644 (file)
@@ -561,12 +561,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
        if (!mmres || !irqres)
                return -ENODEV;
 
-       if (np)
+       if (np) {
                port = of_alias_get_id(np, "serial");
                if (port >= VT8500_MAX_PORTS)
                        port = -1;
-       else
+       } else {
                port = -1;
+       }
 
        if (port < 0) {
                /* calculate the port id */
index 03ba081c577251a0947466b79f23b36b359b51c1..6fd60fece6b4b01684695b7df4af376e695ff873 100644 (file)
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
                }
                return 0;
        case TCFLSH:
+               retval = tty_check_change(tty);
+               if (retval)
+                       return retval;
                return __tty_perform_flush(tty, arg);
        default:
                /* Try the mode commands */
index 4a851e15e58cdfb6832fbc14c93e3e96cb6f9515..77b47d82c9a69d05d7985068fb956f961b6481f1 100644 (file)
@@ -1,6 +1,6 @@
 config USB_CHIPIDEA
        tristate "ChipIdea Highspeed Dual Role Controller"
-       depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)
+       depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
        help
          Say Y here if your system has a dual role high speed USB
          controller based on ChipIdea silicon IP. Currently, only the
index 74d998d9b45b28001e2f15f6026640ecb6e07085..7ad541591c81c0db06b6dc6b4b3d7e6d49137103 100644 (file)
@@ -121,17 +121,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 
        pdata.phy = data->phy;
 
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_clk;
 
        if (data->usbmisc_data) {
                ret = imx_usbmisc_init(data->usbmisc_data);
                if (ret) {
                        dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
                                        ret);
-                       goto err_clk;
+                       goto err_phy;
                }
        }
 
@@ -143,7 +142,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Can't register ci_hdrc platform device, err=%d\n",
                        ret);
-               goto err_clk;
+               goto err_phy;
        }
 
        if (data->usbmisc_data) {
@@ -164,6 +163,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 
 disable_device:
        ci_hdrc_remove_device(data->ci_pdev);
+err_phy:
+       if (data->phy)
+               usb_phy_shutdown(data->phy);
 err_clk:
        clk_disable_unprepare(data->clk);
        return ret;
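This ci_hdrc_imx hunk, like several probe functions later in the merge (dwc3-exynos and a number of EHCI glue drivers), replaces the open-coded dma_mask/coherent_dma_mask assignments with dma_coerce_mask_and_coherent(), which also returns an error instead of silently leaving a bad mask. A small before/after sketch in a placeholder probe:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Previously open coded and unchecked:
	 *	if (!pdev->dev.dma_mask)
	 *		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	 *	if (!pdev->dev.coherent_dma_mask)
	 *		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	 */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... rest of the probe ... */
	return 0;
}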
index 042320a6c6c79278ac27191980c0fb63f7ddfc35..d514332ac0811c9fec55a40253b3c526d19cde6d 100644 (file)
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
                .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
        },
-       { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+       {
+               /* Intel Clovertrail */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006),
+               .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
+       },
+       { 0 } /* end: all zeroes */
 };
 MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
 
index 94626409559a96a79c0d027023dbade8a645ce58..23763dcec069b2e8d1ba6dd140fc9cf644d2ddec 100644 (file)
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
        dbg_remove_files(ci);
        free_irq(ci->irq, ci);
        ci_role_destroy(ci);
+       kfree(ci->hw_bank.regmap);
 
        return 0;
 }
index 6f96795dd20cc3ff3d0c08490ae8d87b0b2ccccf..64d7a6d9a1adcbd679258661a6b5875bd8bf834a 100644 (file)
@@ -100,8 +100,10 @@ static void host_stop(struct ci_hdrc *ci)
 {
        struct usb_hcd *hcd = ci->hcd;
 
-       usb_remove_hcd(hcd);
-       usb_put_hcd(hcd);
+       if (hcd) {
+               usb_remove_hcd(hcd);
+               usb_put_hcd(hcd);
+       }
        if (ci->platdata->reg_vbus)
                regulator_disable(ci->platdata->reg_vbus);
 }
index 6b4c2f2eb94649c7da6fc577b54598d53ebdfec4..9333083dd1111c047c7016a44402242e2aa87f7d 100644 (file)
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
        for (i = 0; i < ci->hw_ep_max; i++) {
                struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
 
+               if (hwep->pending_td)
+                       free_pending_td(hwep);
                dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
        }
 }
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
                if (ci->platdata->notify_event)
                        ci->platdata->notify_event(ci,
                        CI_HDRC_CONTROLLER_STOPPED_EVENT);
-               ci->driver = NULL;
                spin_unlock_irqrestore(&ci->lock, flags);
                _gadget_stop_activity(&ci->gadget);
                spin_lock_irqsave(&ci->lock, flags);
                pm_runtime_put(&ci->gadget.dev);
        }
 
+       ci->driver = NULL;
        spin_unlock_irqrestore(&ci->lock, flags);
 
        return 0;
index 737e3c19967bee3e81f57cc041ec9989ec38fb57..71dc5d768fa5cef3edc5ac27c84c032d2a0a0dcf 100644 (file)
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
                if ((index & ~USB_DIR_IN) == 0)
                        return 0;
                ret = findintfep(ps->dev, index);
+               if (ret < 0) {
+                       /*
+                        * Some not fully compliant Win apps seem to get
+                        * index wrong and have the endpoint number here
+                        * rather than the endpoint address (with the
+                        * correct direction). Win does let this through,
+                        * so we'll not reject it here but leave it to
+                        * the device to not break KVM. But we warn.
+                        */
+                       ret = findintfep(ps->dev, index ^ 0x80);
+                       if (ret >= 0)
+                               dev_info(&ps->dev->dev,
+                                       "%s: process %i (%s) requesting ep %02x but needs %02x\n",
+                                       __func__, task_pid_nr(current),
+                                       current->comm, index, index ^ 0x80);
+               }
                if (ret >= 0)
                        ret = checkintf(ps, ret);
                break;
index dde4c83516a1870bd4ed7a11e3fe433ced0979af..e6b682c6c236b8152561496b2e00bba22e75e25c 100644 (file)
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
        unsigned long long u2_pel;
        int ret;
 
+       if (udev->state != USB_STATE_CONFIGURED)
+               return 0;
+
        /* Convert SEL and PEL stored in ns to us */
        u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
        u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
index 5b44cd47da5b28b30046bead355aa54eda3eec0f..01fe36273f3b144cb1c1fe3306ebdf16459c605f 100644 (file)
@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Alcor Micro Corp. Hub */
        { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* MicroTouch Systems touchscreen */
+       { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Broadcom BCM92035DGROM BT dongle */
        { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* MAYA44USB sound device */
+       { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index b870872e020f1e0bd8e93c157b76d145bc2b2122..70fc43027a5c65b6f29f22dc9ec64c18db220323 100644 (file)
@@ -1,7 +1,6 @@
 config USB_DWC3
        tristate "DesignWare USB3 DRD Core Support"
        depends on (USB || USB_GADGET) && HAS_DMA
-       depends on EXTCON
        select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
        help
          Say Y or M here if your system has a Dual Role SuperSpeed
index 2f2e88a3a11a3c9a9adbcca6b104b54fa2f4352b..8b20c70d91e788c8e27791b98e454c4a5ba87ae7 100644 (file)
@@ -119,10 +119,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err1;
 
        platform_set_drvdata(pdev, exynos);
 
index 9b138129e856aa22b27c4a83398adf55c8949247..2e252aae51ca0bcc5da41b3251c45cec91cced7d 100644 (file)
@@ -28,6 +28,8 @@
 /* FIXME define these in <linux/pci_ids.h> */
 #define PCI_VENDOR_ID_SYNOPSYS         0x16c3
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3        0xabcd
+#define PCI_DEVICE_ID_INTEL_BYT                0x0f37
+#define PCI_DEVICE_ID_INTEL_MRFLD      0x119e
 
 struct dwc3_pci {
        struct device           *dev;
@@ -187,6 +189,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
                PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
                                PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
        },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
        {  }    /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
index f168eaebdef8fc6731de7c6994c658737495fdce..5452c0fce36074d4238e3553bb00d879d8df9ac3 100644 (file)
@@ -2611,15 +2611,13 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
        if (ret) {
                dev_err(dwc->dev, "failed to register udc\n");
-               goto err5;
+               goto err4;
        }
 
        return 0;
 
-err5:
-       dwc3_gadget_free_endpoints(dwc);
-
 err4:
+       dwc3_gadget_free_endpoints(dwc);
        dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
                        dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
index 5a5acf22c694514f5b3ebede7903f9912e4138b5..e126b6b248e63cd1b412bea2561e90bd15a104d3 100644 (file)
@@ -113,12 +113,6 @@ static int __init cdc_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       fi_ecm = usb_get_function_instance("ecm");
-       if (IS_ERR(fi_ecm)) {
-               status = PTR_ERR(fi_ecm);
-               goto err_func_ecm;
-       }
-
        f_ecm = usb_get_function(fi_ecm);
        if (IS_ERR(f_ecm)) {
                status = PTR_ERR(f_ecm);
@@ -129,35 +123,24 @@ static int __init cdc_do_config(struct usb_configuration *c)
        if (status)
                goto err_add_ecm;
 
-       fi_serial = usb_get_function_instance("acm");
-       if (IS_ERR(fi_serial)) {
-               status = PTR_ERR(fi_serial);
-               goto err_get_acm;
-       }
-
        f_acm = usb_get_function(fi_serial);
        if (IS_ERR(f_acm)) {
                status = PTR_ERR(f_acm);
-               goto err_func_acm;
+               goto err_get_acm;
        }
 
        status = usb_add_function(c, f_acm);
        if (status)
                goto err_add_acm;
-
        return 0;
 
 err_add_acm:
        usb_put_function(f_acm);
-err_func_acm:
-       usb_put_function_instance(fi_serial);
 err_get_acm:
        usb_remove_function(c, f_ecm);
 err_add_ecm:
        usb_put_function(f_ecm);
 err_get_ecm:
-       usb_put_function_instance(fi_ecm);
-err_func_ecm:
        return status;
 }
 
index 06ecd08fd57a3174022a3579f458c84f30440123..b8a2376971a47aa357794117f74390aaee01a594 100644 (file)
@@ -923,8 +923,9 @@ static int dummy_udc_stop(struct usb_gadget *g,
        struct dummy_hcd        *dum_hcd = gadget_to_dummy_hcd(g);
        struct dummy            *dum = dum_hcd->dum;
 
-       dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
-                       driver->driver.name);
+       if (driver)
+               dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
+                               driver->driver.name);
 
        dum->driver = NULL;
 
@@ -1000,8 +1001,8 @@ static int dummy_udc_remove(struct platform_device *pdev)
 {
        struct dummy    *dum = platform_get_drvdata(pdev);
 
-       usb_del_gadget_udc(&dum->gadget);
        device_remove_file(&dum->gadget.dev, &dev_attr_function);
+       usb_del_gadget_udc(&dum->gadget);
        return 0;
 }
 
index edab45da37417e16960b75033a46a8453475b77b..8d9e6f7e8f1a5a6c36965dfdd4c5a213f4f917aa 100644 (file)
@@ -995,7 +995,7 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
        usb_ep_free_request(ecm->notify, ecm->notify_req);
 }
 
-struct usb_function *ecm_alloc(struct usb_function_instance *fi)
+static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
 {
        struct f_ecm    *ecm;
        struct f_ecm_opts *opts;
index d00392d879db3aa3e75d093d4e26041fa42bcb54..d61c11d765d0a92b9e9e79270e9de4b3cb474ca3 100644 (file)
@@ -624,7 +624,7 @@ static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
        usb_free_all_descriptors(f);
 }
 
-struct usb_function *eem_alloc(struct usb_function_instance *fi)
+static struct usb_function *eem_alloc(struct usb_function_instance *fi)
 {
        struct f_eem    *eem;
        struct f_eem_opts *opts;
index 1a66c5baa0d1f292188a1756cf17d5243b2ffe58..44cf775a86271ccd95cdf9577c283fc899e70d6c 100644 (file)
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
        struct ffs_file_perms perms;
        umode_t root_mode;
        const char *dev_name;
-       union {
-               /* set by ffs_fs_mount(), read by ffs_sb_fill() */
-               void *private_data;
-               /* set by ffs_sb_fill(), read by ffs_fs_mount */
-               struct ffs_data *ffs_data;
-       };
+       struct ffs_data *ffs_data;
 };
 
 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 {
        struct ffs_sb_fill_data *data = _data;
        struct inode    *inode;
-       struct ffs_data *ffs;
+       struct ffs_data *ffs = data->ffs_data;
 
        ENTER();
 
-       /* Initialise data */
-       ffs = ffs_data_new();
-       if (unlikely(!ffs))
-               goto Enomem;
-
        ffs->sb              = sb;
-       ffs->dev_name        = kstrdup(data->dev_name, GFP_KERNEL);
-       if (unlikely(!ffs->dev_name))
-               goto Enomem;
-       ffs->file_perms      = data->perms;
-       ffs->private_data    = data->private_data;
-
-       /* used by the caller of this function */
-       data->ffs_data       = ffs;
-
+       data->ffs_data       = NULL;
        sb->s_fs_info        = ffs;
        sb->s_blocksize      = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
                                  &data->perms);
        sb->s_root = d_make_root(inode);
        if (unlikely(!sb->s_root))
-               goto Enomem;
+               return -ENOMEM;
 
        /* EP0 file */
        if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
                                         &ffs_ep0_operations, NULL)))
-               goto Enomem;
+               return -ENOMEM;
 
        return 0;
-
-Enomem:
-       return -ENOMEM;
 }
 
 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
        struct dentry *rv;
        int ret;
        void *ffs_dev;
+       struct ffs_data *ffs;
 
        ENTER();
 
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
        if (unlikely(ret < 0))
                return ERR_PTR(ret);
 
+       ffs = ffs_data_new();
+       if (unlikely(!ffs))
+               return ERR_PTR(-ENOMEM);
+       ffs->file_perms = data.perms;
+
+       ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+       if (unlikely(!ffs->dev_name)) {
+               ffs_data_put(ffs);
+               return ERR_PTR(-ENOMEM);
+       }
+
        ffs_dev = functionfs_acquire_dev_callback(dev_name);
-       if (IS_ERR(ffs_dev))
-               return ffs_dev;
+       if (IS_ERR(ffs_dev)) {
+               ffs_data_put(ffs);
+               return ERR_CAST(ffs_dev);
+       }
+       ffs->private_data = ffs_dev;
+       data.ffs_data = ffs;
 
-       data.dev_name = dev_name;
-       data.private_data = ffs_dev;
        rv = mount_nodev(t, flags, &data, ffs_sb_fill);
-
-       /* data.ffs_data is set by ffs_sb_fill */
-       if (IS_ERR(rv))
+       if (IS_ERR(rv) && data.ffs_data) {
                functionfs_release_dev_callback(data.ffs_data);
-
+               ffs_data_put(data.ffs_data);
+       }
        return rv;
 }
 
@@ -2264,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c,
                                   data->raw_descs + ret,
                                   (sizeof data->raw_descs) - ret,
                                   __ffs_func_bind_do_descs, func);
+               if (unlikely(ret < 0))
+                       goto error;
        }
 
        /*
index 313b835eedfd58948d23a96a85ade673347e8114..a01d7d38c01685135a503885342be7f9ec63ebbd 100644 (file)
@@ -2260,10 +2260,12 @@ reset:
                /* Disable the endpoints */
                if (fsg->bulk_in_enabled) {
                        usb_ep_disable(fsg->bulk_in);
+                       fsg->bulk_in->driver_data = NULL;
                        fsg->bulk_in_enabled = 0;
                }
                if (fsg->bulk_out_enabled) {
                        usb_ep_disable(fsg->bulk_out);
+                       fsg->bulk_out->driver_data = NULL;
                        fsg->bulk_out_enabled = 0;
                }
 
index 32db2eee2d8738fe5388f8c00bd2c644ac2b5b20..bbbfd19487784c6ebbfb107a24a7233c45740a45 100644 (file)
@@ -1214,6 +1214,6 @@ static struct platform_driver fotg210_driver = {
 
 module_platform_driver(fotg210_driver);
 
-MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
index f1dd6daabe217c0742140c1619c3be5aa22c8c54..b278abe524539640f14a29f70a1f3a9d25fa1747 100644 (file)
@@ -22,7 +22,7 @@
 
 MODULE_DESCRIPTION("FUSB300  USB gadget driver");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
 MODULE_ALIAS("platform:fusb300_udc");
 
 #define DRIVER_VERSION "20 October 2010"
index 67128be1e1b70f25b29f8cc571b1b916a9c35f2c..6a2a65aa0057c9d822cd5d9bac61aaeac470bd71 100644 (file)
@@ -3078,7 +3078,9 @@ static int __init lpc32xx_udc_probe(struct platform_device *pdev)
                 udc->isp1301_i2c_client->addr);
 
        pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto resource_fail;
 
        udc->board = &lpc32xx_usbddata;
 
index 2a1ebefd8f9eb31318fc861fbc010214148db18d..23393254a8a35593526062cc09880f50c809dced 100644 (file)
@@ -179,7 +179,7 @@ err_conf:
        return ret;
 }
 
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
 {
        static struct usb_configuration config = {
                .bConfigurationValue    = MULTI_RNDIS_CONFIG_NUM,
@@ -194,7 +194,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
 
 #else
 
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
 {
        return 0;
 }
@@ -241,7 +241,7 @@ err_conf:
        return ret;
 }
 
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
 {
        static struct usb_configuration config = {
                .bConfigurationValue    = MULTI_CDC_CONFIG_NUM,
@@ -256,7 +256,7 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
 
 #else
 
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
 {
        return 0;
 }
index bbb6e98c4384292d3446b62f8671decaf0a51c55..561b30efb8ee84de6fc37c33005cdadc6fbcc5ff 100644 (file)
@@ -645,6 +645,7 @@ static int  mv_u3d_ep_disable(struct usb_ep *_ep)
        struct mv_u3d_ep *ep;
        struct mv_u3d_ep_context *ep_context;
        u32 epxcr, direction;
+       unsigned long flags;
 
        if (!_ep)
                return -EINVAL;
@@ -661,7 +662,9 @@ static int  mv_u3d_ep_disable(struct usb_ep *_ep)
        direction = mv_u3d_ep_dir(ep);
 
        /* nuke all pending requests (does flush) */
+       spin_lock_irqsave(&u3d->lock, flags);
        mv_u3d_nuke(ep, -ESHUTDOWN);
+       spin_unlock_irqrestore(&u3d->lock, flags);
 
        /* Disable the endpoint for Rx or Tx and reset the endpoint type */
        if (direction == MV_U3D_EP_DIR_OUT) {
index cc9207473dbc33f204bbd4009eb9e2138f966da6..0ac6064aa3b86b6cd2376324ac9996f8ece1c0a0 100644 (file)
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = {
 /*
  *     probe - binds to the platform device
  */
-static int __init pxa25x_udc_probe(struct platform_device *pdev)
+static int pxa25x_udc_probe(struct platform_device *pdev)
 {
        struct pxa25x_udc *dev = &memory;
        int retval, irq;
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
        pullup_off();
 }
 
-static int __exit pxa25x_udc_remove(struct platform_device *pdev)
+static int pxa25x_udc_remove(struct platform_device *pdev)
 {
        struct pxa25x_udc *dev = platform_get_drvdata(pdev);
 
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev)
 
 static struct platform_driver udc_driver = {
        .shutdown       = pxa25x_udc_shutdown,
-       .remove         = __exit_p(pxa25x_udc_remove),
+       .probe          = pxa25x_udc_probe,
+       .remove         = pxa25x_udc_remove,
        .suspend        = pxa25x_udc_suspend,
        .resume         = pxa25x_udc_resume,
        .driver         = {
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = {
        },
 };
 
-module_platform_driver_probe(udc_driver, pxa25x_udc_probe);
+module_platform_driver(udc_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
index d69b36a99dbcd8ae5a1bc57275398be86a2562ca..a8a99e4748d522bded4aef1882a7beaafe45603b 100644 (file)
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
         * FIFO, requests of >512 cause the endpoint to get stuck with a
         * fragment of the end of the transfer in it.
         */
-       if (can_write > 512)
+       if (can_write > 512 && !periodic)
                can_write = 512;
 
        /*
@@ -2475,8 +2475,6 @@ irq_retry:
        if (gintsts & GINTSTS_ErlySusp) {
                dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
                writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
-
-               s3c_hsotg_disconnect(hsotg);
        }
 
        /*
@@ -2962,9 +2960,6 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
        if (!hsotg)
                return -ENODEV;
 
-       if (!driver || driver != hsotg->driver || !driver->unbind)
-               return -EINVAL;
-
        /* all endpoints should be shutdown */
        for (ep = 0; ep < hsotg->num_of_eps; ep++)
                s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -2972,15 +2967,15 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
        spin_lock_irqsave(&hsotg->lock, flags);
 
        s3c_hsotg_phy_disable(hsotg);
-       regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 
-       hsotg->driver = NULL;
+       if (!driver)
+               hsotg->driver = NULL;
+
        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
 
        spin_unlock_irqrestore(&hsotg->lock, flags);
 
-       dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
-                driver->driver.name);
+       regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 
        return 0;
 }
index 08a1a3210a2117f43b29f899085a21c547a9e644..cd1431d850c4d84a925286de4a47667f24b19fe4 100644 (file)
@@ -450,11 +450,11 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
         * If we can't read the file, it's no good.
         * If we can't write the file, use it read-only.
         */
-       if (!(filp->f_op->read || filp->f_op->aio_read)) {
+       if (!file_readable(filp)) {
                LINFO(curlun, "file not readable: %s\n", filename);
                goto out;
        }
-       if (!(filp->f_op->write || filp->f_op->aio_write))
+       if (!file_writable(filp))
                ro = 1;
 
        size = i_size_read(inode->i_mapping->host);
index df13d425e9c5fd4700db812505978c4876ac5dd7..205f4a336583de6c32b608854a943cc121dc1dc4 100644 (file)
@@ -227,8 +227,7 @@ static int bcma_hcd_probe(struct bcma_device *dev)
 
        /* TODO: Probably need checks here; is the core connected? */
 
-       if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
-           dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+       if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
                return -EOPNOTSUPP;
 
        usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
index 3b645ff46f7b9f8df67d2d793c1758be26173b21..8e7323e07f794435d001ee2df81135fa5bc2325e 100644 (file)
@@ -90,10 +90,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail_create_hcd;
 
        hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
        if (!hcd) {
index 947b009009f111ba710954ea639962b4d4008463..f2407b2e8a996210aec7f3e7a442119fd6928924 100644 (file)
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
        }
 
        /* Enable USB controller, 83xx or 8536 */
-       if (pdata->have_sysif_regs)
+       if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
                setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
 
        /* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
        case FSL_USB2_PHY_ULPI:
                if (pdata->have_sysif_regs && pdata->controller_ver) {
                        /* controller version 1.6 or above */
+                       clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
                        setbits32(non_ehci + FSL_SOC_USB_CTRL,
-                                       ULPI_PHY_CLK_SEL);
-                       /*
-                        * Due to controller issue of PHY_CLK_VALID in ULPI
-                        * mode, we set USB_CTRL_USB_EN before checking
-                        * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
-                        */
-                       clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-                                       UTMI_PHY_EN, USB_CTRL_USB_EN);
+                               ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
                }
                portsc |= PORT_PTS_ULPI;
                break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
        if (pdata->have_sysif_regs && pdata->controller_ver &&
            (phy_mode == FSL_USB2_PHY_ULPI)) {
                /* check PHY_CLK_VALID to get phy clk valid */
-               if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
-                               PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
+               if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+                               PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+                               in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
                        printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
                        return -EINVAL;
                }
@@ -669,7 +664,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
         * generic hardware linkage
         */
        .irq = ehci_irq,
-       .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags = HCD_USB2 | HCD_MEMORY,
 
        /*
         * basic lifecycle operations
index b52a66ce92e8592b123239aa24b724dddfd085fa..83ab51af250f158e735373760f8c008ce6fe0ad8 100644 (file)
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 5d6022f30ebe94b2228c6b38851217af1ee8f477..86ab9fd9fe9e938fc29cadeac72a935e8c26b8ff 100644 (file)
@@ -1158,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = {
         * generic hardware linkage
         */
        .irq =                  ehci_irq,
-       .flags =                HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags =                HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 417c10da945078e37ddf20e7be290e996936074e..35cdbd88bbbef62a93a3aa869c12dbfda8183bf2 100644 (file)
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
         * generic hardware linkage
         */
        .irq = ehci_irq,
-       .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index ab0397e4d8f3eadae916d07434f4431f3d59def3..323a02b1a0a65778b0e7dd74ad9423e632444402 100644 (file)
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
@@ -116,8 +116,10 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
         * We can DMA from anywhere. But the descriptors must be in
         * the lower 4GB.
         */
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
        if (!hcd)
index 78b01fa475bbfc85e0aa35fcd9ab917b963689ee..6fa82d6b7661bea07d38dd249d76a80843ff4d01 100644 (file)
@@ -104,7 +104,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        struct resource *res;
        struct usb_hcd  *hcd;
        void __iomem *regs;
-       int ret = -ENODEV;
+       int ret;
        int irq;
        int i;
        struct omap_hcd *omap;
@@ -144,11 +144,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
+       ret = -ENODEV;
        hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
                        dev_name(dev));
        if (!hcd) {
index d1dfb9db5b420845edec3a30b81bdd30792704e9..2ba76730e6509ea8f9372234be96f9ea81796cd1 100644 (file)
@@ -180,10 +180,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
         * set. Since shared usb code relies on it, set it here for
         * now. Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               goto err1;
 
        if (!request_mem_region(res->start, resource_size(res),
                                ehci_orion_hc_driver.description)) {
index 6bd299e61f58d23202183f3186c9542f99e93a23..854c2ec7b699d4effe2c9ca6ae4ab264e1a73cd8 100644 (file)
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
        .remove =       usb_hcd_pci_remove,
        .shutdown =     usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        .driver =       {
                .pm =   &usb_hcd_pci_pm_ops
        },
index f6b790ca8cf2415805d827fcceebbde48291a935..7f30b7168d5a53542cdbea786d9cba849a932579 100644 (file)
@@ -78,7 +78,7 @@ static int ehci_platform_probe(struct platform_device *dev)
        struct resource *res_mem;
        struct usb_ehci_pdata *pdata;
        int irq;
-       int err = -ENOMEM;
+       int err;
 
        if (usb_disabled())
                return -ENODEV;
@@ -89,10 +89,10 @@ static int ehci_platform_probe(struct platform_device *dev)
         */
        if (!dev_get_platdata(&dev->dev))
                dev->dev.platform_data = &ehci_platform_defaults;
-       if (!dev->dev.dma_mask)
-               dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-       if (!dev->dev.coherent_dma_mask)
-               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+       err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        pdata = dev_get_platdata(&dev->dev);
 
index 893b707f0000abf0e323f39b28b6060a3293bcef..601e208bd782c07e9d0bb1b60d238ccbb7774758 100644 (file)
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
 #else
        .irq =                  ehci_irq,
 #endif
-       .flags =                HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags =                HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 6cc5567bf9c87faaa8c4a21dcb4202e8e6e7fb94..932293fa32de657de2e36ba091127e50009a6ff7 100644 (file)
@@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 8188542ba17ea01214a3ab0f269fe07cb6cb1744..fd983771b02559cb56c6210e6813d9b223a80f7a 100644 (file)
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
        .product_desc           = "PS3 EHCI Host Controller",
        .hcd_priv_size          = sizeof(struct ehci_hcd),
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
        .reset                  = ps3_ehci_hc_reset,
        .start                  = ehci_run,
        .stop                   = ehci_stop,
index e321804c34755553afcf3c1ceab38678ed27af1b..a7f776a13eb17133459f23ac584b60287e544197 100644 (file)
@@ -247,6 +247,8 @@ static int qtd_copy_status (
 
 static void
 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+__releases(ehci->lock)
+__acquires(ehci->lock)
 {
        if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
                /* ... update hc-wide periodic stats */
@@ -272,8 +274,11 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
                urb->actual_length, urb->transfer_buffer_length);
 #endif
 
+       /* complete() can reenter this HCD */
        usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+       spin_unlock (&ehci->lock);
        usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
+       spin_lock (&ehci->lock);
 }
 
 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
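ehci_urb_done() above now drops ehci->lock around usb_hcd_giveback_urb(), since the completion can re-enter the HCD, and documents the imbalance with __releases()/__acquires() so sparse can check callers. A small sketch of the same annotation on a placeholder helper:

#include <linux/spinlock.h>

/* Called with the lock held; drops it around the callback and retakes it.
 * The annotations are for static checkers (sparse) and generate no code. */
static void example_giveback(spinlock_t *lock, void (*complete)(void))
__releases(lock)
__acquires(lock)
{
	spin_unlock(lock);
	complete();		/* may re-enter paths that take the lock */
	spin_lock(lock);
}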
index 7c3de95c7054339a5f62b64322f55a24f2bb286b..d919ed47bd47fdc8f631b1804e74bd54e134cf17 100644 (file)
@@ -89,10 +89,9 @@ static int s5p_ehci_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        s5p_setup_vbus_gpio(pdev);
 
index 8a734498079bc176938c57a6b132c146e9be00dd..b2de52d3961488f249aeb9d4026efe2d603b72bb 100644 (file)
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index dc899eb2b86183561351d78e8dba1ceffc9cbb18..93e59a13bc1fec919ef81690bbce9f4bdcd3e951 100644 (file)
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
         * generic hardware linkage
         */
        .irq                            = ehci_irq,
-       .flags                          = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags                          = HCD_USB2 | HCD_MEMORY,
 
        /*
         * basic lifecycle operations
index 1cf0adba3fc8dd4b3e071e8eeef769c5e9fbf1a2..ee6f9ffaa0e73ef9a6ca237d4e28960162e01d51 100644 (file)
@@ -81,10 +81,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail;
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index 78fa76da332435a83cc3c55226d33cbeb2e78afe..e74aaf3f016450a84ee42600ce584bebab41d9b0 100644 (file)
@@ -362,10 +362,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
                                        dev_name(&pdev->dev));
index 67026ffbf9a871c9780a4b0b7f0d6c739c904e73..cca4be90a864dba009c852f606653fb5fe60d568 100644 (file)
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
         * Generic hardware linkage.
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * Basic lifecycle operations.
index 1c370dfbee0d35e6a3cf2b1288836df2116599c5..59e0e24c753febfb76369be8f872cb3731c2f365 100644 (file)
@@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
         * generic hardware linkage
         */
        .irq = ehci_irq,
-       .flags = HCD_USB2|HCD_MEMORY|HCD_BH,
+       .flags = HCD_USB2|HCD_MEMORY,
 
        /*
         * basic lifecycle operations
index 95979f9f4381d8e8e573c7e0fe254d5585d23ab8..eba962e6ebfbbd8ffdd46a85e0fed5ec9b18b78a 100644 (file)
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 9e0020d9e4c8cc01353ef4b804a9ba9d50bc8cfd..abd5050a4899bdb4518294175477a418583c4e83 100644 (file)
@@ -24,7 +24,7 @@ struct fsl_usb2_dev_data {
        enum fsl_usb2_operating_modes op_mode;  /* operating mode */
 };
 
-struct fsl_usb2_dev_data dr_mode_data[] = {
+static struct fsl_usb2_dev_data dr_mode_data[] = {
        {
                .dr_mode = "host",
                .drivers = { "fsl-ehci", NULL, NULL, },
@@ -42,7 +42,7 @@ struct fsl_usb2_dev_data dr_mode_data[] = {
        },
 };
 
-struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
 {
        const unsigned char *prop;
        int i;
@@ -75,7 +75,7 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
        return FSL_USB2_PHY_NONE;
 }
 
-struct platform_device *fsl_usb2_device_register(
+static struct platform_device *fsl_usb2_device_register(
                                        struct platform_device *ofdev,
                                        struct fsl_usb2_platform_data *pdata,
                                        const char *name, int id)
index 60a5de505ca1845609fe34278405e752ddf73226..adb01d950a165f7cf7d5e1a54fe6b674b4cb533a 100644 (file)
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
                        i = DIV_ROUND_UP(wrap_frame(
                                        cur_frame - urb->start_frame),
                                        urb->interval);
-                       if (urb->transfer_flags & URB_ISO_ASAP) {
+
+                       /* Treat underruns as if URB_ISO_ASAP was set */
+                       if ((urb->transfer_flags & URB_ISO_ASAP) ||
+                                       i >= urb->number_of_packets) {
                                urb->start_frame = wrap_frame(urb->start_frame
                                                + i * urb->interval);
                                i = 0;
-                       } else if (i >= urb->number_of_packets) {
-                               ret = -EXDEV;
-                               goto alloc_dmem_failed;
                        }
                }
        }
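
The imx21 change above stops rejecting late isochronous submissions with
-EXDEV: when the URB's whole window has already expired it is rescheduled
exactly as if URB_ISO_ASAP had been set.  A small worked example of the frame
arithmetic, assuming the driver's wrap_frame() semantics:

        /*
         * E.g. start_frame = 100, cur_frame = 130, interval = 8:
         *   i = DIV_ROUND_UP(130 - 100, 8) = 4 expired slots, so
         *   start_frame becomes 100 + 4 * 8 = 132 (the next free slot)
         *   and the transfer restarts from the URB's first packet.
         */
        i = DIV_ROUND_UP(wrap_frame(cur_frame - urb->start_frame), urb->interval);
        if ((urb->transfer_flags & URB_ISO_ASAP) || i >= urb->number_of_packets) {
                urb->start_frame = wrap_frame(urb->start_frame + i * urb->interval);
                i = 0;
        }
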
index caa3764a34075e4735a9aca7d907f8fa693fde83..36423db63073bc9bce3d6fa28dbf624006f9b6e9 100644 (file)
@@ -524,7 +524,7 @@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
 static int ohci_at91_of_init(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       int i, gpio;
+       int i, gpio, ret;
        enum of_gpio_flags flags;
        struct at91_usbh_data   *pdata;
        u32 ports;
@@ -536,10 +536,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index dc6ee9adacf58679305df6baa851167e34e61ed4..866f2464f9de64c84ac96114c96dccb52a472d32 100644 (file)
@@ -114,10 +114,9 @@ static int exynos_ohci_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        exynos_ohci = devm_kzalloc(&pdev->dev, sizeof(struct exynos_ohci_hcd),
                                        GFP_KERNEL);
index 8f6b695af6a470c5f5d5a5ca6a278e3a1186dfcc..604cad1bcf9cd984666aaf0b2b797b48ee6e3382 100644 (file)
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
                        frame &= ~(ed->interval - 1);
                        frame |= ed->branch;
                        urb->start_frame = frame;
+                       ed->last_iso = frame + ed->interval * (size - 1);
                }
        } else if (ed->type == PIPE_ISOCHRONOUS) {
                u16     next = ohci_frame_no(ohci) + 1;
                u16     frame = ed->last_iso + ed->interval;
+               u16     length = ed->interval * (size - 1);
 
                /* Behind the scheduling threshold? */
                if (unlikely(tick_before(frame, next))) {
 
-                       /* USB_ISO_ASAP: Round up to the first available slot */
+                       /* URB_ISO_ASAP: Round up to the first available slot */
                        if (urb->transfer_flags & URB_ISO_ASAP) {
                                frame += (next - frame + ed->interval - 1) &
                                                -ed->interval;
 
                        /*
-                        * Not ASAP: Use the next slot in the stream.  If
-                        * the entire URB falls before the threshold, fail.
+                        * Not ASAP: Use the next slot in the stream,
+                        * no matter what.
                         */
                        } else {
-                               if (tick_before(frame + ed->interval *
-                                       (urb->number_of_packets - 1), next)) {
-                                       retval = -EXDEV;
-                                       usb_hcd_unlink_urb_from_ep(hcd, urb);
-                                       goto fail;
-                               }
-
                                /*
                                 * Some OHCI hardware doesn't handle late TDs
                                 * correctly.  After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
                                urb_priv->td_cnt = DIV_ROUND_UP(
                                                (u16) (next - frame),
                                                ed->interval);
+                               if (urb_priv->td_cnt >= urb_priv->length) {
+                                       ++urb_priv->td_cnt;     /* Mark it */
+                                       ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+                                                       urb, frame, length,
+                                                       next);
+                               }
                        }
                }
                urb->start_frame = frame;
+               ed->last_iso = frame + length;
        }
 
        /* fill the TDs and link them to the ed; and
index 7d7d507d54e83ef89cdd0695d694f7e3bb4a5e7d..df3eb3e0324ea9ffde44836ed1788cd509d30d19 100644 (file)
@@ -226,8 +226,9 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto fail_disable;
 
        dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
        if (usb_disabled()) {
index 342dc7e543b81afd5b17377ac351caf2405b7a02..6c16dcef15c6fe0ee47d063a931cd7330c81da29 100644 (file)
@@ -127,8 +127,9 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
        }
 
        /* Ohci is a 32-bit device. */
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
        if (!hcd)
index a09af26f69ed4efdde274e028eb88110231cfc18..db9bd6bc97b99106a80093d97262401c6862e167 100644 (file)
@@ -132,7 +132,7 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
        struct usb_hcd          *hcd = NULL;
        void __iomem            *regs = NULL;
        struct resource         *res;
-       int                     ret = -ENODEV;
+       int                     ret;
        int                     irq;
 
        if (usb_disabled())
@@ -166,11 +166,11 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_io;
 
+       ret = -ENODEV;
        hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
                        dev_name(dev));
        if (!hcd) {
index 93371a235e821fac080068f7522bc5a80576119c..b64949bc43e2ebacc1620a76faf6d9db2935b16e 100644 (file)
@@ -287,6 +287,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct pxaohci_platform_data *pdata;
        u32 tmp;
+       int ret;
 
        if (!np)
                return 0;
@@ -295,10 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index df4a6707322d322dc292e4d11bfdf926e34dd3ce..e7f577e636240b25a74f6b5f4b43c9ebdc99e9de 100644 (file)
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
-        struct device *dev = ohci_to_hcd(ohci)->self.controller;
+       struct device *dev = ohci_to_hcd(ohci)->self.controller;
+       struct usb_host_endpoint *ep = urb->ep;
+       struct urb_priv *urb_priv;
+
        // ASSERT (urb->hcpriv != 0);
 
+ restart:
        urb_free_priv (ohci, urb->hcpriv);
        urb->hcpriv = NULL;
        if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
                ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
                ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
        }
+
+       /*
+        * An isochronous URB that is submitted too late won't have any TDs
+        * (marked by the fact that the td_cnt value is larger than the
+        * actual number of TDs).  If the next URB on this endpoint is like
+        * that, give it back now.
+        */
+       if (!list_empty(&ep->urb_list)) {
+               urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+               urb_priv = urb->hcpriv;
+               if (urb_priv->td_cnt > urb_priv->length) {
+                       status = 0;
+                       goto restart;
+               }
+       }
 }
 
 
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
                td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
                *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
                                                (data & 0x0FFF) | 0xE000);
-               td->ed->last_iso = info & 0xffff;
        } else {
                td->hwCBP = cpu_to_hc32 (ohci, data);
        }
@@ -996,7 +1014,7 @@ rescan_this:
                        urb_priv->td_cnt++;
 
                        /* if URB is done, clean up */
-                       if (urb_priv->td_cnt == urb_priv->length) {
+                       if (urb_priv->td_cnt >= urb_priv->length) {
                                modified = completed = 1;
                                finish_urb(ohci, urb, 0);
                        }
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
        urb_priv->td_cnt++;
 
        /* If all this urb's TDs are done, call complete() */
-       if (urb_priv->td_cnt == urb_priv->length)
+       if (urb_priv->td_cnt >= urb_priv->length)
                finish_urb(ohci, urb, status);
 
        /* clean schedule:  unlink EDs that are no longer busy */
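
In ohci-q.c an isochronous URB whose entire window has expired now gets no
usable TDs; the enqueue path marks it by pushing urb_priv->td_cnt past
urb_priv->length, and the retire paths above test ">=" so such an URB is
completed immediately (finish_urb() even loops through its restart label to
hand back a queued successor in the same state).  The marking/detection idea,
reduced to a sketch:

        /* enqueue: flag "submitted too late, no TDs queued" */
        if (urb_priv->td_cnt >= urb_priv->length)
                ++urb_priv->td_cnt;             /* now td_cnt > length */

        /* retire: anything at or past its TD count is done */
        if (urb_priv->td_cnt >= urb_priv->length)
                finish_urb(ohci, urb, status);
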
index 17b2a7dad77b81cb8d7edb8e69a97ae490d78a63..aa9e127bbe718d52b8c994910c2c30826f5db06e 100644 (file)
@@ -185,6 +185,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
        if (usb_disabled())
                return -ENODEV;
 
+       /*
+        * We don't call dma_set_mask_and_coherent() here because the
+        * DMA mask has already been appropriately set up by the core
+        * SA-1111 bus code (which includes bug workarounds).
+        */
+
        hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
        if (!hcd)
                return -ENOMEM;
index cc9dd9e4f05e69469eeca87233dd0f29bac23992..075bb5e9b43fd0c4df8ddf63cfceffcc5ba1c81f 100644 (file)
@@ -111,10 +111,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail;
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index 2c76ef1320eab679e1dd1aabd1c8466981392efc..08ef2829a7e2b1c06bbd0af51f308884c8ad9a26 100644 (file)
@@ -799,7 +799,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
         * switchable ports.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
-                       cpu_to_le32(ports_available));
+                       ports_available);
 
        pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        &ports_available);
@@ -821,7 +821,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
         * host.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
-                       cpu_to_le32(ports_available));
+                       ports_available);
 
        pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
                        &ports_available);
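
The cpu_to_le32() calls removed above were double-converting on big-endian
machines: pci_write_config_dword() takes its value in CPU byte order and the
PCI config-space accessors do any bus-level byte swapping themselves.  The
intended usage is simply:

        u32 ports_available = 0xffffffff;

        /* value is native-endian; the accessor handles the ordering */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, ports_available);
        pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, &ports_available);
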
index 74af2c6287d2670b321e1edb317f01c0ed2f333b..0196f766df734f48352fd8c5217d08963aab444f 100644 (file)
@@ -163,8 +163,7 @@ static int ssb_hcd_probe(struct ssb_device *dev,
 
        /* TODO: Probably need checks here; is the core connected? */
 
-       if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
-           dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+       if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
                return -EOPNOTSUPP;
 
        usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
index c300bd2f7d1c53e0ee684b0c6f9cab185fb1c55c..0f228c46eedaab97c9351f784b8604d6144bb517 100644 (file)
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
        .remove =       usb_hcd_pci_remove,
        .shutdown =     uhci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        .driver =       {
                .pm =   &usb_hcd_pci_pm_ops
        },
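
usb_hcd_pci_pm_ops also provides runtime-PM callbacks, not just system-sleep
ones, so guarding the .pm pointer with CONFIG_PM_SLEEP hid it on kernels built
with runtime PM but without suspend support; CONFIG_PM is the umbrella that
covers both cases.  Roughly:

        #ifdef CONFIG_PM                /* runtime PM and/or system sleep */
                .driver = {
                        .pm = &usb_hcd_pci_pm_ops,      /* sleep + runtime callbacks */
                },
        #endif
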
index d033a0ec7f0d02bd3874ae5e7339d43e964d2611..f8548b72f7089c1d3e10bebf1f8c070759798b7e 100644 (file)
@@ -75,10 +75,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
                        pdev->name);
index 041c6ddb695c8ec6fa17d371fe7904de2e47d5ab..da6f56d996ce56f61cd1be20a093c2c16ff54702 100644 (file)
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                }
 
                /* Fell behind? */
-               if (uhci_frame_before_eq(frame, next)) {
+               if (!uhci_frame_before_eq(next, frame)) {
 
                        /* USB_ISO_ASAP: Round up to the first available slot */
                        if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                                                -qh->period;
 
                        /*
-                        * Not ASAP: Use the next slot in the stream.  If
-                        * the entire URB falls before the threshold, fail.
+                        * Not ASAP: Use the next slot in the stream,
+                        * no matter what.
                         */
                        else if (!uhci_frame_before_eq(next,
                                        frame + (urb->number_of_packets - 1) *
                                                qh->period))
-                               return -EXDEV;
+                               dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+                                               urb, frame,
+                                               (urb->number_of_packets - 1) *
+                                                       qh->period,
+                                               next);
                }
        }
 
index fae697ed0b708352e06e6e6135a2b49ece4ea34a..e8b4c56dcf62adf1f5326e8609087cd387fedc2e 100644 (file)
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
                if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
                        xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
        }
-       cmd->command_trb = xhci->cmd_ring->enqueue;
+       cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
        xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
        xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
  *  - Mark a port as being done with device resume,
  *    and ring the endpoint doorbells.
  *  - Stop the Synopsys redriver Compliance Mode polling.
+ *  - Drop and reacquire the xHCI lock, in order to wait for port resume.
  */
 static u32 xhci_get_port_status(struct usb_hcd *hcd,
                struct xhci_bus_state *bus_state,
                __le32 __iomem **port_array,
-               u16 wIndex, u32 raw_port_status)
+               u16 wIndex, u32 raw_port_status,
+               unsigned long flags)
+       __releases(&xhci->lock)
+       __acquires(&xhci->lock)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        return 0xffffffff;
                if (time_after_eq(jiffies,
                                        bus_state->resume_done[wIndex])) {
+                       int time_left;
+
                        xhci_dbg(xhci, "Resume USB2 port %d\n",
                                        wIndex + 1);
                        bus_state->resume_done[wIndex] = 0;
                        clear_bit(wIndex, &bus_state->resuming_ports);
+
+                       set_bit(wIndex, &bus_state->rexit_ports);
                        xhci_set_link_state(xhci, port_array, wIndex,
                                        XDEV_U0);
-                       xhci_dbg(xhci, "set port %d resume\n",
-                                       wIndex + 1);
-                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-                                       wIndex + 1);
-                       if (!slot_id) {
-                               xhci_dbg(xhci, "slot_id is zero\n");
-                               return 0xffffffff;
+
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       time_left = wait_for_completion_timeout(
+                                       &bus_state->rexit_done[wIndex],
+                                       msecs_to_jiffies(
+                                               XHCI_MAX_REXIT_TIMEOUT));
+                       spin_lock_irqsave(&xhci->lock, flags);
+
+                       if (time_left) {
+                               slot_id = xhci_find_slot_id_by_port(hcd,
+                                               xhci, wIndex + 1);
+                               if (!slot_id) {
+                                       xhci_dbg(xhci, "slot_id is zero\n");
+                                       return 0xffffffff;
+                               }
+                               xhci_ring_device(xhci, slot_id);
+                       } else {
+                               int port_status = xhci_readl(xhci,
+                                               port_array[wIndex]);
+                               xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+                                               XHCI_MAX_REXIT_TIMEOUT,
+                                               port_status);
+                               status |= USB_PORT_STAT_SUSPEND;
+                               clear_bit(wIndex, &bus_state->rexit_ports);
                        }
-                       xhci_ring_device(xhci, slot_id);
+
                        bus_state->port_c_suspend |= 1 << wIndex;
                        bus_state->suspended_ports &= ~(1 << wIndex);
                } else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        break;
                }
                status = xhci_get_port_status(hcd, bus_state, port_array,
-                               wIndex, temp);
+                               wIndex, temp, flags);
                if (status == 0xffffffff)
                        goto error;
 
@@ -1132,18 +1157,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                t1 = xhci_port_state_to_neutral(t1);
                if (t1 != t2)
                        xhci_writel(xhci, t2, port_array[port_index]);
-
-               if (hcd->speed != HCD_USB3) {
-                       /* enable remote wake up for USB 2.0 */
-                       __le32 __iomem *addr;
-                       u32 tmp;
-
-                       /* Get the port power control register address. */
-                       addr = port_array[port_index] + PORTPMSC;
-                       tmp = xhci_readl(xhci, addr);
-                       tmp |= PORT_RWE;
-                       xhci_writel(xhci, tmp, addr);
-               }
        }
        hcd->state = HC_STATE_SUSPENDED;
        bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1222,20 +1235,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                                xhci_ring_device(xhci, slot_id);
                } else
                        xhci_writel(xhci, temp, port_array[port_index]);
-
-               if (hcd->speed != HCD_USB3) {
-                       /* disable remote wake up for USB 2.0 */
-                       __le32 __iomem *addr;
-                       u32 tmp;
-
-                       /* Add one to the port status register address to get
-                        * the port power control register address.
-                        */
-                       addr = port_array[port_index] + PORTPMSC;
-                       tmp = xhci_readl(xhci, addr);
-                       tmp &= ~PORT_RWE;
-                       xhci_writel(xhci, tmp, addr);
-               }
        }
 
        (void) xhci_readl(xhci, &xhci->op_regs->command);
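
The GetPortStatus path above now drops xhci->lock and sleeps on a per-port
completion while the link finishes its RExit-to-U0 transition; the port-status
event handler (see the xhci-ring.c hunk further down) signals it.  Reduced to
the completion pattern, with the surrounding driver code assumed:

        /* once, at init (xhci-mem.c) */
        init_completion(&bus_state->rexit_done[wIndex]);

        /* waiter: hub control, called with xhci->lock held */
        set_bit(wIndex, &bus_state->rexit_ports);
        spin_unlock_irqrestore(&xhci->lock, flags);
        time_left = wait_for_completion_timeout(&bus_state->rexit_done[wIndex],
                        msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT));
        spin_lock_irqsave(&xhci->lock, flags);

        /* signaller: port event interrupt */
        if (test_and_clear_bit(faked_port_index, &bus_state->rexit_ports))
                complete(&bus_state->rexit_done[faked_port_index]);
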
index 53b972c2a09f10be38a474042d03757618bc76a4..83bcd13622c3466e655a00166bf6e8076ae198bc 100644 (file)
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        for (i = 0; i < USB_MAXCHILDREN; ++i) {
                xhci->bus_state[0].resume_done[i] = 0;
                xhci->bus_state[1].resume_done[i] = 0;
+               /* Only the USB 2.0 completions will ever be used. */
+               init_completion(&xhci->bus_state[1].rexit_done[i]);
        }
 
        if (scratchpad_alloc(xhci, flags))
index c2d495057eb538a74db9a01fc69af12442ca8d45..b8dffd59eb256e52786328e5e4f1919846a80d9c 100644 (file)
@@ -35,6 +35,9 @@
 #define PCI_VENDOR_ID_ETRON            0x1b6f
 #define PCI_DEVICE_ID_ASROCK_P67       0x7023
 
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI     0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
+
 static const char hcd_name[] = "xhci_hcd";
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -69,6 +72,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                "QUIRK: Fresco Logic xHC needs configure"
                                " endpoint cmd after reset endpoint");
                }
+               if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+                               pdev->revision == 0x4) {
+                       xhci->quirks |= XHCI_SLOW_SUSPEND;
+                       xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+                               "QUIRK: Fresco Logic xHC revision %u "
+                               "must be suspended extra slowly",
+                               pdev->revision);
+               }
                /* Fresco Logic confirms: all revisions of this chip do not
                 * support MSI, even though some of them claim to in their PCI
                 * capabilities.
@@ -110,6 +121,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
                xhci->quirks |= XHCI_AVOID_BEI;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+               /* Workaround for occasional spurious wakeups from S5 (or
+                * any other sleep) on Haswell machines with LPT and LPT-LP
+                * with the new Intel BIOS
+                */
+               xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -217,6 +237,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
                usb_put_hcd(xhci->shared_hcd);
        }
        usb_hcd_pci_remove(dev);
+
+       /* Workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(dev, PCI_D3hot);
+
        kfree(xhci);
 }
 
@@ -351,7 +376,7 @@ static struct pci_driver xhci_pci_driver = {
        /* suspend and resume implemented later */
 
        .shutdown =     usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        .driver = {
                .pm = &usb_hcd_pci_pm_ops
        },
index 411da1fc7ae8ad0550df9cecb8d5a4bdab0fa543..6bfbd80ec2b9edfa0a079767381d7d733d8c7035 100644 (file)
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
        return TRB_TYPE_LINK_LE32(link->control);
 }
 
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+       /* Enqueue pointer can be left pointing to the link TRB,
+        * we must handle that
+        */
+       if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+               return ring->enq_seg->next->trbs;
+       return ring->enqueue;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
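
xhci_find_next_enqueue() above exists because the ring's enqueue pointer may be
parked on a link TRB; a command that records xhci->cmd_ring->enqueue directly
would then hold a pointer that never matches the TRB the controller actually
executes, so its completion could never be attributed to it.  Callers elsewhere
in this diff switch to the helper, e.g.:

        cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
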
@@ -859,8 +869,12 @@ remove_finished_td:
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
-       ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
+
+       /* Clear stopped_td and stopped_trb if endpoint is not halted */
+       if (!(ep->ep_state & EP_HALTED)) {
+               ep->stopped_td = NULL;
+               ep->stopped_trb = NULL;
+       }
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                        inc_deq(xhci, xhci->cmd_ring);
                        return;
                }
+               /* There is no command to handle if we get a stop event when the
+                * command ring is empty, event->cmd_trb points to the next
+                * unset command
+                */
+               if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+                       return;
        }
 
        switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
                }
        }
 
+       /*
+        * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+        * RExit to a disconnect state).  If so, let the driver know it's
+        * out of the RExit state.
+        */
+       if (!DEV_SUPERSPEED(temp) &&
+                       test_and_clear_bit(faked_port_index,
+                               &bus_state->rexit_ports)) {
+               complete(&bus_state->rexit_done[faked_port_index]);
+               bogus_port_status = true;
+               goto cleanup;
+       }
+
        if (hcd->speed != HCD_USB3)
                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
                                        PORT_PLC);
index 49b6edb84a79eccfd5f935db8e03bad0957ee5ad..6e0d886bcce52c19361d321cf68c0abacf5b054c 100644 (file)
@@ -730,6 +730,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
+       /* Workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);
 
        xhci_cleanup_msix(xhci);
@@ -737,6 +740,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        xhci_readl(xhci, &xhci->op_regs->status));
+
+       /* Yet another workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
 }
 
 #ifdef CONFIG_PM
@@ -839,6 +846,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 int xhci_suspend(struct xhci_hcd *xhci)
 {
        int                     rc = 0;
+       unsigned int            delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
 
@@ -861,8 +869,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command &= ~CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
+
+       /* Some chips from Fresco Logic need an extraordinary delay */
+       delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
+
        if (xhci_handshake(xhci, &xhci->op_regs->status,
-                     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+                     STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
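
The XHCI_SLOW_SUSPEND handling above simply scales the normal halt timeout by
ten for the affected Fresco Logic revision.  Assuming XHCI_MAX_HALT_USEC keeps
its usual 16*1000 microsecond value, the handshake now polls STS_HALT for up to
roughly 160 ms instead of 16 ms:

        unsigned int delay = XHCI_MAX_HALT_USEC;        /* assumed 16 * 1000 us */

        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
        /* quirky controller: 16 ms of polling becomes 160 ms */
        xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT, delay);
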
@@ -2598,15 +2610,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        if (command) {
                cmd_completion = command->completion;
                cmd_status = &command->status;
-               command->command_trb = xhci->cmd_ring->enqueue;
-
-               /* Enqueue pointer can be left pointing to the link TRB,
-                * we must handle that
-                */
-               if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
-                       command->command_trb =
-                               xhci->cmd_ring->enq_seg->next->trbs;
-
+               command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
                list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
        } else {
                cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2618,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        }
        init_completion(cmd_completion);
 
-       cmd_trb = xhci->cmd_ring->dequeue;
+       cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        if (!ctx_change)
                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
                                udev->slot_id, must_succeed);
@@ -3439,14 +3443,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 
        /* Attempt to submit the Reset Device command to the command ring */
        spin_lock_irqsave(&xhci->lock, flags);
-       reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
-
-       /* Enqueue pointer can be left pointing to the link TRB,
-        * we must handle that
-        */
-       if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
-               reset_device_cmd->command_trb =
-                       xhci->cmd_ring->enq_seg->next->trbs;
+       reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 
        list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
        ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3647,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
        union xhci_trb *cmd_trb;
 
        spin_lock_irqsave(&xhci->lock, flags);
-       cmd_trb = xhci->cmd_ring->dequeue;
+       cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3782,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
                                slot_ctx->dev_info >> 27);
 
        spin_lock_irqsave(&xhci->lock, flags);
-       cmd_trb = xhci->cmd_ring->dequeue;
+       cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
                                        udev->slot_id);
        if (ret) {
index 46aa14894148962e3aff6c8028f8d07cbcb07951..941d5f59e4dcc254770bac770ba024e36a677bad 100644 (file)
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
        unsigned long           resume_done[USB_MAXCHILDREN];
        /* which ports have started to resume */
        unsigned long           resuming_ports;
+       /* Which ports are waiting on RExit to U0 transition. */
+       unsigned long           rexit_ports;
+       struct completion       rexit_done[USB_MAXCHILDREN];
 };
 
+
+/*
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+#define        XHCI_MAX_REXIT_TIMEOUT  (20 * 1000)
+
 static inline unsigned int hcd_index(struct usb_hcd *hcd)
 {
        if (hcd->speed == HCD_USB3)
@@ -1538,6 +1548,8 @@ struct xhci_hcd {
 #define XHCI_COMP_MODE_QUIRK   (1 << 14)
 #define XHCI_AVOID_BEI         (1 << 15)
 #define XHCI_PLAT              (1 << 16)
+#define XHCI_SLOW_SUSPEND      (1 << 17)
+#define XHCI_SPURIOUS_WAKEUP   (1 << 18)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
@@ -1840,6 +1852,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
                union xhci_trb *cmd_trb);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, unsigned int stream_id);
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
index e2b21c1d9c403088d704d12901c8049e329c360f..ba5f70f92888774c5ef900d44c6f8ad941e6b4e9 100644 (file)
@@ -246,6 +246,6 @@ config USB_EZUSB_FX2
 config USB_HSIC_USB3503
        tristate "USB3503 HSIC to USB20 Driver"
        depends on I2C
-       select REGMAP
+       select REGMAP_I2C
        help
          This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
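
Selecting bare REGMAP builds only the regmap core, not the I2C bus
implementation, so a driver that creates its register map over I2C fails to
link; REGMAP_I2C selects both.  A hedged sketch of the kind of call that needs
it, assuming the usb3503 driver sets up its regmap in the usual way:

        static const struct regmap_config usb3503_regmap_config = {
                .reg_bits = 8,
                .val_bits = 8,
        };

        regmap = devm_regmap_init_i2c(client, &usb3503_regmap_config);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
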
index 5c310c664218dec6d1e71874ee8dc84f0232fa19..790b22b296b11f527b2060a76e6aa89624d8d804 100644 (file)
@@ -89,7 +89,6 @@ struct am35x_glue {
        struct clk              *phy_clk;
        struct clk              *clk;
 };
-#define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
 /*
  * am35x_musb_enable - enable interrupts
@@ -452,14 +451,18 @@ static const struct musb_platform_ops am35x_ops = {
        .set_vbus       = am35x_musb_set_vbus,
 };
 
-static u64 am35x_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info am35x_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int am35x_probe(struct platform_device *pdev)
 {
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct am35x_glue               *glue;
-
+       struct platform_device_info     pinfo;
        struct clk                      *phy_clk;
        struct clk                      *clk;
 
@@ -471,12 +474,6 @@ static int am35x_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
        phy_clk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(phy_clk)) {
                dev_err(&pdev->dev, "failed to get PHY clock\n");
@@ -503,12 +500,7 @@ static int am35x_probe(struct platform_device *pdev)
                goto err6;
        }
 
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &am35x_dmamask;
-       musb->dev.coherent_dma_mask     = am35x_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
        glue->phy_clk                   = phy_clk;
        glue->clk                       = clk;
 
@@ -516,22 +508,17 @@ static int am35x_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err7;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err7;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = am35x_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = pdev->resource;
+       pinfo.num_res = pdev->num_resources;
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err7;
        }
 
@@ -550,9 +537,6 @@ err4:
        clk_put(phy_clk);
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
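
The musb glue conversions above and below collapse the
platform_device_alloc()/add_resources()/add_data()/add() sequence into one
platform_device_register_full() call.  Because platform_device_info carries a
dma_mask, the core allocates the mask storage and sets both dev.dma_mask and
dev.coherent_dma_mask, which is why the open-coded DMA mask assignments
disappear.  A condensed sketch of the new call sequence:

        struct platform_device_info pinfo = {
                .name      = "musb-hdrc",
                .id        = PLATFORM_DEVID_AUTO,
                .parent    = &pdev->dev,
                .res       = pdev->resource,
                .num_res   = pdev->num_resources,
                .data      = pdata,
                .size_data = sizeof(*pdata),
                .dma_mask  = DMA_BIT_MASK(32),
        };
        struct platform_device *musb = platform_device_register_full(&pinfo);

        if (IS_ERR(musb))
                return PTR_ERR(musb);
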
index d9ddf4122f37e6ddfd19661eb9cb1448306525b6..2f2c1cb364218833f40468429d1469299a56e406 100644 (file)
@@ -472,7 +472,11 @@ static const struct musb_platform_ops da8xx_ops = {
        .set_vbus       = da8xx_musb_set_vbus,
 };
 
-static u64 da8xx_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info da8xx_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int da8xx_probe(struct platform_device *pdev)
 {
@@ -480,7 +484,7 @@ static int da8xx_probe(struct platform_device *pdev)
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct da8xx_glue               *glue;
-
+       struct platform_device_info     pinfo;
        struct clk                      *clk;
 
        int                             ret = -ENOMEM;
@@ -491,12 +495,6 @@ static int da8xx_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
        clk = clk_get(&pdev->dev, "usb20");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clock\n");
@@ -510,12 +508,7 @@ static int da8xx_probe(struct platform_device *pdev)
                goto err4;
        }
 
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &da8xx_dmamask;
-       musb->dev.coherent_dma_mask     = da8xx_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
        glue->clk                       = clk;
 
        pdata->platform_ops             = &da8xx_ops;
@@ -535,22 +528,17 @@ static int da8xx_probe(struct platform_device *pdev)
        musb_resources[1].end = pdev->resource[1].end;
        musb_resources[1].flags = pdev->resource[1].flags;
 
-       ret = platform_device_add_resources(musb, musb_resources,
-                       ARRAY_SIZE(musb_resources));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err5;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err5;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = da8xx_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = musb_resources;
+       pinfo.num_res = ARRAY_SIZE(musb_resources);
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err5;
        }
 
@@ -563,9 +551,6 @@ err4:
        clk_put(clk);
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index ed0834e2b72eeaa6c9dbe73bdb77c2f433282b42..45aae0bbb8dfb2b03f9af13aecd767907ec01ebb 100644 (file)
@@ -505,7 +505,11 @@ static const struct musb_platform_ops davinci_ops = {
        .set_vbus       = davinci_musb_set_vbus,
 };
 
-static u64 davinci_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info davinci_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int davinci_probe(struct platform_device *pdev)
 {
@@ -513,6 +517,7 @@ static int davinci_probe(struct platform_device *pdev)
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct davinci_glue             *glue;
+       struct platform_device_info     pinfo;
        struct clk                      *clk;
 
        int                             ret = -ENOMEM;
@@ -523,12 +528,6 @@ static int davinci_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
        clk = clk_get(&pdev->dev, "usb");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clock\n");
@@ -542,12 +541,7 @@ static int davinci_probe(struct platform_device *pdev)
                goto err4;
        }
 
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &davinci_dmamask;
-       musb->dev.coherent_dma_mask     = davinci_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
        glue->clk                       = clk;
 
        pdata->platform_ops             = &davinci_ops;
@@ -567,22 +561,17 @@ static int davinci_probe(struct platform_device *pdev)
        musb_resources[1].end = pdev->resource[1].end;
        musb_resources[1].flags = pdev->resource[1].flags;
 
-       ret = platform_device_add_resources(musb, musb_resources,
-                       ARRAY_SIZE(musb_resources));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err5;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err5;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = davinci_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = musb_resources;
+       pinfo.num_res = ARRAY_SIZE(musb_resources);
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err5;
        }
 
@@ -595,9 +584,6 @@ err4:
        clk_put(clk);
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index 18e877ffe7b7d8c7393bdf43efdfd0d58e618011..cd70cc8861711015f5443e2b76e969209d9bdb3a 100644 (file)
@@ -921,6 +921,52 @@ static void musb_generic_disable(struct musb *musb)
 
 }
 
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+       void __iomem    *regs = musb->mregs;
+       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
+
+       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+
+       /*  Set INT enable registers, enable interrupts */
+       musb->intrtxe = musb->epmask;
+       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+       musb->intrrxe = musb->epmask & 0xfffe;
+       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
+       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+       musb_writeb(regs, MUSB_TESTMODE, 0);
+
+       /* put into basic highspeed mode and start session */
+       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+                       | MUSB_POWER_HSENAB
+                       /* ENSUSPEND wedges tusb */
+                       /* | MUSB_POWER_ENSUSPEND */
+                  );
+
+       musb->is_active = 0;
+       devctl = musb_readb(regs, MUSB_DEVCTL);
+       devctl &= ~MUSB_DEVCTL_SESSION;
+
+       /* session started after:
+        * (a) ID-grounded irq, host mode;
+        * (b) vbus present/connect IRQ, peripheral mode;
+        * (c) peripheral initiates, using SRP
+        */
+       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+                       (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+               musb->is_active = 1;
+       } else {
+               devctl |= MUSB_DEVCTL_SESSION;
+       }
+
+       musb_platform_enable(musb);
+       musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
 /*
  * Make the HDRC stop (disable interrupts, etc.);
  * reversible by musb_start
index 65f3917b4fc5fb55c95bf4157d70a1d7194a110d..1c5bf75ee8ff8a45a3da1fca7cb1d31920f0eeff 100644 (file)
@@ -503,6 +503,7 @@ static inline void musb_configure_ep0(struct musb *musb)
 extern const char musb_driver_name[];
 
 extern void musb_stop(struct musb *musb);
+extern void musb_start(struct musb *musb);
 
 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
index 4047cbb91bac4e541b914801fe8e12c92d923625..bd4138d80a48f243c9d4cb32527196d0024eb3d1 100644 (file)
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev)
        struct dsps_glue *glue;
        int ret;
 
+       if (!strcmp(pdev->name, "musb-hdrc"))
+               return -ENODEV;
+
        match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
        if (!match) {
                dev_err(&pdev->dev, "fail to get matching of_match struct\n");
index 9a08679d204dfe9ee6fb1ad421813ea329c25abd..3671898a4535b3cf2de3c0e188f4947485d2760d 100644 (file)
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb)
        musb->g.max_speed = USB_SPEED_HIGH;
        musb->g.speed = USB_SPEED_UNKNOWN;
 
+       MUSB_DEV_MODE(musb);
+       musb->xceiv->otg->default_a = 0;
+       musb->xceiv->state = OTG_STATE_B_IDLE;
+
        /* this "gadget" abstracts/virtualizes the controller */
        musb->g.name = musb_driver_name;
        musb->g.is_otg = 1;
@@ -1855,6 +1859,8 @@ static int musb_gadget_start(struct usb_gadget *g,
        musb->xceiv->state = OTG_STATE_B_IDLE;
        spin_unlock_irqrestore(&musb->lock, flags);
 
+       musb_start(musb);
+
        /* REVISIT:  funcall to other code, which also
         * handles power budgeting ... this way also
         * ensures HdrcStart is indirectly called.
index a523950c2b32e66e07f7c8bd78daa181f691f25c..d1d6b83aabca61df43dffccf57ccbe7687cbe399 100644 (file)
 
 #include "musb_core.h"
 
-/*
-* Program the HDRC to start (enable interrupts, dma, etc.).
-*/
-static void musb_start(struct musb *musb)
-{
-       void __iomem    *regs = musb->mregs;
-       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
-
-       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
-
-       /*  Set INT enable registers, enable interrupts */
-       musb->intrtxe = musb->epmask;
-       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
-       musb->intrrxe = musb->epmask & 0xfffe;
-       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
-       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
-
-       musb_writeb(regs, MUSB_TESTMODE, 0);
-
-       /* put into basic highspeed mode and start session */
-       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
-                                               | MUSB_POWER_HSENAB
-                                               /* ENSUSPEND wedges tusb */
-                                               /* | MUSB_POWER_ENSUSPEND */
-                                               );
-
-       musb->is_active = 0;
-       devctl = musb_readb(regs, MUSB_DEVCTL);
-       devctl &= ~MUSB_DEVCTL_SESSION;
-
-       /* session started after:
-        * (a) ID-grounded irq, host mode;
-        * (b) vbus present/connect IRQ, peripheral mode;
-        * (c) peripheral initiates, using SRP
-        */
-       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
-           (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
-               musb->is_active = 1;
-       } else {
-               devctl |= MUSB_DEVCTL_SESSION;
-       }
-
-       musb_platform_enable(musb);
-       musb_writeb(regs, MUSB_DEVCTL, devctl);
-}
-
 static void musb_port_suspend(struct musb *musb, bool do_suspend)
 {
        struct usb_otg  *otg = musb->xceiv->otg;
index b3b3ed723882ffab75829e7c9bc1b08b42e1605d..4432314d70ee18f1dfe0e092c487dcab224c1742 100644 (file)
@@ -1152,7 +1152,11 @@ static const struct musb_platform_ops tusb_ops = {
        .set_vbus       = tusb_musb_set_vbus,
 };
 
-static u64 tusb_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info tusb_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int tusb_probe(struct platform_device *pdev)
 {
@@ -1160,7 +1164,7 @@ static int tusb_probe(struct platform_device *pdev)
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct tusb6010_glue            *glue;
-
+       struct platform_device_info     pinfo;
        int                             ret = -ENOMEM;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
@@ -1169,18 +1173,7 @@ static int tusb_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &tusb_dmamask;
-       musb->dev.coherent_dma_mask     = tusb_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
 
        pdata->platform_ops             = &tusb_ops;
 
@@ -1204,31 +1197,23 @@ static int tusb_probe(struct platform_device *pdev)
        musb_resources[2].end = pdev->resource[2].end;
        musb_resources[2].flags = pdev->resource[2].flags;
 
-       ret = platform_device_add_resources(musb, musb_resources,
-                       ARRAY_SIZE(musb_resources));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err3;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err3;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = tusb_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = musb_resources;
+       pinfo.num_res = ARRAY_SIZE(musb_resources);
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err3;
        }
 
        return 0;
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index b2f29c9aebbfdb15e9e9be8004f2e8d7bf5ed60d..02799a5efcd448b8c38ee85de7cad8452ffbb617 100644 (file)
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
 
 /* platform driver interface */
 
-static int __init gpio_vbus_probe(struct platform_device *pdev)
+static int gpio_vbus_probe(struct platform_device *pdev)
 {
        struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
        struct gpio_vbus_data *gpio_vbus;
@@ -349,7 +349,7 @@ err_gpio:
        return err;
 }
 
-static int __exit gpio_vbus_remove(struct platform_device *pdev)
+static int gpio_vbus_remove(struct platform_device *pdev)
 {
        struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
        struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
 };
 #endif
 
-/* NOTE:  the gpio-vbus device may *NOT* be hotplugged */
-
 MODULE_ALIAS("platform:gpio-vbus");
 
 static struct platform_driver gpio_vbus_driver = {
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = {
                .pm = &gpio_vbus_dev_pm_ops,
 #endif
        },
-       .remove  = __exit_p(gpio_vbus_remove),
+       .probe          = gpio_vbus_probe,
+       .remove         = gpio_vbus_remove,
 };
 
-module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe);
+module_platform_driver(gpio_vbus_driver);
 
 MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
 MODULE_AUTHOR("Philipp Zabel");
index fc15694d3031bbf83b76e932914e44251280d491..4e8a0405f956c478491a1ed6f18d65a9afc07e9a 100644 (file)
@@ -79,7 +79,7 @@ static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
                        return &dpll_map[i].params;
        }
 
-       return 0;
+       return NULL;
 }
 
 static int omap_usb3_suspend(struct usb_phy *x, int suspend)
index c454bfa22a106184bbaee567e4f49fbad5d8d057..ddb9c51f2c999c04a27aab9d7a2924c429205672 100644 (file)
@@ -60,7 +60,7 @@ config USB_SERIAL_SIMPLE
                - Suunto ANT+ USB device.
                - Fundamental Software dongle.
                - HP4x calculators
-               - a number of Motoroloa phones
+               - a number of Motorola phones
                - Siemens USB/MPI adapter.
                - ViVOtech ViVOpay USB device.
                - Infineon Modem Flashloader USB interface
index 1cf6f125f5f078624b41bd1256686d35f28ee4b7..acaee066b99aa10e5ecf2a8e75cffd31899b6507 100644 (file)
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
 
 #define HUAWEI_VENDOR_ID                       0x12D1
 #define HUAWEI_PRODUCT_E173                    0x140C
+#define HUAWEI_PRODUCT_E1750                   0x1406
 #define HUAWEI_PRODUCT_K4505                   0x1464
 #define HUAWEI_PRODUCT_K3765                   0x1465
 #define HUAWEI_PRODUCT_K4605                   0x14C6
@@ -450,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
 #define CHANGHONG_VENDOR_ID                    0x2077
 #define CHANGHONG_PRODUCT_CH690                        0x7001
 
+/* Inovia */
+#define INOVIA_VENDOR_ID                       0x20a6
+#define INOVIA_SEW858                          0x1105
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -567,6 +572,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
@@ -686,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
 
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -1254,7 +1477,9 @@ static const struct usb_device_id option_ids[] = {
 
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
-       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist
+       },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -1342,6 +1567,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index e7a84f0f517969fec4bbb59ac270eeb97fbf22d3..bedf8e47713be02dfc80b8a2e3512cb24a455a15 100644 (file)
@@ -139,6 +139,7 @@ enum pl2303_type {
        HX_TA,          /* HX(A) / X(A) / TA version  */ /* TODO: improve */
        HXD_EA_RA_SA,   /* HXD / EA / RA / SA version */ /* TODO: improve */
        TB,             /* TB version */
+       HX_CLONE,       /* Cheap and less functional clone of the HX chip */
 };
 /*
  * NOTE: don't know the difference between type 0 and type 1,
@@ -206,8 +207,23 @@ static int pl2303_startup(struct usb_serial *serial)
                 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
                 */
                if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
-                       type = HX_TA;
-                       type_str = "X/HX/TA";
+                       /* Check if the device is a clone */
+                       pl2303_vendor_read(0x9494, 0, serial, buf);
+                       /*
+                        * NOTE: Not sure if this read is really needed.
+                        * The HX returns 0x00, the clone 0x02, but the Windows
+                        * driver seems to ignore the value and continues.
+                        */
+                       pl2303_vendor_write(0x0606, 0xaa, serial);
+                       pl2303_vendor_read(0x8686, 0, serial, buf);
+                       if (buf[0] != 0xaa) {
+                               type = HX_CLONE;
+                               type_str = "X/HX clone (limited functionality)";
+                       } else {
+                               type = HX_TA;
+                               type_str = "X/HX/TA";
+                       }
+                       pl2303_vendor_write(0x0606, 0x00, serial);
                } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
                                                                     == 0x400) {
                        type = HXD_EA_RA_SA;
@@ -305,8 +321,9 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
 {
        /*
         * NOTE: Only the values defined in baud_sup are supported !
-        *       => if unsupported values are set, the PL2303 seems to
-        *          use 9600 baud (at least my PL2303X always does)
+        * => if unsupported values are set, the PL2303 uses 9600 baud instead
+        * => HX clones just don't work at unsupported baud rates < 115200 baud;
+        *    for baud rates > 115200 they run at 115200 baud
         */
        const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
                                 4800, 7200, 9600, 14400, 19200, 28800, 38400,
@@ -316,14 +333,14 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
         * NOTE: With the exception of type_0/1 devices, the following
         * additional baud rates are supported (tested with HX rev. 3A only):
         * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
-        * 403200, 806400.      (*: not HX)
+        * 403200, 806400.      (*: not HX or HX clones)
         *
         * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
-        *                 type_0+1: 1228800; RA: 921600; SA: 115200
+        *                 type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
         *
         * As long as we are not using this encoding method for anything else
-        * than the type_0+1 and HX chips, there is no point in complicating
-        * the code to support them.
+        * than the type_0+1, HX and HX clone chips, there is no point in
+        * complicating the code to support them.
         */
        int i;
 
@@ -347,6 +364,8 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
                baud = min_t(int, baud, 6000000);
        else if (type == type_0 || type == type_1)
                baud = min_t(int, baud, 1228800);
+       else if (type == HX_CLONE)
+               baud = min_t(int, baud, 115200);
        /* Direct (standard) baud rate encoding method */
        put_unaligned_le32(baud, buf);
 
@@ -359,7 +378,8 @@ static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
        /*
         * Divisor based baud rate encoding method
         *
-        * NOTE: it's not clear if the type_0/1 chips support this method
+        * NOTE: HX clones do NOT support this method.
+        * It's not clear if the type_0/1 chips support it.
         *
         * divisor = 12MHz * 32 / baudrate = 2^A * B
         *
@@ -452,7 +472,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
         * 1) Direct method: encodes the baud rate value directly
         *    => supported by all chip types
         * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
-        *    => supported by HX chips (and likely not by type_0/1 chips)
+        *    => not supported by HX clones (and likely not by type_0/1 chips)
         *
         * NOTE: Although the divisor based baud rate encoding method is much
         * more flexible, some of the standard baud rate values can not be
@@ -460,7 +480,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
         * the device likely uses the same baud rate generator for both methods
         * so that there is likely no difference.
         */
-       if (type == type_0 || type == type_1)
+       if (type == type_0 || type == type_1 || type == HX_CLONE)
                baud = pl2303_baudrate_encode_direct(baud, type, buf);
        else
                baud = pl2303_baudrate_encode_divisor(baud, type, buf);
@@ -813,6 +833,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
        result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
                                 0, NULL, 0, 100);
+       /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
        if (result)
                dev_err(&port->dev, "error sending break = %d\n", result);
 }
index 760b78560f67fd7e7d905d53dff2cb7a46f2919f..c9a35697ebe9a6e131d5a9c145e5f3f3415dd5bf 100644 (file)
@@ -190,6 +190,7 @@ static struct usb_device_id ti_id_table_combined[] = {
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
        { }     /* terminator */
 };
index 94d75edef77fdf34e4f5e341e5952e532f03f09e..18509e6c21ab84e7d3133f7a0e428ba462d2a24b 100644 (file)
@@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
                /*
                 * Many devices do not respond properly to READ_CAPACITY_16.
                 * Tell the SCSI layer to try READ_CAPACITY_10 first.
+                * However, some USB 3.0 drive enclosures return capacity
+                * modulo 2TB. Those must use READ_CAPACITY_16.
                 */
-               sdev->try_rc_10_first = 1;
+               if (!(us->fflags & US_FL_NEEDS_CAP16))
+                       sdev->try_rc_10_first = 1;
 
                /* assume SPC3 or later devices support sense size > 18 */
                if (sdev->scsi_level > SCSI_SPC_2)
index c015f2c16729c5b4030fc829fae9a3cfb90acde6..de32cfa5bfa6ca0772d0955ea83b9b98dd698ea5 100644 (file)
@@ -1925,6 +1925,13 @@ UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com> */
+UNUSUAL_DEV(  0x174c, 0x55aa, 0x0100, 0x0100,
+               "ASMedia",
+               "AS2105",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NEEDS_CAP16),
+
 /* Reported by Jesse Feddema <jdfeddema@gmail.com> */
 UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
                "Yarvik",
index a9807dea3887af0afe44e545784f70d46efa40dd..4fb7a8f83c8a99ff8d3412a5328b2407c98409a8 100644 (file)
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        long npage;
        int ret = 0, prot = 0;
        uint64_t mask;
+       struct vfio_dma *dma = NULL;
+       unsigned long pfn;
 
        end = map->iova + map->size;
 
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        }
 
        for (iova = map->iova; iova < end; iova += size, vaddr += size) {
-               struct vfio_dma *dma = NULL;
-               unsigned long pfn;
                long i;
 
                /* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                if (npage <= 0) {
                        WARN_ON(!npage);
                        ret = (int)npage;
-                       break;
+                       goto out;
                }
 
                /* Verify pages are not already mapped */
                for (i = 0; i < npage; i++) {
                        if (iommu_iova_to_phys(iommu->domain,
                                               iova + (i << PAGE_SHIFT))) {
-                               vfio_unpin_pages(pfn, npage, prot, true);
                                ret = -EBUSY;
-                               break;
+                               goto out_unpin;
                        }
                }
 
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                if (ret) {
                        if (ret != -EBUSY ||
                            map_try_harder(iommu, iova, pfn, npage, prot)) {
-                               vfio_unpin_pages(pfn, npage, prot, true);
-                               break;
+                               goto out_unpin;
                        }
                }
 
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
                        if (!dma) {
                                iommu_unmap(iommu->domain, iova, size);
-                               vfio_unpin_pages(pfn, npage, prot, true);
                                ret = -ENOMEM;
-                               break;
+                               goto out_unpin;
                        }
 
                        dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                }
        }
 
-       if (ret) {
-               struct vfio_dma *tmp;
-               iova = map->iova;
-               size = map->size;
-               while ((tmp = vfio_find_dma(iommu, iova, size))) {
-                       int r = vfio_remove_dma_overlap(iommu, iova,
-                                                       &size, tmp);
-                       if (WARN_ON(r || !size))
-                               break;
-               }
+       WARN_ON(ret);
+       mutex_unlock(&iommu->lock);
+       return ret;
+
+out_unpin:
+       vfio_unpin_pages(pfn, npage, prot, true);
+
+out:
+       iova = map->iova;
+       size = map->size;
+       while ((dma = vfio_find_dma(iommu, iova, size))) {
+               int r = vfio_remove_dma_overlap(iommu, iova,
+                                               &size, dma);
+               if (WARN_ON(r || !size))
+                       break;
        }
 
        mutex_unlock(&iommu->lock);
index 4b79a1f2f901e86eb7726fd64d1af3bac66e7385..ce5221fa393a8db2ca9f4de61817ae230f3f0bc8 100644 (file)
@@ -461,7 +461,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
                u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
-        }
+       }
 
        tcm_vhost_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
        }
        se_sess = tv_nexus->tvn_se_sess;
 
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       if (tag < 0) {
+               pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
        cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        pages = cmd->tvc_upages;
@@ -1373,21 +1378,30 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
        return 0;
 }
 
+static void vhost_scsi_free(struct vhost_scsi *vs)
+{
+       if (is_vmalloc_addr(vs))
+               vfree(vs);
+       else
+               kfree(vs);
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
        struct vhost_scsi *vs;
        struct vhost_virtqueue **vqs;
-       int r, i;
+       int r = -ENOMEM, i;
 
-       vs = kzalloc(sizeof(*vs), GFP_KERNEL);
-       if (!vs)
-               return -ENOMEM;
+       vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!vs) {
+               vs = vzalloc(sizeof(*vs));
+               if (!vs)
+                       goto err_vs;
+       }
 
        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
-       if (!vqs) {
-               kfree(vs);
-               return -ENOMEM;
-       }
+       if (!vqs)
+               goto err_vqs;
 
        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
        vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
@@ -1407,14 +1421,18 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 
        tcm_vhost_init_inflight(vs, NULL);
 
-       if (r < 0) {
-               kfree(vqs);
-               kfree(vs);
-               return r;
-       }
+       if (r < 0)
+               goto err_init;
 
        f->private_data = vs;
        return 0;
+
+err_init:
+       kfree(vqs);
+err_vqs:
+       vhost_scsi_free(vs);
+err_vs:
+       return r;
 }
 
 static int vhost_scsi_release(struct inode *inode, struct file *f)
@@ -1431,7 +1449,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(vs);
        kfree(vs->dev.vqs);
-       kfree(vs);
+       vhost_scsi_free(vs);
        return 0;
 }
 
index 9a9502a4aa5089519d338ca62b07a557069fb00d..69068e0d8f31af183c075afe3026d60b09a0b55e 100644 (file)
@@ -161,9 +161,11 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
        if (list_empty(&work->node)) {
                list_add_tail(&work->node, &dev->work_list);
                work->queue_seq++;
+               spin_unlock_irqrestore(&dev->work_lock, flags);
                wake_up_process(dev->worker);
+       } else {
+               spin_unlock_irqrestore(&dev->work_lock, flags);
        }
-       spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
index 0a2cce7285be99dd8aaa7983a43bbeae7e6b2550..afe4702a5528a0ea9675cdabfce8ee4243e3799a 100644 (file)
@@ -10,6 +10,7 @@
  *
  *  ARM PrimeCell PL110 Color LCD Controller
  */
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -551,6 +552,10 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
        if (!board)
                return -EINVAL;
 
+       ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto out;
+
        ret = amba_request_regions(dev, NULL);
        if (ret) {
                printk(KERN_ERR "CLCD: unable to reserve regs region\n");
index 94a403a9717ae9f84c941e334098bbc0c0713bbb..5d05555fe841d2a4eaac7d230e498e0c51f5d841 100644 (file)
@@ -21,6 +21,9 @@
 #include <asm/backlight.h>
 #endif
 
+static struct list_head backlight_dev_list;
+static struct mutex backlight_dev_list_mutex;
+
 static const char *const backlight_types[] = {
        [BACKLIGHT_RAW] = "raw",
        [BACKLIGHT_PLATFORM] = "platform",
@@ -349,10 +352,32 @@ struct backlight_device *backlight_device_register(const char *name,
        mutex_unlock(&pmac_backlight_mutex);
 #endif
 
+       mutex_lock(&backlight_dev_list_mutex);
+       list_add(&new_bd->entry, &backlight_dev_list);
+       mutex_unlock(&backlight_dev_list_mutex);
+
        return new_bd;
 }
 EXPORT_SYMBOL(backlight_device_register);
 
+bool backlight_device_registered(enum backlight_type type)
+{
+       bool found = false;
+       struct backlight_device *bd;
+
+       mutex_lock(&backlight_dev_list_mutex);
+       list_for_each_entry(bd, &backlight_dev_list, entry) {
+               if (bd->props.type == type) {
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&backlight_dev_list_mutex);
+
+       return found;
+}
+EXPORT_SYMBOL(backlight_device_registered);
+
 /**
  * backlight_device_unregister - unregisters a backlight device object.
  * @bd: the backlight device object to be unregistered and freed.
@@ -364,6 +389,10 @@ void backlight_device_unregister(struct backlight_device *bd)
        if (!bd)
                return;
 
+       mutex_lock(&backlight_dev_list_mutex);
+       list_del(&bd->entry);
+       mutex_unlock(&backlight_dev_list_mutex);
+
 #ifdef CONFIG_PMAC_BACKLIGHT
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight == bd)
@@ -499,6 +528,8 @@ static int __init backlight_class_init(void)
 
        backlight_class->dev_groups = bl_device_groups;
        backlight_class->pm = &backlight_class_dev_pm_ops;
+       INIT_LIST_HEAD(&backlight_dev_list);
+       mutex_init(&backlight_dev_list_mutex);
        return 0;
 }
 
index 75dca19bf2149a968f733ed1ff8a0cd468baa745..6ac755270ab46d1a3ef830c89338c0daab519664 100644 (file)
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
        if (IS_ERR(ctrl->clk)) {
                dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
                ret = -ENOENT;
-               goto failed_get_clk;
+               goto failed;
        }
        clk_prepare_enable(ctrl->clk);
 
@@ -551,21 +551,8 @@ failed_path_init:
                path_deinit(path_plat);
        }
 
-       if (ctrl->clk) {
-               devm_clk_put(ctrl->dev, ctrl->clk);
-               clk_disable_unprepare(ctrl->clk);
-       }
-failed_get_clk:
-       devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
+       clk_disable_unprepare(ctrl->clk);
 failed:
-       if (ctrl) {
-               if (ctrl->reg_base)
-                       devm_iounmap(ctrl->dev, ctrl->reg_base);
-               devm_release_mem_region(ctrl->dev, res->start,
-                               resource_size(res));
-               devm_kfree(ctrl->dev, ctrl);
-       }
-
        dev_err(&pdev->dev, "device init failed\n");
 
        return ret;
index d250ed0f806d3bf66d858e9f633852f3baa58f4d..27197a8048c0a7aa9ffcbd3c8a2280ae12866dcc 100644 (file)
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
                break;
        case 3:
                bits_per_pixel = 32;
+               break;
        case 1:
        default:
                return -EINVAL;
index 7ef079c146e7242c9edee33cc43bc0cd3c850ce1..c172a5281f9e6c9369b0cec236e3bbf2abb875ac 100644 (file)
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
                        info->monspecs.modedb, 16)) {
                printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
+               err = -EINVAL;
                goto err_map_video;
        }
 
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
               info->fix.smem_len >> 10, info->var.xres,
               info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
 
-       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+       err = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (err < 0)
                goto err_map_video;
 
        err = register_framebuffer(info);
index 171821ddd78de381a866e9912bece023a8fcaca5..ba5b40f581f6fdfe1849a3d4d62c8f38ea65dab1 100644 (file)
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
                return -EINVAL;
        }
 
-       timing_np = of_find_node_by_name(np, name);
+       timing_np = of_get_child_by_name(np, name);
        if (!timing_np) {
                pr_err("%s: could not find node '%s'\n",
                        of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
        struct display_timings *disp;
 
        if (!np) {
-               pr_err("%s: no devicenode given\n", of_node_full_name(np));
+               pr_err("%s: no device node given\n", of_node_full_name(np));
                return NULL;
        }
 
-       timings_np = of_find_node_by_name(np, "display-timings");
+       timings_np = of_get_child_by_name(np, "display-timings");
        if (!timings_np) {
                pr_err("%s: could not find display-timings node\n",
                        of_node_full_name(np));
index 6c90885b094020f33b74d591db6a93edcbd4aeb7..10b25e7cd878c6e7a5657fd8c1a4865b0935c004 100644 (file)
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
 
 config DISPLAY_PANEL_DSI_CM
        tristate "Generic DSI Command Mode Panel"
+       depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DSI command mode panels.
 
index 1b60698f141ed983e2661743f2a5c79f471a02a6..ccd9073f706f6d59242a5b1866d74dcd2c9bffa6 100644 (file)
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index bc5f8ceda371b1b26308db7c644c6913445c8b18..63d88ee6dfe410469215455795bd4b63b031a040 100644 (file)
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index c5826716d6abbb8b28e85ccd1d70aaa8c0058b44..9abe2c039ae9c44f0ea2cb2a3362c021e9273df4 100644 (file)
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index 02a7340111dfbf4b856122aae670bdbff2c34150..477975009eee87e89c34cc773e2d5a8818f918d6 100644 (file)
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(&pdev->dev);
+       pm_runtime_irq_safe(&pdev->dev);
 
        r = dispc_runtime_get();
        if (r)
index 47ca86c5c6c0b3d1d609a001210e5eb132616076..d838ba829459400acc86388cbeda1bab77f57b24 100644 (file)
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        (info->var.bits_per_pixel * info->var.xres_virtual);
        if (info->var.yres_virtual < info->var.yres) {
                dev_err(info->device, "virtual vertical size smaller than real\n");
-               goto err_find_mode;
-       }
-
-       /* maximize virtual vertical size for fast scrolling */
-       info->var.yres_virtual = info->fix.smem_len * 8 /
-                       (info->var.bits_per_pixel * info->var.xres_virtual);
-       if (info->var.yres_virtual < info->var.yres) {
-               dev_err(info->device, "virtual vertical size smaller than real\n");
+               rc = -EINVAL;
                goto err_find_mode;
        }
 
index c7c64f18773d87d5f23e81ec90ba7d8bfaee504a..fa932c2f7d97276b66199eda06b6edb6565208bb 100644 (file)
@@ -613,6 +613,9 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
        sl = dev_to_w1_slave(dev);
        fops = sl->family->fops;
 
+       if (!fops)
+               return 0;
+
        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                /* if the family driver needs to initialize something... */
@@ -713,7 +716,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
        atomic_set(&sl->refcnt, 0);
        init_completion(&sl->released);
 
+       /* slave modules need to be loaded in a context with unlocked mutex */
+       mutex_unlock(&dev->mutex);
        request_module("w1-family-0x%0x", rn->family);
+       mutex_lock(&dev->mutex);
 
        spin_lock(&w1_flock);
        f = w1_family_registered(rn->family);
index d1d53f301de7589960ea07322fdccde47fb3f7fa..6df632e0bb555d1841bae04c4a8ee5862a50896d 100644 (file)
@@ -418,8 +418,6 @@ config BFIN_WDT
 
 # FRV Architecture
 
-# H8300 Architecture
-
 # X86 (i386 + ia64 + x86_64) Architecture
 
 config ACQUIRE_WDT
index 6c5bb274d3cd22dfa4814e0c4a341e042dcd381c..8c7b8bcbbdc5b83d6f22584a144de9a3dbc3837e 100644 (file)
@@ -66,8 +66,6 @@ obj-$(CONFIG_BFIN_WDT) += bfin_wdt.o
 
 # FRV Architecture
 
-# H8300 Architecture
-
 # X86 (i386 + ia64 + x86_64) Architecture
 obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
 obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
index 5be5e3d14f794a3bdc446b78b5d0bb3fcb9b7fa9..19f3c3fc65f4c9af0ffa4a25cf8410e308a5fda8 100644 (file)
@@ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev,
                return -ENODEV;
        }
 
+       /*
+        * Ignore all auxiliary iLO devices with the following PCI ID
+        */
+       if (dev->subsystem_device == 0x1979)
+               return -ENODEV;
+
        if (pci_enable_device(dev)) {
                dev_warn(&dev->dev,
                        "Not possible to enable PCI Device: 0x%x:0x%x.\n",
index 491419e0772a83f89ae34f16a96750d2349407d1..5c3d4df63e6835f534cb57a3eca724ce08c3ba47 100644 (file)
@@ -35,7 +35,7 @@
 #define KEMPLD_WDT_STAGE_TIMEOUT(x)    (0x1b + (x) * 4)
 #define KEMPLD_WDT_STAGE_CFG(x)                (0x18 + (x))
 #define STAGE_CFG_GET_PRESCALER(x)     (((x) & 0x30) >> 4)
-#define STAGE_CFG_SET_PRESCALER(x)     (((x) & 0x30) << 4)
+#define STAGE_CFG_SET_PRESCALER(x)     (((x) & 0x3) << 4)
 #define STAGE_CFG_PRESCALER_MASK       0x30
 #define STAGE_CFG_ACTION_MASK          0x7
 #define STAGE_CFG_ASSERT               (1 << 3)
index 1f94b42764aabb95ab9d2cb38cda01611123df56..f6caa77151c74a4fc827d933715c92c57da5e0b7 100644 (file)
@@ -146,7 +146,7 @@ static const struct watchdog_ops sunxi_wdt_ops = {
        .set_timeout    = sunxi_wdt_set_timeout,
 };
 
-static int __init sunxi_wdt_probe(struct platform_device *pdev)
+static int sunxi_wdt_probe(struct platform_device *pdev)
 {
        struct sunxi_wdt_dev *sunxi_wdt;
        struct resource *res;
@@ -187,7 +187,7 @@ static int __init sunxi_wdt_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit sunxi_wdt_remove(struct platform_device *pdev)
+static int sunxi_wdt_remove(struct platform_device *pdev)
 {
        struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
 
index 42913f131dc2b051cdb89db52e72726bb228dd61..c9b0c627fe7e6d8513dd4d43ba18cb65cfbba132 100644 (file)
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
 
        case WDIOC_GETSTATUS:
        case WDIOC_GETBOOTSTATUS:
-               return put_user(0, p);
+               error = put_user(0, p);
+               break;
 
        case WDIOC_KEEPALIVE:
                ts72xx_wdt_kick(wdt);
index a50c6e3a7cc4824db07f43e84732ee64a58b65cb..b232908a61925724bb61bc0f8a09ecbca6b753e6 100644 (file)
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);
 
-       scratch_page = get_balloon_scratch_page();
-
        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
                scrub_page(page);
 
+               /*
+                * Ballooned out frames are effectively replaced with
+                * a scratch frame.  Ensure direct mappings and the
+                * p2m are consistent.
+                */
+               scratch_page = get_balloon_scratch_page();
 #ifdef CONFIG_XEN_HAVE_PVMMU
                if (xen_pv_domain() && !PageHighMem(page)) {
                        ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        BUG_ON(ret);
                }
 #endif
-       }
-
-       /* Ensure that ballooned highmem pages don't have kmaps. */
-       kmap_flush_unused();
-       flush_tlb_all();
-
-       /* No more mappings: invalidate P2M and add to balloon. */
-       for (i = 0; i < nr_pages; i++) {
-               pfn = mfn_to_pfn(frame_list[i]);
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        unsigned long p;
                        p = page_to_pfn(scratch_page);
                        __set_phys_to_machine(pfn, pfn_to_mfn(p));
                }
+               put_balloon_scratch_page();
+
                balloon_append(pfn_to_page(pfn));
        }
 
-       put_balloon_scratch_page();
+       /* Ensure that ballooned highmem pages don't have kmaps. */
+       kmap_flush_unused();
+       flush_tlb_all();
 
        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents   = nr_pages;
index a9ea73d6dcf311b7703af1225fb71698da2a4fc3..a69260f27555df85894618dc1647422971914dc4 100644 (file)
@@ -90,7 +90,7 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
 
        v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
                                                &v9fs_cache_session_index_def,
-                                               v9ses);
+                                               v9ses, true);
        p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
                 v9ses, v9ses->fscache);
 }
@@ -204,7 +204,7 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
        v9ses = v9fs_inode2v9ses(inode);
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
-                                                 v9inode);
+                                                 v9inode, true);
 
        p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
                 inode, v9inode->fscache);
@@ -239,13 +239,12 @@ void v9fs_cache_inode_flush_cookie(struct inode *inode)
 void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
 {
        struct v9fs_inode *v9inode = V9FS_I(inode);
-       struct p9_fid *fid;
 
        if (!v9inode->fscache)
                return;
 
        spin_lock(&v9inode->fscache_lock);
-       fid = filp->private_data;
+
        if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
                v9fs_cache_inode_flush_cookie(inode);
        else
@@ -271,7 +270,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
        v9ses = v9fs_inode2v9ses(inode);
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
-                                                 v9inode);
+                                                 v9inode, true);
        p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
                 inode, old, v9inode->fscache);
 
index 40cc54ced5d97621b81bc96ab8b90f3644f72a4e..2f96754910950b59b26ac4a2046808cc7bf043c1 100644 (file)
@@ -101,6 +101,18 @@ static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
 
 #else /* CONFIG_9P_FSCACHE */
 
+static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
+{
+}
+
 static inline int v9fs_fscache_release_page(struct page *page,
                                            gfp_t gfp) {
        return 1;
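
The empty inline stubs added here are what let the later hunks in vfs_file.c, vfs_inode.c and vfs_inode_dotl.c drop their #ifdef CONFIG_9P_FSCACHE guards: with the option off, the calls compile away to nothing. The pattern in isolation (header sketch; the real declarations for the CONFIG_9P_FSCACHE=y case live elsewhere in this header):

#ifdef CONFIG_9P_FSCACHE
extern void v9fs_cache_inode_get_cookie(struct inode *inode);
#else
static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
{
        /* fscache compiled out: callers need no #ifdef, the call is a no-op */
}
#endif
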
index 9ff073f4090afee750e4058129427d4bd6f6117f..da0821bc05b9d2f56fb56e63c99a1b58ae57895d 100644 (file)
@@ -241,9 +241,8 @@ static int v9fs_launder_page(struct page *page)
  * v9fs_direct_IO - 9P address space operation for direct I/O
  * @rw: direction (read or write)
  * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
  *
  * The presence of v9fs_direct_IO() in the address space ops vector
  * allowes open() O_DIRECT flags which would have failed otherwise.
@@ -252,13 +251,12 @@ static int v9fs_launder_page(struct page *page)
  * the VFS gets them, so this method should never be called.
  *
  * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
+ * this routine is called through generic_file_read_iter(), the read/write
+ * fails with an error.
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-              loff_t pos, unsigned long nr_segs)
+v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        /*
         * FIXME
@@ -267,7 +265,7 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
                 iocb->ki_filp->f_path.dentry->d_name.name,
-                (long long)pos, nr_segs);
+                (long long)pos, iter->nr_segs);
 
        return -EINVAL;
 }
index aa5ecf479a57bf1614100011de047d8c184c620e..ec99a96f86eb70b093ae4fc1aed4a314b00eedce 100644 (file)
@@ -105,10 +105,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                v9inode->writeback_fid = (void *) fid;
        }
        mutex_unlock(&v9inode->v_mutex);
-#ifdef CONFIG_9P_FSCACHE
        if (v9ses->cache)
                v9fs_cache_inode_set_cookie(inode, file);
-#endif
        return 0;
 out_error:
        p9_client_clunk(file->private_data);
@@ -463,14 +461,12 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
        int n;
        loff_t i_size;
        size_t total = 0;
-       struct p9_client *clnt;
        loff_t origin = *offset;
        unsigned long pg_start, pg_end;
 
        p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
                 data, (int)count, (int)*offset);
 
-       clnt = fid->clnt;
        do {
                n = p9_client_write(fid, NULL, data+total, origin+total, count);
                if (n <= 0)
@@ -743,8 +739,8 @@ const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
@@ -756,8 +752,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
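
The two file_operations hunks above, like the adfs/affs/bfs ones further down, follow the same conversion recipe: .read/.write stay on do_sync_read/do_sync_write while the .aio_read/.aio_write slots become .read_iter/.write_iter backed by the generic iterator helpers. A sketch of the resulting table (example_file_operations is a placeholder name; how do_sync_read reaches ->read_iter is handled elsewhere in this series):

const struct file_operations example_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,                 /* synchronous path */
        .read_iter      = generic_file_read_iter,       /* was .aio_read */
        .write          = do_sync_write,
        .write_iter     = generic_file_write_iter,      /* was .aio_write */
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
};
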
index 94de6d1482e2e076c4b3d10451c39fc245cce6ef..af7d531bdecdedcc4e7cef7a99dc36347b1c1163 100644 (file)
@@ -448,9 +448,7 @@ void v9fs_evict_inode(struct inode *inode)
        clear_inode(inode);
        filemap_fdatawrite(inode->i_mapping);
 
-#ifdef CONFIG_9P_FSCACHE
        v9fs_cache_inode_put_cookie(inode);
-#endif
        /* clunk the fid stashed in writeback_fid */
        if (v9inode->writeback_fid) {
                p9_client_clunk(v9inode->writeback_fid);
@@ -531,9 +529,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                goto error;
 
        v9fs_stat2inode(st, inode, sb);
-#ifdef CONFIG_9P_FSCACHE
        v9fs_cache_inode_get_cookie(inode);
-#endif
        unlock_new_inode(inode);
        return inode;
 error:
@@ -783,7 +779,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
                                      unsigned int flags)
 {
        struct dentry *res;
-       struct super_block *sb;
        struct v9fs_session_info *v9ses;
        struct p9_fid *dfid, *fid;
        struct inode *inode;
@@ -795,7 +790,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);
 
-       sb = dir->i_sb;
        v9ses = v9fs_inode2v9ses(dir);
        /* We can walk d_parent because we hold the dir->i_mutex */
        dfid = v9fs_fid_lookup(dentry->d_parent);
@@ -867,7 +861,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
                return finish_no_open(file, res);
 
        err = 0;
-       fid = NULL;
+
        v9ses = v9fs_inode2v9ses(dir);
        perm = unixmode2p9mode(v9ses, mode);
        fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
@@ -905,10 +899,8 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
                goto error;
 
        file->private_data = fid;
-#ifdef CONFIG_9P_FSCACHE
        if (v9ses->cache)
                v9fs_cache_inode_set_cookie(dentry->d_inode, file);
-#endif
 
        *opened |= FILE_CREATED;
 out:
index a7c481402c4654416106d22d1ed7d85f65f6f8e1..ecacec098fbb44784ee83d5064de14a33a44e6f6 100644 (file)
@@ -141,9 +141,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
                goto error;
 
        v9fs_stat2inode_dotl(st, inode);
-#ifdef CONFIG_9P_FSCACHE
        v9fs_cache_inode_get_cookie(inode);
-#endif
        retval = v9fs_get_acl(inode, fid);
        if (retval)
                goto error;
@@ -355,10 +353,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        if (err)
                goto err_clunk_old_fid;
        file->private_data = ofid;
-#ifdef CONFIG_9P_FSCACHE
        if (v9ses->cache)
                v9fs_cache_inode_set_cookie(inode, file);
-#endif
        *opened |= FILE_CREATED;
 out:
        v9fs_put_acl(dacl, pacl);
@@ -477,13 +473,11 @@ static int
 v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
 {
-       int err;
        struct v9fs_session_info *v9ses;
        struct p9_fid *fid;
        struct p9_stat_dotl *st;
 
        p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
-       err = -EPERM;
        v9ses = v9fs_dentry2v9ses(dentry);
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
                generic_fillattr(dentry->d_inode, stat);
@@ -560,7 +554,6 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
 int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 {
        int retval;
-       struct v9fs_session_info *v9ses;
        struct p9_fid *fid;
        struct p9_iattr_dotl p9attr;
        struct inode *inode = dentry->d_inode;
@@ -581,8 +574,6 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
        p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
        p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
 
-       retval = -EPERM;
-       v9ses = v9fs_dentry2v9ses(dentry);
        fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid))
                return PTR_ERR(fid);
index 4fe6df3ec28fe5392b680e853b0fbfef6313179a..1afa0e020082f8279ce3f8708131c4f74ba49941 100644 (file)
@@ -11,7 +11,7 @@ obj-y :=      open.o read_write.o file_table.o super.o \
                attr.o bad_inode.o file.o filesystems.o namespace.o \
                seq_file.o xattr.o libfs.o fs-writeback.o \
                pnode.o splice.o sync.o utimes.o \
-               stack.o fs_struct.o statfs.o
+               stack.o fs_struct.o statfs.o iov-iter.o
 
 ifeq ($(CONFIG_BLOCK),y)
 obj-y +=       buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
index a36da5382b40dc9c09ab6aa59762d9c1a7808b72..da1e02161ac3386da0489d32834ae932c46bac72 100644 (file)
 const struct file_operations adfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .splice_read    = generic_file_splice_read,
 };
 
index 8669b6ecddee4cc030e21f39358ce2cf5058bddb..664f743c2d8d089cfdbc3a84ac5916f0867650fb 100644 (file)
@@ -28,9 +28,9 @@ static int affs_file_release(struct inode *inode, struct file *filp);
 const struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
        .release        = affs_file_release,
index 3c090b7555ea8f3a095e0504a69cbd696db13ab5..ca0a3cf93791879578121b842891e97e5b0ffb1e 100644 (file)
@@ -179,7 +179,7 @@ struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
        /* put it up for caching (this never returns an error) */
        cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                             &afs_cell_cache_index_def,
-                                            cell);
+                                            cell, true);
 #endif
 
        /* add to the cell lists */
index 646337dc5201e702227309cc17db99f96af99173..529300327f4574d2dc36345be3c7398442548e08 100644 (file)
@@ -600,9 +600,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
        /* lock down the parent dentry so we can peer at it */
        parent = dget_parent(dentry);
-       if (!parent->d_inode)
-               goto out_bad;
-
        dir = AFS_FS_I(parent->d_inode);
 
        /* validate the parent directory */
index 66d50fe2ee459a887511381e8e375db72d2bf1f3..3b71622e40f44a7f8d98e4794a688dd224713029 100644 (file)
@@ -33,8 +33,8 @@ const struct file_operations afs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = afs_file_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = afs_file_write,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = afs_fsync,
index 789bc253b5f63c2d3c9abfdd216e0778b140a765..ce25d755b7aa16d68b131dd4944898e560e0d8e3 100644 (file)
@@ -259,7 +259,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
 #ifdef CONFIG_AFS_FSCACHE
        vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
                                              &afs_vnode_cache_index_def,
-                                             vnode);
+                                             vnode, true);
 #endif
 
        ret = afs_inode_map_status(vnode, key);
index a306bb6d88d9937badc2a1df1462d3bb0f469829..9c048ffac900f0fc89ecdb045449ef38d907c875 100644 (file)
@@ -747,8 +747,7 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
 extern int afs_writepage(struct page *, struct writeback_control *);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
-extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
-                             unsigned long, loff_t);
+extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *, loff_t);
 extern int afs_writeback_all(struct afs_vnode *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
index 57bcb1596530892e91ebf39fd12abfee0fe2dead..b6df2e83809f4ac81ed5b3d02f944ac7f32d9de8 100644 (file)
@@ -308,7 +308,8 @@ static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
        /* see if we have an in-cache copy (will set vl->valid if there is) */
 #ifdef CONFIG_AFS_FSCACHE
        vl->cache = fscache_acquire_cookie(vl->cell->cache,
-                                          &afs_vlocation_cache_index_def, vl);
+                                          &afs_vlocation_cache_index_def, vl,
+                                          true);
 #endif
 
        if (vl->valid) {
index 401eeb21869ff0657b966f4753fd951bc7897333..2b607257820c8ed7b383e486f3a7870052974ae4 100644 (file)
@@ -131,7 +131,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
 #ifdef CONFIG_AFS_FSCACHE
        volume->cache = fscache_acquire_cookie(vlocation->cache,
                                               &afs_volume_cache_index_def,
-                                              volume);
+                                              volume, true);
 #endif
        afs_get_vlocation(vlocation);
        volume->vlocation = vlocation;
index a890db4b9898fc1d888c5e7285da55db85e4da54..9fa2f596430accaecd06970f83209dba6e80425e 100644 (file)
@@ -625,15 +625,14 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
 /*
  * write to an AFS file
  */
-ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos)
+ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
 
        _enter("{%x.%u},{%zu},%lu,",
-              vnode->fid.vid, vnode->fid.vnode, count, nr_segs);
+              vnode->fid.vid, vnode->fid.vnode, count, iter->nr_segs);
 
        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
@@ -644,7 +643,7 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
        if (!count)
                return 0;
 
-       result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       result = generic_file_write_iter(iocb, iter, pos);
        if (IS_ERR_VALUE(result)) {
                _leave(" = %zd", result);
                return result;
index 6b868f0e0c4c019c3b68712ca230deb979711a98..a5630703eb565f4c0d2062bbe7b3ab1617dc2213 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -167,10 +167,25 @@ static int __init aio_setup(void)
 }
 __initcall(aio_setup);
 
+static void put_aio_ring_file(struct kioctx *ctx)
+{
+       struct file *aio_ring_file = ctx->aio_ring_file;
+       if (aio_ring_file) {
+               truncate_setsize(aio_ring_file->f_inode, 0);
+
+               /* Prevent further access to the kioctx from migratepages */
+               spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
+               aio_ring_file->f_inode->i_mapping->private_data = NULL;
+               ctx->aio_ring_file = NULL;
+               spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
+
+               fput(aio_ring_file);
+       }
+}
+
 static void aio_free_ring(struct kioctx *ctx)
 {
        int i;
-       struct file *aio_ring_file = ctx->aio_ring_file;
 
        for (i = 0; i < ctx->nr_pages; i++) {
                pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -178,14 +193,10 @@ static void aio_free_ring(struct kioctx *ctx)
                put_page(ctx->ring_pages[i]);
        }
 
+       put_aio_ring_file(ctx);
+
        if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
                kfree(ctx->ring_pages);
-
-       if (aio_ring_file) {
-               truncate_setsize(aio_ring_file->f_inode, 0);
-               fput(aio_ring_file);
-               ctx->aio_ring_file = NULL;
-       }
 }
 
 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
@@ -207,9 +218,8 @@ static int aio_set_page_dirty(struct page *page)
 static int aio_migratepage(struct address_space *mapping, struct page *new,
                        struct page *old, enum migrate_mode mode)
 {
-       struct kioctx *ctx = mapping->private_data;
+       struct kioctx *ctx;
        unsigned long flags;
-       unsigned idx = old->index;
        int rc;
 
        /* Writeback must be complete */
@@ -224,10 +234,23 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 
        get_page(new);
 
-       spin_lock_irqsave(&ctx->completion_lock, flags);
-       migrate_page_copy(new, old);
-       ctx->ring_pages[idx] = new;
-       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       /* We can potentially race against kioctx teardown here.  Use the
+        * address_space's private data lock to protect the mapping's
+        * private_data.
+        */
+       spin_lock(&mapping->private_lock);
+       ctx = mapping->private_data;
+       if (ctx) {
+               pgoff_t idx;
+               spin_lock_irqsave(&ctx->completion_lock, flags);
+               migrate_page_copy(new, old);
+               idx = old->index;
+               if (idx < (pgoff_t)ctx->nr_pages)
+                       ctx->ring_pages[idx] = new;
+               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       } else
+               rc = -EBUSY;
+       spin_unlock(&mapping->private_lock);
 
        return rc;
 }
@@ -617,8 +640,7 @@ out_freepcpu:
 out_freeref:
        free_percpu(ctx->users.pcpu_count);
 out_freectx:
-       if (ctx->aio_ring_file)
-               fput(ctx->aio_ring_file);
+       put_aio_ring_file(ctx);
        kmem_cache_free(kioctx_cachep, ctx);
        pr_debug("error allocating ioctx %d\n", err);
        return ERR_PTR(err);
@@ -855,6 +877,10 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
                iocb->ki_ctx = ERR_PTR(-EXDEV);
                wake_up_process(iocb->ki_obj.tsk);
                return;
+       } else if (is_kernel_kiocb(iocb)) {
+               iocb->ki_obj.complete(iocb->ki_user_data, res);
+               aio_kernel_free(iocb);
+               return;
        }
 
        /*
@@ -1173,13 +1199,55 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
        return 0;
 }
 
+static ssize_t aio_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       if (unlikely(!is_kernel_kiocb(iocb)))
+               return -EINVAL;
+
+       if (unlikely(!(file->f_mode & FMODE_READ)))
+               return -EBADF;
+
+       ret = security_file_permission(file, MAY_READ);
+       if (unlikely(ret))
+               return ret;
+
+       if (!file->f_op->read_iter)
+               return -EINVAL;
+
+       return file->f_op->read_iter(iocb, iter, iocb->ki_pos);
+}
+
+static ssize_t aio_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       if (unlikely(!is_kernel_kiocb(iocb)))
+               return -EINVAL;
+
+       if (unlikely(!(file->f_mode & FMODE_WRITE)))
+               return -EBADF;
+
+       ret = security_file_permission(file, MAY_WRITE);
+       if (unlikely(ret))
+               return ret;
+
+       if (!file->f_op->write_iter)
+               return -EINVAL;
+
+       return file->f_op->write_iter(iocb, iter, iocb->ki_pos);
+}
+
 /*
  * aio_setup_iocb:
  *     Performs the initial checks and aio retry method
  *     setup for the kiocb at the time of io submission.
  */
 static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
-                           char __user *buf, bool compat)
+                           void *buf, bool compat)
 {
        struct file *file = req->ki_filp;
        ssize_t ret;
@@ -1194,14 +1262,14 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
        case IOCB_CMD_PREADV:
                mode    = FMODE_READ;
                rw      = READ;
-               rw_op   = file->f_op->aio_read;
+               rw_op   = do_aio_read;
                goto rw_common;
 
        case IOCB_CMD_PWRITE:
        case IOCB_CMD_PWRITEV:
                mode    = FMODE_WRITE;
                rw      = WRITE;
-               rw_op   = file->f_op->aio_write;
+               rw_op   = do_aio_write;
                goto rw_common;
 rw_common:
                if (unlikely(!(file->f_mode & mode)))
@@ -1244,6 +1312,14 @@ rw_common:
                        file_end_write(file);
                break;
 
+       case IOCB_CMD_READ_ITER:
+               ret = aio_read_iter(req, buf);
+               break;
+
+       case IOCB_CMD_WRITE_ITER:
+               ret = aio_write_iter(req, buf);
+               break;
+
        case IOCB_CMD_FDSYNC:
                if (!file->f_op->aio_fsync)
                        return -EINVAL;
@@ -1281,6 +1357,80 @@ rw_common:
        return 0;
 }
 
+/*
+ * This allocates an iocb that will be used to submit and track completion of
+ * an IO that is issued from kernel space.
+ *
+ * The caller is expected to call the appropriate aio_kernel_init_() functions
+ * and then call aio_kernel_submit().  From that point forward progress is
+ * guaranteed by the file system aio method.  Eventually the caller's
+ * completion callback will be called.
+ *
+ * These iocbs are special.  They don't have a context, we don't limit the
+ * number pending, and they can't be canceled.
+ */
+struct kiocb *aio_kernel_alloc(gfp_t gfp)
+{
+       return kzalloc(sizeof(struct kiocb), gfp);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_alloc);
+
+void aio_kernel_free(struct kiocb *iocb)
+{
+       kfree(iocb);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_free);
+
+/*
+ * ptr and count can be a buff and bytes or an iov and segs.
+ */
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp,
+                       size_t nr, loff_t off)
+{
+       iocb->ki_filp = filp;
+       iocb->ki_nbytes = nr;
+       iocb->ki_pos = off;
+       iocb->ki_ctx = (void *)-1;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_rw);
+
+void aio_kernel_init_callback(struct kiocb *iocb,
+                             void (*complete)(u64 user_data, long res),
+                             u64 user_data)
+{
+       iocb->ki_obj.complete = complete;
+       iocb->ki_user_data = user_data;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_callback);
+
+/*
+ * The iocb is our responsibility once this is called.  The caller must not
+ * reference it.
+ *
+ * Callers must be prepared for their iocb completion callback to be called the
+ * moment they enter this function.  The completion callback may be called from
+ * any context.
+ *
+ * Returns: 0: the iocb completion callback will be called with the op result
+ * negative errno: the operation was not submitted and the iocb was freed
+ */
+int aio_kernel_submit(struct kiocb *iocb, unsigned op, void *ptr)
+{
+       int ret;
+
+       BUG_ON(!is_kernel_kiocb(iocb));
+       BUG_ON(!iocb->ki_obj.complete);
+       BUG_ON(!iocb->ki_filp);
+
+       ret = aio_run_iocb(iocb, op, ptr, 0);
+
+       if (ret)
+               aio_kernel_free(iocb);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_submit);
+
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         struct iocb *iocb, bool compat)
 {
@@ -1340,7 +1490,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        req->ki_nbytes = iocb->aio_nbytes;
 
        ret = aio_run_iocb(req, iocb->aio_lio_opcode,
-                          (char __user *)(unsigned long)iocb->aio_buf,
+                          (void *)(unsigned long)iocb->aio_buf,
                           compat);
        if (ret)
                goto out_put_req;
index 7c93953030fbe5eda13d76b6a8c53d6f2a31902d..38651e5da183f80b60763c7423a1171e1dfc9d6f 100644 (file)
@@ -39,12 +39,24 @@ static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
        return -EIO;
 }
 
+static ssize_t bad_file_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos)
+{
+       return -EIO;
+}
+
 static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                        unsigned long nr_segs, loff_t pos)
 {
        return -EIO;
 }
 
+static ssize_t bad_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos)
+{
+       return -EIO;
+}
+
 static int bad_file_readdir(struct file *file, struct dir_context *ctx)
 {
        return -EIO;
@@ -151,7 +163,9 @@ static const struct file_operations bad_file_ops =
        .read           = bad_file_read,
        .write          = bad_file_write,
        .aio_read       = bad_file_aio_read,
+       .read_iter      = bad_file_read_iter,
        .aio_write      = bad_file_aio_write,
+       .write_iter     = bad_file_write_iter,
        .iterate        = bad_file_readdir,
        .poll           = bad_file_poll,
        .unlocked_ioctl = bad_file_unlocked_ioctl,
index e9c75e20db32d43b550506f5f8f62b2656f760e7..daa15d6ba45077755d2bc859cfffb44f6a82bfd0 100644 (file)
@@ -42,7 +42,7 @@ static void befs_destroy_inode(struct inode *inode);
 static int befs_init_inodecache(void);
 static void befs_destroy_inodecache(void);
 static void *befs_follow_link(struct dentry *, struct nameidata *);
-static void befs_put_link(struct dentry *, struct nameidata *, void *);
+static void *befs_fast_follow_link(struct dentry *, struct nameidata *);
 static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
                        char **out, int *out_len);
 static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -79,10 +79,15 @@ static const struct address_space_operations befs_aops = {
        .bmap           = befs_bmap,
 };
 
+static const struct inode_operations befs_fast_symlink_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = befs_fast_follow_link,
+};
+
 static const struct inode_operations befs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = befs_follow_link,
-       .put_link       = befs_put_link,
+       .put_link       = kfree_put_link,
 };
 
 /* 
@@ -411,7 +416,10 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
                inode->i_op = &befs_dir_inode_operations;
                inode->i_fop = &befs_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
-               inode->i_op = &befs_symlink_inode_operations;
+               if (befs_ino->i_flags & BEFS_LONG_SYMLINK)
+                       inode->i_op = &befs_symlink_inode_operations;
+               else
+                       inode->i_op = &befs_fast_symlink_inode_operations;
        } else {
                befs_error(sb, "Inode %lu is not a regular file, "
                           "directory or symlink. THAT IS WRONG! BeFS has no "
@@ -477,47 +485,40 @@ befs_destroy_inodecache(void)
 static void *
 befs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
+       struct super_block *sb = dentry->d_sb;
        befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+       befs_data_stream *data = &befs_ino->i_data.ds;
+       befs_off_t len = data->size;
        char *link;
 
-       if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-               struct super_block *sb = dentry->d_sb;
-               befs_data_stream *data = &befs_ino->i_data.ds;
-               befs_off_t len = data->size;
+       if (len == 0) {
+               befs_error(sb, "Long symlink with illegal length");
+               link = ERR_PTR(-EIO);
+       } else {
+               befs_debug(sb, "Follow long symlink");
 
-               if (len == 0) {
-                       befs_error(sb, "Long symlink with illegal length");
+               link = kmalloc(len, GFP_NOFS);
+               if (!link) {
+                       link = ERR_PTR(-ENOMEM);
+               } else if (befs_read_lsymlink(sb, data, link, len) != len) {
+                       kfree(link);
+                       befs_error(sb, "Failed to read entire long symlink");
                        link = ERR_PTR(-EIO);
                } else {
-                       befs_debug(sb, "Follow long symlink");
-
-                       link = kmalloc(len, GFP_NOFS);
-                       if (!link) {
-                               link = ERR_PTR(-ENOMEM);
-                       } else if (befs_read_lsymlink(sb, data, link, len) != len) {
-                               kfree(link);
-                               befs_error(sb, "Failed to read entire long symlink");
-                               link = ERR_PTR(-EIO);
-                       } else {
-                               link[len - 1] = '\0';
-                       }
+                       link[len - 1] = '\0';
                }
-       } else {
-               link = befs_ino->i_data.symlink;
        }
-
        nd_set_link(nd, link);
        return NULL;
 }
 
-static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+
+static void *
+befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
-       if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-               char *link = nd_get_link(nd);
-               if (!IS_ERR(link))
-                       kfree(link);
-       }
+       nd_set_link(nd, befs_ino->i_data.symlink);
+       return NULL;
 }
 
 /*
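
Gathering the befs pieces from the hunks above: symlink inodes now pick one of two inode_operations tables in befs_iget(), so the old befs_put_link() and its BEFS_LONG_SYMLINK test disappear. The two tables side by side, with the ownership of the target buffer noted:

/* long targets: ->follow_link kmallocs the string, kfree_put_link frees it */
static const struct inode_operations befs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = befs_follow_link,
        .put_link       = kfree_put_link,
};

/* short targets live inside the inode itself: nothing to allocate or free */
static const struct inode_operations befs_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = befs_fast_follow_link,
};
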
index ae28922183357d4c0e4d491a452919e125ba8bc3..d150660d598bf8878a1513427946958a44930497 100644 (file)
@@ -24,9 +24,9 @@
 const struct file_operations bfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
 };
index 100edcc5e3122323eb8f4087305e623c666eceb0..4c94a79991bb6d8ae0d2e12ae8c647e2fe22f7fe 100644 (file)
@@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
  *   long file_ofs
  * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
  */
-static void fill_files_note(struct memelfnote *note)
+static int fill_files_note(struct memelfnote *note)
 {
        struct vm_area_struct *vma;
        unsigned count, size, names_ofs, remaining, n;
@@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note)
        names_ofs = (2 + 3 * count) * sizeof(data[0]);
  alloc:
        if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
-               goto err;
+               return -EINVAL;
        size = round_up(size, PAGE_SIZE);
        data = vmalloc(size);
        if (!data)
-               goto err;
+               return -ENOMEM;
 
        start_end_ofs = data + 2;
        name_base = name_curpos = ((char *)data) + names_ofs;
@@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note)
 
        size = name_curpos - (char *)data;
        fill_note(note, "CORE", NT_FILE, size, data);
- err: ;
+       return 0;
 }
 
 #ifdef CORE_DUMP_USE_REGSET
@@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        fill_auxv_note(&info->auxv, current->mm);
        info->size += notesize(&info->auxv);
 
-       fill_files_note(&info->files);
-       info->size += notesize(&info->files);
+       if (fill_files_note(&info->files) == 0)
+               info->size += notesize(&info->files);
 
        return 1;
 }
@@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info,
                        return 0;
                if (first && !writenote(&info->auxv, file, foffset))
                        return 0;
-               if (first && !writenote(&info->files, file, foffset))
+               if (first && info->files.data &&
+                               !writenote(&info->files, file, foffset))
                        return 0;
 
                for (i = 1; i < info->thread_notes; ++i)
@@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 
 struct elf_note_info {
        struct memelfnote *notes;
+       struct memelfnote *notes_files;
        struct elf_prstatus *prstatus;  /* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
        struct list_head thread_list;
@@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 
        fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
        fill_auxv_note(info->notes + 3, current->mm);
-       fill_files_note(info->notes + 4);
+       info->numnote = 4;
 
-       info->numnote = 5;
+       if (fill_files_note(info->notes + info->numnote) == 0) {
+               info->notes_files = info->notes + info->numnote;
+               info->numnote++;
+       }
 
        /* Try to dump the FPU. */
        info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info)
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }
 
-       /* Free data allocated by fill_files_note(): */
-       vfree(info->notes[4].data);
+       /* Free data possibly allocated by fill_files_note(): */
+       if (info->notes_files)
+               vfree(info->notes_files->data);
 
        kfree(info->prstatus);
        kfree(info->psinfo);
@@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm)
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff, foffset;
-       struct elf_note_info info;
+       struct elf_note_info info = { };
        struct elf_phdr *phdr4note = NULL;
        struct elf_shdr *shdr4extnum = NULL;
        Elf_Half e_phnum;
index 60250847929fcd0421e5d72b9655bbe4c3a785e1..fc60b31453eefbbdcc234c7df78c5504da655980 100644 (file)
@@ -735,7 +735,7 @@ void bioset_integrity_free(struct bio_set *bs)
                mempool_destroy(bs->bio_integrity_pool);
 
        if (bs->bvec_integrity_pool)
-               mempool_destroy(bs->bio_integrity_pool);
+               mempool_destroy(bs->bvec_integrity_pool);
 }
 EXPORT_SYMBOL(bioset_integrity_free);
 
index b3b20ed9510e5ccc285195cce7aa7e3f524ed063..ea5035da4d9a0cd9fd6f5f657bb01272c63175c3 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
                src_p = kmap_atomic(src_bv->bv_page);
                dst_p = kmap_atomic(dst_bv->bv_page);
 
-               memcpy(dst_p + dst_bv->bv_offset,
-                      src_p + src_bv->bv_offset,
+               memcpy(dst_p + dst_offset,
+                      src_p + src_offset,
                       bytes);
 
                kunmap_atomic(dst_p);
index 1e86823a9cbda37f8451269d91a50f90d00c9566..34d9da0e6b74030d6f708f80d35f1971f03e1ca6 100644 (file)
@@ -165,14 +165,14 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
-                                   nr_segs, blkdev_get_block, NULL, NULL, 0);
+       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
+                                   offset, blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1508,8 +1508,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
  * Does not take i_mutex for the write and thus is not for general purpose
  * use.
  */
-ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos)
+ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct blk_plug plug;
@@ -1518,7 +1517,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        BUG_ON(iocb->ki_pos != pos);
 
        blk_start_plug(&plug);
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
        if (ret > 0) {
                ssize_t err;
 
@@ -1529,10 +1528,10 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        blk_finish_plug(&plug);
        return ret;
 }
-EXPORT_SYMBOL_GPL(blkdev_aio_write);
+EXPORT_SYMBOL_GPL(blkdev_write_iter);
 
-static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos)
+static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+                        loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = file->f_mapping->host;
@@ -1543,8 +1542,8 @@ static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
 
        size -= pos;
        if (size < iocb->ki_nbytes)
-               nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
-       return generic_file_aio_read(iocb, iov, nr_segs, pos);
+               iov_iter_shorten(iter, size);
+       return generic_file_read_iter(iocb, iter, pos);
 }
 
 /*
@@ -1578,8 +1577,8 @@ const struct file_operations def_blk_fops = {
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = blkdev_aio_read,
-       .aio_write      = blkdev_aio_write,
+       .read_iter      = blkdev_read_iter,
+       .write_iter     = blkdev_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
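
blkdev_read_iter() above keeps the old blkdev_aio_read() behaviour of clamping reads at the end of the device, but instead of rewriting the iovec with iov_shorten() it trims the iterator. A hedged sketch of the full clamp; the i_size_read() line is reconstructed from the unquoted top of the function, and iov_iter_shorten() comes from the same iov_iter series as the rest of this conversion, so treat it as provisional:

        loff_t size = i_size_read(bd_inode);   /* current device size */

        if (pos >= size)
                return 0;                       /* nothing left to read */

        size -= pos;
        if (size < iocb->ki_nbytes)
                iov_iter_shorten(iter, size);   /* trim the iterator, not the iovec */

        return generic_file_read_iter(iocb, iter, pos);
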
index 58b7d14b08ee1d47ec66a06243ee16f63854b676..08cc08f037a633199bffb3fadfa95750302eb25e 100644 (file)
@@ -107,7 +107,8 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
                worker->idle = 1;
 
                /* the list may be empty if the worker is just starting */
-               if (!list_empty(&worker->worker_list)) {
+               if (!list_empty(&worker->worker_list) &&
+                   !worker->workers->stopping) {
                        list_move(&worker->worker_list,
                                 &worker->workers->idle_list);
                }
@@ -127,7 +128,8 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
 
-               if (!list_empty(&worker->worker_list)) {
+               if (!list_empty(&worker->worker_list) &&
+                   !worker->workers->stopping) {
                        list_move_tail(&worker->worker_list,
                                      &worker->workers->worker_list);
                }
@@ -412,6 +414,7 @@ void btrfs_stop_workers(struct btrfs_workers *workers)
        int can_stop;
 
        spin_lock_irq(&workers->lock);
+       workers->stopping = 1;
        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
@@ -455,6 +458,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
        workers->ordered = 0;
        workers->atomic_start_pending = 0;
        workers->atomic_worker_start = async_helper;
+       workers->stopping = 0;
 }
 
 /*
@@ -480,15 +484,19 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
        atomic_set(&worker->num_pending, 0);
        atomic_set(&worker->refs, 1);
        worker->workers = workers;
-       worker->task = kthread_run(worker_loop, worker,
-                                  "btrfs-%s-%d", workers->name,
-                                  workers->num_workers + 1);
+       worker->task = kthread_create(worker_loop, worker,
+                                     "btrfs-%s-%d", workers->name,
+                                     workers->num_workers + 1);
        if (IS_ERR(worker->task)) {
                ret = PTR_ERR(worker->task);
-               kfree(worker);
                goto fail;
        }
+
        spin_lock_irq(&workers->lock);
+       if (workers->stopping) {
+               spin_unlock_irq(&workers->lock);
+               goto fail_kthread;
+       }
        list_add_tail(&worker->worker_list, &workers->idle_list);
        worker->idle = 1;
        workers->num_workers++;
@@ -496,8 +504,13 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
        WARN_ON(workers->num_workers_starting < 0);
        spin_unlock_irq(&workers->lock);
 
+       wake_up_process(worker->task);
        return 0;
+
+fail_kthread:
+       kthread_stop(worker->task);
 fail:
+       kfree(worker);
        spin_lock_irq(&workers->lock);
        workers->num_workers_starting--;
        spin_unlock_irq(&workers->lock);
index 063698b90ce2dd0481663aeee46b30ef796d52e2..1f26792683edf195b48db80992c05acae6fd71f0 100644 (file)
@@ -107,6 +107,8 @@ struct btrfs_workers {
 
        /* extra name for this worker, used for current->name */
        char *name;
+
+       int stopping;
 };
 
 void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
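
The async-thread change above closes a start/stop race: workers are now created with kthread_create(), registered on the idle list under workers->lock only if the pool is not already stopping, and only then woken. A condensed sketch of that ordering (error labels shortened; the real hunk also maintains the num_workers_starting counter):

        worker->task = kthread_create(worker_loop, worker, "btrfs-%s-%d",
                                      workers->name, workers->num_workers + 1);
        if (IS_ERR(worker->task))
                goto fail;

        spin_lock_irq(&workers->lock);
        if (workers->stopping) {                /* pool is being torn down */
                spin_unlock_irq(&workers->lock);
                kthread_stop(worker->task);     /* thread never ran, stop it cleanly */
                goto fail;
        }
        list_add_tail(&worker->worker_list, &workers->idle_list);
        workers->num_workers++;
        spin_unlock_irq(&workers->lock);

        wake_up_process(worker->task);          /* only after it is visible on the list */
        return 0;

Creating the thread stopped (kthread_create() followed by wake_up_process()) rather than with kthread_run() is what makes the stopping check meaningful: the worker cannot run before the pool has accepted it.
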
index d0ae226926ee2d2f43d29220da8fd531f31e8687..71f074e1870b2e9fe183cf338ad2515314155e6e 100644 (file)
@@ -213,7 +213,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
 static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
 {
        if (BTRFS_I(inode)->logged_trans == generation &&
-           BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit)
+           BTRFS_I(inode)->last_sub_trans <=
+           BTRFS_I(inode)->last_log_commit &&
+           BTRFS_I(inode)->last_sub_trans <=
+           BTRFS_I(inode)->root->last_log_commit)
                return 1;
        return 0;
 }
index 64346721173f24f31170a8e70e85f7d49a83b609..61b5bcd57b7e320624c2778d2051db03b79f2282 100644 (file)
@@ -1005,8 +1005,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                return ret;
        }
 
-       if (root->ref_cows)
-               btrfs_reloc_cow_block(trans, root, buf, cow);
+       if (root->ref_cows) {
+               ret = btrfs_reloc_cow_block(trans, root, buf, cow);
+               if (ret)
+                       return ret;
+       }
 
        if (buf == root->node) {
                WARN_ON(parent && parent != buf);
index 3c1da6f98a4d25666f4a2afb6fce25bff80ac8c8..a80a2ccb955c97fd6b1a26d37ce4ff233c0e0c73 100644 (file)
@@ -1118,15 +1118,6 @@ struct btrfs_space_info {
         */
        struct percpu_counter total_bytes_pinned;
 
-       /*
-        * we bump reservation progress every time we decrement
-        * bytes_reserved.  This way people waiting for reservations
-        * know something good has happened and they can check
-        * for progress.  The number here isn't to be trusted, it
-        * just shows reclaim activity
-        */
-       unsigned long reservation_progress;
-
        unsigned int full:1;    /* indicates that we cannot allocate any more
                                   chunks for this space */
        unsigned int chunk_alloc:1;     /* set if we are allocating a chunk */
@@ -3114,11 +3105,6 @@ static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
        ((unsigned long)(btrfs_leaf_data(leaf) + \
        btrfs_item_offset_nr(leaf, slot)))
 
-static inline struct dentry *fdentry(struct file *file)
-{
-       return file->f_path.dentry;
-}
-
 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 {
        return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
@@ -3135,7 +3121,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
                                                 unsigned num_items)
 {
        return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
-               3 * num_items;
+               2 * num_items;
 }
 
 /*
@@ -3939,9 +3925,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root);
 int btrfs_recover_relocation(struct btrfs_root *root);
 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, struct extent_buffer *buf,
-                          struct extent_buffer *cow);
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+                         struct btrfs_root *root, struct extent_buffer *buf,
+                         struct extent_buffer *cow);
 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
                              struct btrfs_pending_snapshot *pending,
                              u64 *bytes_to_reserve);
index a64435359385e86a483f30696c932da2c8d5bdb0..9efb94e95858e8c53cb84f587b74a064e8701dc4 100644 (file)
@@ -400,7 +400,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
        args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
        btrfs_dev_replace_unlock(dev_replace);
 
-       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+       btrfs_wait_all_ordered_extents(root->fs_info);
 
        /* force writing the updated state information to disk */
        trans = btrfs_start_transaction(root, 0);
@@ -475,7 +475,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
                mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
                return ret;
        }
-       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+       btrfs_wait_all_ordered_extents(root->fs_info);
 
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
@@ -535,10 +535,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
        list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 
        btrfs_rm_dev_replace_srcdev(fs_info, src_device);
-       if (src_device->bdev) {
-               /* zero out the old super */
-               btrfs_scratch_superblock(src_device);
-       }
+
        /*
         * this is again a consistent state where no dev_replace procedure
         * is running, the target device is part of the filesystem, the
index 4cbb00af92ff3bed86561b9f02ec6fbcc04665b2..62176ad89846173e4d4987da257166fb90b971ff 100644 (file)
@@ -157,6 +157,7 @@ static struct btrfs_lockdep_keyset {
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
+       { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = 0,                              .name_stem = "tree"     },
 };
 
@@ -1560,8 +1561,9 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
        return ret;
 }
 
-struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
-                                             struct btrfs_key *location)
+struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+                                    struct btrfs_key *location,
+                                    bool check_ref)
 {
        struct btrfs_root *root;
        int ret;
@@ -1585,7 +1587,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 again:
        root = btrfs_lookup_fs_root(fs_info, location->objectid);
        if (root) {
-               if (btrfs_root_refs(&root->root_item) == 0)
+               if (check_ref && btrfs_root_refs(&root->root_item) == 0)
                        return ERR_PTR(-ENOENT);
                return root;
        }
@@ -1594,7 +1596,7 @@ again:
        if (IS_ERR(root))
                return root;
 
-       if (btrfs_root_refs(&root->root_item) == 0) {
+       if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }
@@ -3415,6 +3417,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
                /* FUA is masked off if unsupported and can't be the reason */
                btrfs_error(root->fs_info, -EIO,
index b71acd6e1e5b1941e75ed4c5c056d47268cdc407..5ce2a7da8b113fef13456687fdf4243fa9f1c2ba 100644 (file)
@@ -68,8 +68,17 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
 int btrfs_init_fs_root(struct btrfs_root *root);
 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
                         struct btrfs_root *root);
-struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
-                                             struct btrfs_key *location);
+
+struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+                                    struct btrfs_key *key,
+                                    bool check_ref);
+static inline struct btrfs_root *
+btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
+                          struct btrfs_key *location)
+{
+       return btrfs_get_fs_root(fs_info, location, true);
+}
+
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty(struct btrfs_root *root);
 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
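
The disk-io.h hunk above shows the compatibility-shim pattern used for the new check_ref flag: the old entry point btrfs_read_fs_root_no_name() survives as a static inline wrapper passing check_ref=true, so existing callers keep the root-refs check while new code can opt out. The shape of the shim, with the intent spelled out:

struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     struct btrfs_key *key,
                                     bool check_ref);

/* old name kept as a shim: existing callers still get the refs check */
static inline struct btrfs_root *
btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                           struct btrfs_key *location)
{
        return btrfs_get_fs_root(fs_info, location, true);
}
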
index cfb3cf711b34d6555afed3d21dd16480c662cc55..d58bef130a41984ac7e3172aad43eb87547af64b 100644 (file)
@@ -3925,7 +3925,6 @@ static int can_overcommit(struct btrfs_root *root,
        u64 space_size;
        u64 avail;
        u64 used;
-       u64 to_add;
 
        used = space_info->bytes_used + space_info->bytes_reserved +
                space_info->bytes_pinned + space_info->bytes_readonly;
@@ -3959,25 +3958,17 @@ static int can_overcommit(struct btrfs_root *root,
                       BTRFS_BLOCK_GROUP_RAID10))
                avail >>= 1;
 
-       to_add = space_info->total_bytes;
-
        /*
         * If we aren't flushing all things, let us overcommit up to
         * 1/2th of the space. If we can flush, don't let us overcommit
         * too much, let it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
-               to_add >>= 3;
+               avail >>= 3;
        else
-               to_add >>= 1;
-
-       /*
-        * Limit the overcommit to the amount of free space we could possibly
-        * allocate for chunks.
-        */
-       to_add = min(avail, to_add);
+               avail >>= 1;
 
-       if (used + bytes < space_info->total_bytes + to_add)
+       if (used + bytes < space_info->total_bytes + avail)
                return 1;
        return 0;
 }
@@ -4000,7 +3991,7 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
                 */
                btrfs_start_all_delalloc_inodes(root->fs_info, 0);
                if (!current->journal_info)
-                       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+                       btrfs_wait_all_ordered_extents(root->fs_info);
        }
 }
 
@@ -4030,7 +4021,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
        if (delalloc_bytes == 0) {
                if (trans)
                        return;
-               btrfs_wait_all_ordered_extents(root->fs_info, 0);
+               btrfs_wait_all_ordered_extents(root->fs_info);
                return;
        }
 
@@ -4058,7 +4049,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 
                loops++;
                if (wait_ordered && !trans) {
-                       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+                       btrfs_wait_all_ordered_extents(root->fs_info);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
@@ -4465,7 +4456,6 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
                        space_info->bytes_may_use -= num_bytes;
                        trace_btrfs_space_reservation(fs_info, "space_info",
                                        space_info->flags, num_bytes, 0);
-                       space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
        }
@@ -4666,7 +4656,6 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                sinfo->bytes_may_use -= num_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
                                      sinfo->flags, num_bytes, 0);
-               sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        }
@@ -5446,7 +5435,6 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                        space_info->bytes_readonly += num_bytes;
                cache->reserved -= num_bytes;
                space_info->bytes_reserved -= num_bytes;
-               space_info->reservation_progress++;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&space_info->lock);
@@ -6117,10 +6105,13 @@ enum btrfs_loop_type {
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
- * ins->objectid == block start
+ * ins->objectid == start position
  * ins->flags = BTRFS_EXTENT_ITEM_KEY
- * ins->offset == number of blocks
+ * ins->offset == the size of the hole.
  * Any available blocks before search_start are skipped.
+ *
+ * If there is no suitable free space, we will record the max size of
+ * the free space extent currently.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
@@ -6133,6 +6124,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
        struct btrfs_block_group_cache *block_group = NULL;
        struct btrfs_block_group_cache *used_block_group;
        u64 search_start = 0;
+       u64 max_extent_size = 0;
        int empty_cluster = 2 * 1024 * 1024;
        struct btrfs_space_info *space_info;
        int loop = 0;
@@ -6292,7 +6284,10 @@ have_block_group:
                                btrfs_get_block_group(used_block_group);
 
                        offset = btrfs_alloc_from_cluster(used_block_group,
-                         last_ptr, num_bytes, used_block_group->key.objectid);
+                                               last_ptr,
+                                               num_bytes,
+                                               used_block_group->key.objectid,
+                                               &max_extent_size);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
@@ -6355,8 +6350,10 @@ refill_cluster:
                                 * cluster
                                 */
                                offset = btrfs_alloc_from_cluster(block_group,
-                                                 last_ptr, num_bytes,
-                                                 search_start);
+                                                       last_ptr,
+                                                       num_bytes,
+                                                       search_start,
+                                                       &max_extent_size);
                                if (offset) {
                                        /* we found one, proceed */
                                        spin_unlock(&last_ptr->refill_lock);
@@ -6391,13 +6388,18 @@ unclustered_alloc:
                if (cached &&
                    block_group->free_space_ctl->free_space <
                    num_bytes + empty_cluster + empty_size) {
+                       if (block_group->free_space_ctl->free_space >
+                           max_extent_size)
+                               max_extent_size =
+                                       block_group->free_space_ctl->free_space;
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                offset = btrfs_find_space_for_alloc(block_group, search_start,
-                                                   num_bytes, empty_size);
+                                                   num_bytes, empty_size,
+                                                   &max_extent_size);
                /*
                 * If we didn't find a chunk, and we haven't failed on this
                 * block group before, and this block group is in the middle of
@@ -6515,7 +6517,8 @@ loop:
                ret = 0;
        }
 out:
-
+       if (ret == -ENOSPC)
+               ins->offset = max_extent_size;
        return ret;
 }
 
@@ -6573,8 +6576,8 @@ again:
                               flags);
 
        if (ret == -ENOSPC) {
-               if (!final_tried) {
-                       num_bytes = num_bytes >> 1;
+               if (!final_tried && ins->offset) {
+                       num_bytes = min(num_bytes >> 1, ins->offset);
                        num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
                        if (num_bytes == min_alloc_size)
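
Note: with find_free_extent() reporting the largest free extent it saw back through ins->offset on -ENOSPC, the retry path above can shrink the next request to something that can plausibly succeed instead of blindly halving. A standalone sketch of that sizing step (the helper name and macros here are illustrative):

    #include <stdint.h>

    #define ROUND_DOWN(x, a)  ((x) / (a) * (a))
    #define MIN(a, b)         ((a) < (b) ? (a) : (b))
    #define MAX(a, b)         ((a) > (b) ? (a) : (b))

    /* Compute the next allocation size to try after -ENOSPC, given the
     * largest free extent the allocator reported back. */
    static uint64_t next_alloc_size(uint64_t num_bytes, uint64_t max_extent_size,
                                    uint64_t min_alloc_size, uint64_t sectorsize)
    {
        num_bytes = MIN(num_bytes >> 1, max_extent_size);
        num_bytes = ROUND_DOWN(num_bytes, sectorsize);
        return MAX(num_bytes, min_alloc_size);
    }
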
index 09582b81640cc8e02ced7ea7c8ec6b22acdc8dba..51731b76900de55e8350d5795feacc61a9050d02 100644 (file)
@@ -145,8 +145,16 @@ int __init extent_io_init(void)
                                     offsetof(struct btrfs_io_bio, bio));
        if (!btrfs_bioset)
                goto free_buffer_cache;
+
+       if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
+               goto free_bioset;
+
        return 0;
 
+free_bioset:
+       bioset_free(btrfs_bioset);
+       btrfs_bioset = NULL;
+
 free_buffer_cache:
        kmem_cache_destroy(extent_buffer_cache);
        extent_buffer_cache = NULL;
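
Note: the new free_bioset label follows the usual unwind idiom: labels sit in reverse order of the allocations, so a failure at any step releases exactly what was set up before it. A self-contained sketch of the pattern (the malloc'd "resources" are stand-ins for the caches and biosets in the hunk):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Unwind in reverse order of setup: a failure at step N frees N-1..1. */
    static int init_subsystem(bool integrity_fails)
    {
        void *cache, *pool;

        cache = malloc(64);          /* stands in for kmem_cache_create() */
        if (!cache)
            return -1;

        pool = malloc(64);           /* stands in for bioset_create() */
        if (!pool)
            goto free_cache;

        if (integrity_fails)         /* bioset_integrity_create() failing */
            goto free_pool;

        return 0;

    free_pool:
        free(pool);
    free_cache:
        free(cache);
        return -1;
    }
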
@@ -1481,11 +1489,11 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
-               if (!node)
-                       break;
                total_bytes += state->end - state->start + 1;
                if (total_bytes >= max_bytes)
                        break;
+               if (!node)
+                       break;
        }
 out:
        spin_unlock(&tree->lock);
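
Note: moving the !node check after the byte accounting means the final extent state still contributes to total_bytes before the loop exits, so the caller sees the full delalloc size even when the range ends at the last tree node. A toy loop showing why the order of the two break conditions matters (an array walk stands in for the rb-tree iteration):

    #include <stddef.h>
    #include <stdint.h>

    struct state { uint64_t start, end; };

    /* Sum contiguous delalloc bytes, stopping once max_bytes is reached.
     * The current element is counted before checking for a next one,
     * mirroring the reordered checks above. */
    static uint64_t sum_delalloc(const struct state *s, size_t n,
                                 uint64_t max_bytes)
    {
        uint64_t total = 0;
        for (size_t i = 0; i < n; i++) {
            total += s[i].end - s[i].start + 1;
            if (total >= max_bytes)
                break;
            if (i + 1 == n)    /* no next node: s[i] was still counted */
                break;
        }
        return total;
    }
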
@@ -1612,7 +1620,7 @@ again:
                *start = delalloc_start;
                *end = delalloc_end;
                free_extent_state(cached_state);
-               return found;
+               return 0;
        }
 
        /*
@@ -1625,10 +1633,9 @@ again:
 
        /*
         * make sure to limit the number of pages we try to lock down
-        * if we're looping.
         */
-       if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
-               delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+       if (delalloc_end + 1 - delalloc_start > max_bytes)
+               delalloc_end = delalloc_start + max_bytes - 1;
 
        /* step two, lock all the pages after the page that has start */
        ret = lock_delalloc_pages(inode, locked_page,
@@ -1639,8 +1646,7 @@ again:
                 */
                free_extent_state(cached_state);
                if (!loops) {
-                       unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
-                       max_bytes = PAGE_CACHE_SIZE - offset;
+                       max_bytes = PAGE_CACHE_SIZE;
                        loops = 1;
                        goto again;
                } else {
index bc5072b2db537f0f27af1851532b7417b41a8489..5e70fc2cef27f886a8dbe65dedc13a5cba0cf094 100644 (file)
@@ -453,7 +453,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                write_bytes -= copied;
                total_copied += copied;
 
-               /* Return to btrfs_file_aio_write to fault page */
+               /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;
 
@@ -1557,27 +1557,23 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 }
 
 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs, loff_t pos,
-                                   loff_t *ppos, size_t count, size_t ocount)
+                                    struct iov_iter *iter, loff_t pos,
+                                   loff_t *ppos, size_t count)
 {
        struct file *file = iocb->ki_filp;
-       struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
        loff_t endbyte;
        int err;
 
-       written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
-                                           count, ocount);
+       written = generic_file_direct_write_iter(iocb, iter, pos, ppos, count);
 
        if (written < 0 || written == count)
                return written;
 
        pos += written;
        count -= written;
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       written_buffered = __btrfs_buffered_write(file, &i, pos);
+       written_buffered = __btrfs_buffered_write(file, iter, pos);
        if (written_buffered < 0) {
                err = written_buffered;
                goto out;
@@ -1612,9 +1608,8 @@ static void update_time_for_write(struct inode *inode)
                inode_inc_iversion(inode);
 }
 
-static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs, loff_t pos)
+static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+                                    struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
@@ -1623,17 +1618,12 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        u64 start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
-       size_t count, ocount;
+       size_t count;
        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
 
        mutex_lock(&inode->i_mutex);
 
-       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-       if (err) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
-       }
-       count = ocount;
+       count = iov_iter_count(iter);
 
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
@@ -1686,14 +1676,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                atomic_inc(&BTRFS_I(inode)->sync_writers);
 
        if (unlikely(file->f_flags & O_DIRECT)) {
-               num_written = __btrfs_direct_write(iocb, iov, nr_segs,
-                                                  pos, ppos, count, ocount);
+               num_written = __btrfs_direct_write(iocb, iter, pos, ppos,
+                                                  count);
        } else {
-               struct iov_iter i;
-
-               iov_iter_init(&i, iov, nr_segs, count, num_written);
-
-               num_written = __btrfs_buffered_write(file, &i, pos);
+               num_written = __btrfs_buffered_write(file, iter, pos);
                if (num_written > 0)
                        *ppos = pos + num_written;
        }
@@ -1859,8 +1845,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        ret = btrfs_log_dentry_safe(trans, root, dentry);
        if (ret < 0) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
+               /* Fallthrough and commit/free transaction. */
+               ret = 1;
        }
 
        /* we've logged all the items and now have a consistent
@@ -2552,9 +2538,9 @@ const struct file_operations btrfs_file_operations = {
        .llseek         = btrfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
-       .aio_write      = btrfs_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = btrfs_file_write_iter,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
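
Note: switching btrfs_file_operations to .read_iter/.write_iter means the write path receives a struct iov_iter instead of a raw iovec array, and the total byte count comes from iov_iter_count() rather than the removed generic_segment_checks() call (which also did access validation, not shown here). A simplified userspace model of that container, a toy struct rather than the kernel's definition, shows what the count accessor replaces:

    #include <stddef.h>
    #include <sys/uio.h>

    /* Toy model of an iov_iter: the segments plus a cached remaining count. */
    struct toy_iov_iter {
        const struct iovec *iov;
        unsigned long nr_segs;
        size_t count;            /* total bytes left in all segments */
    };

    static size_t toy_iov_iter_count(const struct toy_iov_iter *i)
    {
        return i->count;         /* what iov_iter_count() hands back */
    }

    /* The length accounting the old per-segment loop effectively computed. */
    static size_t sum_segments(const struct iovec *iov, unsigned long nr_segs)
    {
        size_t total = 0;
        for (unsigned long s = 0; s < nr_segs; s++)
            total += iov[s].iov_len;
        return total;
    }
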
index 3f0ddfce96e6ff2bdaaef019ea7ee00f9af070af..b4f9904c4c6b2ed5f0da30ad664baf33100a879c 100644 (file)
@@ -1431,13 +1431,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
        ctl->free_space += bytes;
 }
 
+/*
+ * If we cannot find a suitable extent, we use *bytes to record the size
+ * of the largest extent found.
+ */
 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
 {
        unsigned long found_bits = 0;
+       unsigned long max_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;
+       unsigned long extent_bits;
 
        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
@@ -1446,9 +1452,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
        for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP, i);
-               if ((next_zero - i) >= bits) {
-                       found_bits = next_zero - i;
+               extent_bits = next_zero - i;
+               if (extent_bits >= bits) {
+                       found_bits = extent_bits;
                        break;
+               } else if (extent_bits > max_bits) {
+                       max_bits = extent_bits;
                }
                i = next_zero;
        }
@@ -1459,38 +1468,41 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                return 0;
        }
 
+       *bytes = (u64)(max_bits) * ctl->unit;
        return -1;
 }
 
+/* Cache the size of the max extent in bytes */
 static struct btrfs_free_space *
 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
-               unsigned long align)
+               unsigned long align, u64 *max_extent_size)
 {
        struct btrfs_free_space *entry;
        struct rb_node *node;
-       u64 ctl_off;
        u64 tmp;
        u64 align_off;
        int ret;
 
        if (!ctl->free_space_offset.rb_node)
-               return NULL;
+               goto out;
 
        entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
        if (!entry)
-               return NULL;
+               goto out;
 
        for (node = &entry->offset_index; node; node = rb_next(node)) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
-               if (entry->bytes < *bytes)
+               if (entry->bytes < *bytes) {
+                       if (entry->bytes > *max_extent_size)
+                               *max_extent_size = entry->bytes;
                        continue;
+               }
 
                /* make sure the space returned is big enough
                 * to match our requested alignment
                 */
                if (*bytes >= align) {
-                       ctl_off = entry->offset - ctl->start;
-                       tmp = ctl_off + align - 1;;
+                       tmp = entry->offset - ctl->start + align - 1;
                        do_div(tmp, align);
                        tmp = tmp * align + ctl->start;
                        align_off = tmp - entry->offset;
@@ -1499,14 +1511,22 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
                        tmp = entry->offset;
                }
 
-               if (entry->bytes < *bytes + align_off)
+               if (entry->bytes < *bytes + align_off) {
+                       if (entry->bytes > *max_extent_size)
+                               *max_extent_size = entry->bytes;
                        continue;
+               }
 
                if (entry->bitmap) {
-                       ret = search_bitmap(ctl, entry, &tmp, bytes);
+                       u64 size = *bytes;
+
+                       ret = search_bitmap(ctl, entry, &tmp, &size);
                        if (!ret) {
                                *offset = tmp;
+                               *bytes = size;
                                return entry;
+                       } else if (size > *max_extent_size) {
+                               *max_extent_size = size;
                        }
                        continue;
                }
@@ -1515,7 +1535,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
                *bytes = entry->bytes - align_off;
                return entry;
        }
-
+out:
        return NULL;
 }
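
Note: the rewritten alignment math folds ctl_off into one expression: the candidate offset, measured from ctl->start, is rounded up to the stripe alignment and translated back into an absolute offset. A small sketch of that round-up relative to a base (plain 64-bit division stands in for do_div()):

    #include <stdint.h>

    /* Round "offset" up to the next multiple of "align", where multiples
     * are counted from "start" rather than from zero. */
    static uint64_t align_up_from(uint64_t offset, uint64_t start, uint64_t align)
    {
        uint64_t tmp = offset - start + align - 1;
        tmp /= align;                 /* do_div(tmp, align) in the kernel */
        return tmp * align + start;
    }

    /* Example: align_up_from(1000, 4, 64) == 1028, the first offset >= 1000
     * that sits at start + a multiple of 64. */
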
 
@@ -2116,7 +2136,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 }
 
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-                              u64 offset, u64 bytes, u64 empty_size)
+                              u64 offset, u64 bytes, u64 empty_size,
+                              u64 *max_extent_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
@@ -2127,7 +2148,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 
        spin_lock(&ctl->tree_lock);
        entry = find_free_space(ctl, &offset, &bytes_search,
-                               block_group->full_stripe_len);
+                               block_group->full_stripe_len, max_extent_size);
        if (!entry)
                goto out;
 
@@ -2137,7 +2158,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                if (!entry->bytes)
                        free_bitmap(ctl, entry);
        } else {
-
                unlink_free_space(ctl, entry);
                align_gap_len = offset - entry->offset;
                align_gap = entry->offset;
@@ -2151,7 +2171,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                else
                        link_free_space(ctl, entry);
        }
-
 out:
        spin_unlock(&ctl->tree_lock);
 
@@ -2206,7 +2225,8 @@ int btrfs_return_cluster_to_free_space(
 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_cluster *cluster,
                                   struct btrfs_free_space *entry,
-                                  u64 bytes, u64 min_start)
+                                  u64 bytes, u64 min_start,
+                                  u64 *max_extent_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int err;
@@ -2218,8 +2238,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
        search_bytes = bytes;
 
        err = search_bitmap(ctl, entry, &search_start, &search_bytes);
-       if (err)
+       if (err) {
+               if (search_bytes > *max_extent_size)
+                       *max_extent_size = search_bytes;
                return 0;
+       }
 
        ret = search_start;
        __bitmap_clear_bits(ctl, entry, ret, bytes);
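
Note: search_bitmap() now remembers the longest run of set bits it passed over, so even a failed search tells callers such as btrfs_alloc_from_bitmap() how large the biggest free chunk in the bitmap is. A compact standalone version of that scan over a single 64-bit word (the kernel walks much larger bitmaps with find_next_zero_bit()):

    #include <stdint.h>

    /* Find a run of at least "want" consecutive set bits. Returns the start
     * bit on success (with *found = run length), or -1 with the longest run
     * seen recorded in *max_run. */
    static int find_run(uint64_t map, unsigned want,
                        unsigned *found, unsigned *max_run)
    {
        unsigned i = 0, j, run;

        *max_run = 0;
        while (i < 64) {
            if (!(map >> i & 1)) {        /* skip clear bits */
                i++;
                continue;
            }
            for (j = i; j < 64 && (map >> j & 1); j++)
                ;
            run = j - i;
            if (run >= want) {
                *found = run;
                return (int)i;
            }
            if (run > *max_run)
                *max_run = run;
            i = j;
        }
        return -1;
    }
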
@@ -2234,7 +2257,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
  */
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
-                            u64 min_start)
+                            u64 min_start, u64 *max_extent_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
@@ -2254,6 +2277,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 
        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while(1) {
+               if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+                       *max_extent_size = entry->bytes;
+
                if (entry->bytes < bytes ||
                    (!entry->bitmap && entry->offset < min_start)) {
                        node = rb_next(&entry->offset_index);
@@ -2267,7 +2293,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                if (entry->bitmap) {
                        ret = btrfs_alloc_from_bitmap(block_group,
                                                      cluster, entry, bytes,
-                                                     cluster->window_start);
+                                                     cluster->window_start,
+                                                     max_extent_size);
                        if (ret == 0) {
                                node = rb_next(&entry->offset_index);
                                if (!node)
index c7490416747656e1f3f119c5dc4f91cbf315e2ca..e737f92cf6d0b69ffbcc1e94882abc8693c3e3a5 100644 (file)
@@ -94,7 +94,8 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
                                     *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-                              u64 offset, u64 bytes, u64 empty_size);
+                              u64 offset, u64 bytes, u64 empty_size,
+                              u64 *max_extent_size);
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes);
@@ -105,7 +106,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
-                            u64 min_start);
+                            u64 min_start, u64 *max_extent_size);
 int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster);
index f338c5672d583a27dddc37c5e217fa6a8995e271..e900216d89d0ab478e4f53f45b32c5c142018fd9 100644 (file)
@@ -4688,11 +4688,11 @@ static void inode_tree_add(struct inode *inode)
        struct btrfs_inode *entry;
        struct rb_node **p;
        struct rb_node *parent;
+       struct rb_node *new = &BTRFS_I(inode)->rb_node;
        u64 ino = btrfs_ino(inode);
 
        if (inode_unhashed(inode))
                return;
-again:
        parent = NULL;
        spin_lock(&root->inode_lock);
        p = &root->inode_tree.rb_node;
@@ -4707,14 +4707,14 @@ again:
                else {
                        WARN_ON(!(entry->vfs_inode.i_state &
                                  (I_WILL_FREE | I_FREEING)));
-                       rb_erase(parent, &root->inode_tree);
+                       rb_replace_node(parent, new, &root->inode_tree);
                        RB_CLEAR_NODE(parent);
                        spin_unlock(&root->inode_lock);
-                       goto again;
+                       return;
                }
        }
-       rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
-       rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
+       rb_link_node(new, parent, p);
+       rb_insert_color(new, &root->inode_tree);
        spin_unlock(&root->inode_lock);
 }
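
Note: inode_tree_add() no longer erases the dying node and restarts the walk; it swaps the new inode's rb_node into the dying node's slot with rb_replace_node(), keeping the tree shape intact. The idea, stripped of the red-black colouring, is to copy the victim's links and repoint its parent and children, as in this toy binary-tree sketch (not the kernel's rbtree code):

    #include <stddef.h>

    struct node {
        struct node *parent, *left, *right;
    };

    /* Replace "victim" with "new" in place: same position, same children.
     * rb_replace_node() does the equivalent and also carries the colour. */
    static void replace_node(struct node *victim, struct node *new,
                             struct node **root)
    {
        struct node *parent = victim->parent;

        *new = *victim;                  /* copy parent/left/right links */

        if (!parent)
            *root = new;
        else if (parent->left == victim)
            parent->left = new;
        else
            parent->right = new;

        if (new->left)
            new->left->parent = new;
        if (new->right)
            new->right->parent = new;
    }
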
 
@@ -6437,6 +6437,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 
        if (btrfs_extent_readonly(root, disk_bytenr))
                goto out;
+       btrfs_release_path(path);
 
        /*
         * look for other files referencing this extent, if we
@@ -7154,8 +7155,7 @@ free_ordered:
 }
 
 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        int seg;
        int i;
@@ -7169,35 +7169,50 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                goto out;
 
        /* Check the memory alignment.  Blocks cannot straddle pages */
-       for (seg = 0; seg < nr_segs; seg++) {
-               addr = (unsigned long)iov[seg].iov_base;
-               size = iov[seg].iov_len;
-               end += size;
-               if ((addr & blocksize_mask) || (size & blocksize_mask))
-                       goto out;
+       if (iov_iter_has_iovec(iter)) {
+               const struct iovec *iov = iov_iter_iovec(iter);
+
+               for (seg = 0; seg < iter->nr_segs; seg++) {
+                       addr = (unsigned long)iov[seg].iov_base;
+                       size = iov[seg].iov_len;
+                       end += size;
+                       if ((addr & blocksize_mask) || (size & blocksize_mask))
+                               goto out;
 
-               /* If this is a write we don't need to check anymore */
-               if (rw & WRITE)
-                       continue;
+                       /* If this is a write we don't need to check anymore */
+                       if (rw & WRITE)
+                               continue;
 
-               /*
-                * Check to make sure we don't have duplicate iov_base's in this
-                * iovec, if so return EINVAL, otherwise we'll get csum errors
-                * when reading back.
-                */
-               for (i = seg + 1; i < nr_segs; i++) {
-                       if (iov[seg].iov_base == iov[i].iov_base)
+                       /*
+                       * Check to make sure we don't have duplicate iov_base's
+                       * in this iovec, if so return EINVAL, otherwise we'll
+                       * get csum errors when reading back.
+                       */
+                       for (i = seg + 1; i < iter->nr_segs; i++) {
+                               if (iov[seg].iov_base == iov[i].iov_base)
+                                       goto out;
+                       }
+               }
+       } else if (iov_iter_has_bvec(iter)) {
+               struct bio_vec *bvec = iov_iter_bvec(iter);
+
+               for (seg = 0; seg < iter->nr_segs; seg++) {
+                       addr = (unsigned long)bvec[seg].bv_offset;
+                       size = bvec[seg].bv_len;
+                       end += size;
+                       if ((addr & blocksize_mask) || (size & blocksize_mask))
                                goto out;
                }
-       }
+       } else
+               BUG();
+
        retval = 0;
 out:
        return retval;
 }
 
 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
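
Note: check_direct_IO() now has to validate both representations an iov_iter can carry, a user iovec array or a kernel bio_vec array, but in either case the test is the same: each segment's start and length must be a multiple of the block size, which reduces to a mask check. A standalone sketch of that per-segment test (the struct is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct seg { uint64_t addr; size_t len; };

    /* Neither the address nor the length may have bits set below the block
     * size; blocksize is assumed to be a power of two. */
    static bool segments_block_aligned(const struct seg *segs, size_t n,
                                       uint64_t blocksize)
    {
        uint64_t mask = blocksize - 1;

        for (size_t i = 0; i < n; i++)
            if ((segs[i].addr & mask) || (segs[i].len & mask))
                return false;
        return true;
    }
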
@@ -7207,8 +7222,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        bool relock = false;
        ssize_t ret;
 
-       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
-                           offset, nr_segs))
+       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
                return 0;
 
        atomic_inc(&inode->i_dio_count);
@@ -7220,7 +7234,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
         * call btrfs_wait_ordered_range to make absolutely sure that any
         * outstanding dirty pages are on disk.
         */
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        btrfs_wait_ordered_range(inode, offset, count);
 
        if (rw & WRITE) {
@@ -7245,7 +7259,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 
        ret = __blockdev_direct_IO(rw, iocb, inode,
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-                       iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+                       iter, offset, btrfs_get_blocks_direct, NULL,
                        btrfs_submit_direct, flags);
        if (rw & WRITE) {
                if (ret < 0 && ret != -EIOCBQUEUED)
@@ -7986,7 +8000,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 
        /* check for collisions, even if the  name isn't there */
-       ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
+       ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
                             new_dentry->d_name.name,
                             new_dentry->d_name.len);
 
@@ -8216,6 +8230,10 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 
                work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
                if (unlikely(!work)) {
+                       if (delay_iput)
+                               btrfs_add_delayed_iput(inode);
+                       else
+                               iput(inode);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -8613,11 +8631,13 @@ static const struct inode_operations btrfs_dir_inode_operations = {
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 
 static const struct file_operations btrfs_dir_file_operations = {
index 1a5b9462dd9ae2639513237358c2f55662bf366f..6bbf316764d7a1e1c941163dcdb5230398751c4a 100644 (file)
@@ -321,7 +321,7 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
 
 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
        struct btrfs_device *device;
        struct request_queue *q;
        struct fstrim_range range;
@@ -574,7 +574,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                return ret;
 
-       btrfs_wait_ordered_extents(root, 0);
+       btrfs_wait_ordered_extents(root);
 
        pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
        if (!pending_snapshot)
@@ -2098,7 +2098,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                                             void __user *arg)
 {
-       struct dentry *parent = fdentry(file);
+       struct dentry *parent = file->f_path.dentry;
        struct dentry *dentry;
        struct inode *dir = parent->d_inode;
        struct inode *inode;
@@ -2696,9 +2696,9 @@ out_unlock:
 static long btrfs_ioctl_file_extent_same(struct file *file,
                                         void __user *argp)
 {
-       struct btrfs_ioctl_same_args *args = argp;
-       struct btrfs_ioctl_same_args same;
-       struct btrfs_ioctl_same_extent_info info;
+       struct btrfs_ioctl_same_args tmp;
+       struct btrfs_ioctl_same_args *same;
+       struct btrfs_ioctl_same_extent_info *info;
        struct inode *src = file->f_dentry->d_inode;
        struct file *dst_file = NULL;
        struct inode *dst;
@@ -2706,6 +2706,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
        u64 len;
        int i;
        int ret;
+       unsigned long size;
        u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
        bool is_admin = capable(CAP_SYS_ADMIN);
 
@@ -2716,15 +2717,30 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
        if (ret)
                return ret;
 
-       if (copy_from_user(&same,
+       if (copy_from_user(&tmp,
                           (struct btrfs_ioctl_same_args __user *)argp,
-                          sizeof(same))) {
+                          sizeof(tmp))) {
                ret = -EFAULT;
                goto out;
        }
 
-       off = same.logical_offset;
-       len = same.length;
+       size = sizeof(tmp) +
+               tmp.dest_count * sizeof(struct btrfs_ioctl_same_extent_info);
+
+       same = kmalloc(size, GFP_NOFS);
+       if (!same) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (copy_from_user(same,
+                          (struct btrfs_ioctl_same_args __user *)argp, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       off = same->logical_offset;
+       len = same->length;
 
        /*
         * Limit the total length we will dedupe for each operation.
@@ -2752,27 +2768,28 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
        if (!S_ISREG(src->i_mode))
                goto out;
 
-       ret = 0;
-       for (i = 0; i < same.dest_count; i++) {
-               if (copy_from_user(&info, &args->info[i], sizeof(info))) {
-                       ret = -EFAULT;
-                       goto out;
-               }
+       /* pre-format output fields to sane values */
+       for (i = 0; i < same->dest_count; i++) {
+               same->info[i].bytes_deduped = 0ULL;
+               same->info[i].status = 0;
+       }
 
-               info.bytes_deduped = 0;
+       ret = 0;
+       for (i = 0; i < same->dest_count; i++) {
+               info = &same->info[i];
 
-               dst_file = fget(info.fd);
+               dst_file = fget(info->fd);
                if (!dst_file) {
-                       info.status = -EBADF;
+                       info->status = -EBADF;
                        goto next;
                }
 
                if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) {
-                       info.status = -EINVAL;
+                       info->status = -EINVAL;
                        goto next;
                }
 
-               info.status = -EXDEV;
+               info->status = -EXDEV;
                if (file->f_path.mnt != dst_file->f_path.mnt)
                        goto next;
 
@@ -2781,32 +2798,29 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
                        goto next;
 
                if (S_ISDIR(dst->i_mode)) {
-                       info.status = -EISDIR;
+                       info->status = -EISDIR;
                        goto next;
                }
 
                if (!S_ISREG(dst->i_mode)) {
-                       info.status = -EACCES;
+                       info->status = -EACCES;
                        goto next;
                }
 
-               info.status = btrfs_extent_same(src, off, len, dst,
-                                               info.logical_offset);
-               if (info.status == 0)
-                       info.bytes_deduped += len;
+               info->status = btrfs_extent_same(src, off, len, dst,
+                                               info->logical_offset);
+               if (info->status == 0)
+                       info->bytes_deduped += len;
 
 next:
                if (dst_file)
                        fput(dst_file);
-
-               if (__put_user_unaligned(info.status, &args->info[i].status) ||
-                   __put_user_unaligned(info.bytes_deduped,
-                                        &args->info[i].bytes_deduped)) {
-                       ret = -EFAULT;
-                       goto out;
-               }                                                               
        }
 
+       ret = copy_to_user(argp, same, size);
+       if (ret)
+               ret = -EFAULT;
+
 out:
        mnt_drop_write_file(file);
        return ret;
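
Note: the rework above stops dereferencing the user pointer directly: it copies the fixed-size header to learn dest_count, sizes a kernel buffer for the header plus the flexible info[] array, copies the whole block in, and on the way out returns all the per-destination status fields with a single copy_to_user() of the same size. A userspace-flavoured sketch of that two-step read of a flexible-array structure (struct and field names are illustrative):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct extent_info { int64_t status; uint64_t bytes_deduped; };
    struct same_args   { uint64_t dest_count; struct extent_info info[]; };

    /* Read a variable-length argument block: peek at the fixed header to
     * learn how many trailing records follow, then allocate and copy the
     * full thing in one go. */
    static struct same_args *read_args(const void *src)
    {
        struct same_args hdr;
        struct same_args *args;
        size_t size;

        memcpy(&hdr, src, sizeof(hdr));              /* copy_from_user #1 */
        size = sizeof(hdr) + hdr.dest_count * sizeof(struct extent_info);

        args = malloc(size);                         /* kmalloc */
        if (!args)
            return NULL;
        memcpy(args, src, size);                     /* copy_from_user #2 */
        return args;
    }
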
@@ -3105,7 +3119,7 @@ out:
 static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                       u64 off, u64 olen, u64 destoff)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct fd src_file;
        struct inode *src;
@@ -3310,7 +3324,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
        }
 
        if (!objectid)
-               objectid = root->root_key.objectid;
+               objectid = BTRFS_FS_TREE_OBJECTID;
 
        location.objectid = objectid;
        location.type = BTRFS_ROOT_ITEM_KEY;
@@ -4303,7 +4317,7 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
index 966b413a33b800d096eda96d662bb3a66770145f..c702cb62f78a310c8d430fe0d98cfb1173479a0f 100644 (file)
@@ -563,11 +563,10 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
  * wait for all the ordered extents in a root.  This is done when balancing
  * space between drives.
  */
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root)
 {
        struct list_head splice, works;
        struct btrfs_ordered_extent *ordered, *next;
-       struct inode *inode;
 
        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);
@@ -580,15 +579,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
                                           root_extent_list);
                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
-               /*
-                * the inode may be getting freed (in sys_unlink path).
-                */
-               inode = igrab(ordered->inode);
-               if (!inode) {
-                       cond_resched_lock(&root->ordered_extent_lock);
-                       continue;
-               }
-
                atomic_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);
 
@@ -605,21 +595,13 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
-
-               inode = ordered->inode;
                btrfs_put_ordered_extent(ordered);
-               if (delay_iput)
-                       btrfs_add_delayed_iput(inode);
-               else
-                       iput(inode);
-
                cond_resched();
        }
        mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
-                                   int delay_iput)
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info)
 {
        struct btrfs_root *root;
        struct list_head splice;
@@ -637,7 +619,7 @@ void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
 
-               btrfs_wait_ordered_extents(root, delay_iput);
+               btrfs_wait_ordered_extents(root);
                btrfs_put_fs_root(root);
 
                spin_lock(&fs_info->ordered_root_lock);
index d9a5aa097b4fea86cabf16e649e11775555de260..0c0b35612d7ad1fc5f5c7db5d267e70f319d8529 100644 (file)
@@ -195,9 +195,8 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
-                                   int delay_iput);
+void btrfs_wait_ordered_extents(struct btrfs_root *root);
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info);
 void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
 void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
 void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
index aacc2121e87c5df2f7dac46e90daca2514a7912f..4a355726151ec05dd8e1110745648949888781e8 100644 (file)
@@ -588,7 +588,7 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
        else
                key.offset = (u64)-1;
 
-       return btrfs_read_fs_root_no_name(fs_info, &key);
+       return btrfs_get_fs_root(fs_info, &key, false);
 }
 
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
@@ -1548,7 +1548,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
               btrfs_file_extent_other_encoding(leaf, fi));
 
        if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
-               ret = 1;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1579,7 +1579,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
        u64 end;
        u32 nritems;
        u32 i;
-       int ret;
+       int ret = 0;
        int first = 1;
        int dirty = 0;
 
@@ -1642,11 +1642,13 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 
                ret = get_new_location(rc->data_inode, &new_bytenr,
                                       bytenr, num_bytes);
-               if (ret > 0) {
-                       WARN_ON(1);
-                       continue;
+               if (ret) {
+                       /*
+                        * Don't have to abort since we've not changed anything
+                        * in the file extent yet.
+                        */
+                       break;
                }
-               BUG_ON(ret < 0);
 
                btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
                dirty = 1;
@@ -1656,18 +1658,24 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                                           num_bytes, parent,
                                           btrfs_header_owner(leaf),
                                           key.objectid, key.offset, 1);
-               BUG_ON(ret);
+               if (ret) {
+                       btrfs_abort_transaction(trans, root, ret);
+                       break;
+               }
 
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        parent, btrfs_header_owner(leaf),
                                        key.objectid, key.offset, 1);
-               BUG_ON(ret);
+               if (ret) {
+                       btrfs_abort_transaction(trans, root, ret);
+                       break;
+               }
        }
        if (dirty)
                btrfs_mark_buffer_dirty(leaf);
        if (inode)
                btrfs_add_delayed_iput(inode);
-       return 0;
+       return ret;
 }
 
 static noinline_for_stack
@@ -4238,7 +4246,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
                err = ret;
                goto out;
        }
-       btrfs_wait_all_ordered_extents(fs_info, 0);
+       btrfs_wait_all_ordered_extents(fs_info);
 
        while (1) {
                mutex_lock(&fs_info->cleaner_mutex);
@@ -4499,19 +4507,19 @@ out:
        return ret;
 }
 
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, struct extent_buffer *buf,
-                          struct extent_buffer *cow)
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+                         struct btrfs_root *root, struct extent_buffer *buf,
+                         struct extent_buffer *cow)
 {
        struct reloc_control *rc;
        struct backref_node *node;
        int first_cow = 0;
        int level;
-       int ret;
+       int ret = 0;
 
        rc = root->fs_info->reloc_ctl;
        if (!rc)
-               return;
+               return 0;
 
        BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
               root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
@@ -4547,10 +4555,9 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
                        rc->nodes_relocated += buf->len;
        }
 
-       if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
+       if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
                ret = replace_file_extents(trans, rc, root, cow);
-               BUG_ON(ret);
-       }
+       return ret;
 }
 
 /*
index 0b1f4ef8db987da12951128f092a052018d5c6b9..ec71ea44d2b4626c9a2bcc73b5fb94af666eaf5b 100644 (file)
@@ -299,11 +299,6 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                        continue;
                }
 
-               if (btrfs_root_refs(&root->root_item) == 0) {
-                       btrfs_add_dead_root(root);
-                       continue;
-               }
-
                err = btrfs_init_fs_root(root);
                if (err) {
                        btrfs_free_fs_root(root);
@@ -318,6 +313,9 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                        btrfs_free_fs_root(root);
                        break;
                }
+
+               if (btrfs_root_refs(&root->root_item) == 0)
+                       btrfs_add_dead_root(root);
        }
 
        btrfs_free_path(path);
index 0afcd452fcb3d62c9804089ef1cb056687917784..a18e0e23f6a6742cd21277702ed6599df84c32c3 100644 (file)
@@ -158,12 +158,20 @@ struct scrub_fixup_nodatasum {
        int                     mirror_num;
 };
 
+struct scrub_nocow_inode {
+       u64                     inum;
+       u64                     offset;
+       u64                     root;
+       struct list_head        list;
+};
+
 struct scrub_copy_nocow_ctx {
        struct scrub_ctx        *sctx;
        u64                     logical;
        u64                     len;
        int                     mirror_num;
        u64                     physical_for_dev_replace;
+       struct list_head        inodes;
        struct btrfs_work       work;
 };
 
@@ -245,7 +253,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
 static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page);
 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
-                                     void *ctx);
+                                     struct scrub_copy_nocow_ctx *ctx);
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
@@ -3126,12 +3134,30 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
        nocow_ctx->work.func = copy_nocow_pages_worker;
+       INIT_LIST_HEAD(&nocow_ctx->inodes);
        btrfs_queue_worker(&fs_info->scrub_nocow_workers,
                           &nocow_ctx->work);
 
        return 0;
 }
 
+static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
+{
+       struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
+       struct scrub_nocow_inode *nocow_inode;
+
+       nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
+       if (!nocow_inode)
+               return -ENOMEM;
+       nocow_inode->inum = inum;
+       nocow_inode->offset = offset;
+       nocow_inode->root = root;
+       list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
+       return 0;
+}
+
+#define COPY_COMPLETE 1
+
 static void copy_nocow_pages_worker(struct btrfs_work *work)
 {
        struct scrub_copy_nocow_ctx *nocow_ctx =
@@ -3167,8 +3193,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
        }
 
        ret = iterate_inodes_from_logical(logical, fs_info, path,
-                                         copy_nocow_pages_for_inode,
-                                         nocow_ctx);
+                                         record_inode_for_nocow, nocow_ctx);
        if (ret != 0 && ret != -ENOENT) {
                pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
                        logical, physical_for_dev_replace, len, mirror_num,
@@ -3177,7 +3202,33 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
                goto out;
        }
 
+       btrfs_end_transaction(trans, root);
+       trans = NULL;
+       while (!list_empty(&nocow_ctx->inodes)) {
+               struct scrub_nocow_inode *entry;
+               entry = list_first_entry(&nocow_ctx->inodes,
+                                        struct scrub_nocow_inode,
+                                        list);
+               list_del_init(&entry->list);
+               ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
+                                                entry->root, nocow_ctx);
+               kfree(entry);
+               if (ret == COPY_COMPLETE) {
+                       ret = 0;
+                       break;
+               } else if (ret) {
+                       break;
+               }
+       }
 out:
+       while (!list_empty(&nocow_ctx->inodes)) {
+               struct scrub_nocow_inode *entry;
+               entry = list_first_entry(&nocow_ctx->inodes,
+                                        struct scrub_nocow_inode,
+                                        list);
+               list_del_init(&entry->list);
+               kfree(entry);
+       }
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, root);
        if (not_written)
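
Note: copy_nocow_pages_worker() is split into two phases: while the transaction is held it only records (inum, offset, root) triples on a local list, then it ends the transaction and walks the list doing the actual page copies, freeing entries as it goes and draining whatever remains on the error path. A compact sketch of that collect-then-process shape using a singly linked list (a simplification of the kernel's list_head usage):

    #include <stdint.h>
    #include <stdlib.h>

    struct nocow_inode {
        uint64_t inum, offset, root;
        struct nocow_inode *next;
    };

    /* Phase 1: cheap, done while the "transaction" is held. */
    static int record_inode(struct nocow_inode **head,
                            uint64_t inum, uint64_t offset, uint64_t root)
    {
        struct nocow_inode *e = malloc(sizeof(*e));
        if (!e)
            return -1;
        e->inum = inum; e->offset = offset; e->root = root;
        e->next = *head;
        *head = e;
        return 0;
    }

    /* Phase 2: after dropping the transaction, process and free each entry;
     * once an error occurs the rest of the list is only drained. */
    static int process_inodes(struct nocow_inode **head,
                              int (*copy)(const struct nocow_inode *))
    {
        int ret = 0;

        while (*head) {
            struct nocow_inode *e = *head;
            *head = e->next;
            if (!ret)
                ret = copy(e);
            free(e);
        }
        return ret;
    }
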
@@ -3190,20 +3241,25 @@ out:
        scrub_pending_trans_workers_dec(sctx);
 }
 
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
+static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
+                                     struct scrub_copy_nocow_ctx *nocow_ctx)
 {
-       struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
        struct btrfs_key key;
        struct inode *inode;
        struct page *page;
        struct btrfs_root *local_root;
+       struct btrfs_ordered_extent *ordered;
+       struct extent_map *em;
+       struct extent_state *cached_state = NULL;
+       struct extent_io_tree *io_tree;
        u64 physical_for_dev_replace;
-       u64 len;
+       u64 len = nocow_ctx->len;
+       u64 lockstart = offset, lockend = offset + len - 1;
        unsigned long index;
        int srcu_index;
-       int ret;
-       int err;
+       int ret = 0;
+       int err = 0;
 
        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
@@ -3229,9 +3285,33 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
        mutex_lock(&inode->i_mutex);
        inode_dio_wait(inode);
 
-       ret = 0;
        physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
-       len = nocow_ctx->len;
+       io_tree = &BTRFS_I(inode)->io_tree;
+
+       lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+       ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
+       if (ordered) {
+               btrfs_put_ordered_extent(ordered);
+               goto out_unlock;
+       }
+
+       em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
+       if (IS_ERR(em)) {
+               ret = PTR_ERR(em);
+               goto out_unlock;
+       }
+
+       /*
+        * This extent does not actually cover the logical extent anymore,
+        * move on to the next inode.
+        */
+       if (em->block_start > nocow_ctx->logical ||
+           em->block_start + em->block_len < nocow_ctx->logical + len) {
+               free_extent_map(em);
+               goto out_unlock;
+       }
+       free_extent_map(em);
+
        while (len >= PAGE_CACHE_SIZE) {
                index = offset >> PAGE_CACHE_SHIFT;
 again:
@@ -3247,10 +3327,9 @@ again:
                                goto next_page;
                } else {
                        ClearPageError(page);
-                       err = extent_read_full_page(&BTRFS_I(inode)->
-                                                        io_tree,
-                                                       page, btrfs_get_extent,
-                                                       nocow_ctx->mirror_num);
+                       err = extent_read_full_page_nolock(io_tree, page,
+                                                          btrfs_get_extent,
+                                                          nocow_ctx->mirror_num);
                        if (err) {
                                ret = err;
                                goto next_page;
@@ -3264,6 +3343,7 @@ again:
                         * page in the page cache.
                         */
                        if (page->mapping != inode->i_mapping) {
+                               unlock_page(page);
                                page_cache_release(page);
                                goto again;
                        }
@@ -3287,6 +3367,10 @@ next_page:
                physical_for_dev_replace += PAGE_CACHE_SIZE;
                len -= PAGE_CACHE_SIZE;
        }
+       ret = COPY_COMPLETE;
+out_unlock:
+       unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
+                            GFP_NOFS);
 out:
        mutex_unlock(&inode->i_mutex);
        iput(inode);
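
Note: before copying any pages, the inode's extent range is locked and the extent map is checked to still cover the logical range being replaced; if it no longer does, the data has moved since the scrub started and this inode is skipped. The coverage test itself is a plain interval-containment check:

    #include <stdbool.h>
    #include <stdint.h>

    /* Does the mapped extent [map_start, map_start + map_len) still contain
     * the whole logical range [logical, logical + len) we intend to copy? */
    static bool extent_covers(uint64_t map_start, uint64_t map_len,
                              uint64_t logical, uint64_t len)
    {
        return map_start <= logical &&
               map_start + map_len >= logical + len;
    }
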
index 3aab10ce63e84812b8e9c5a9250235db9b1bbe86..e913328d0f2adc9c3beb2c37556a479c7615d2fa 100644 (file)
@@ -921,7 +921,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
                return 0;
        }
 
-       btrfs_wait_all_ordered_extents(fs_info, 1);
+       btrfs_wait_all_ordered_extents(fs_info);
 
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
@@ -1340,6 +1340,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
        } else {
+               if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+                       btrfs_err(fs_info,
+                               "Remounting read-write after error is not allowed\n");
+                       ret = -EINVAL;
+                       goto restore;
+               }
                if (fs_info->fs_devices->rw_devices == 0) {
                        ret = -EACCES;
                        goto restore;
@@ -1377,6 +1383,16 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        pr_warn("btrfs: failed to resume dev_replace\n");
                        goto restore;
                }
+
+               if (!fs_info->uuid_root) {
+                       pr_info("btrfs: creating UUID tree\n");
+                       ret = btrfs_create_uuid_tree(fs_info);
+                       if (ret) {
+                               pr_warn("btrfs: failed to create the uuid tree"
+                                       "%d\n", ret);
+                               goto restore;
+                       }
+               }
                sb->s_flags &= ~MS_RDONLY;
        }
 out:
@@ -1762,6 +1778,9 @@ static void btrfs_print_info(void)
 #ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
 #endif
+#ifdef CONFIG_BTRFS_ASSERT
+                       ", assert=on"
+#endif
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
 #endif
index cac4a3f763230f7e448ca6de68a0859a42b13af8..8c81bdc1ef9bae82c92e5a8836a0f911c1547a55 100644 (file)
@@ -1603,7 +1603,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
        if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
-               btrfs_wait_all_ordered_extents(fs_info, 1);
+               btrfs_wait_all_ordered_extents(fs_info);
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
@@ -1838,11 +1838,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        assert_qgroups_uptodate(trans);
        update_super_roots(root);
 
-       if (!root->fs_info->log_root_recovering) {
-               btrfs_set_super_log_root(root->fs_info->super_copy, 0);
-               btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
-       }
-
+       btrfs_set_super_log_root(root->fs_info->super_copy, 0);
+       btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
        memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
               sizeof(*root->fs_info->super_copy));
 
index 0d9613c3f5e507cc6087decdbc3ef43e3436b396..79f057c0619a5cfe29a6e47dcd3ebeccebce9809 100644 (file)
@@ -93,7 +93,8 @@
  */
 #define LOG_WALK_PIN_ONLY 0
 #define LOG_WALK_REPLAY_INODES 1
-#define LOG_WALK_REPLAY_ALL 2
+#define LOG_WALK_REPLAY_DIR_INDEX 2
+#define LOG_WALK_REPLAY_ALL 3
 
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, struct inode *inode,
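
Note: adding LOG_WALK_REPLAY_DIR_INDEX turns replay into three passes over the log tree: inodes first, then directory index items, then everything else, so directory sizes fixed up in the new pass are in place before the remaining items land. A minimal sketch of driving one callback through ordered stages (the walk itself is greatly simplified; only the stage constants match the hunk above):

    #define LOG_WALK_PIN_ONLY          0
    #define LOG_WALK_REPLAY_INODES     1
    #define LOG_WALK_REPLAY_DIR_INDEX  2
    #define LOG_WALK_REPLAY_ALL        3

    struct walk_ctl {
        int stage;
        int (*process)(struct walk_ctl *wc, int item);
    };

    /* Run the same walk once per replay stage; the callback inspects
     * wc->stage to decide which item types to handle on this pass. */
    static int replay_log(struct walk_ctl *wc, const int *items, int nitems)
    {
        for (int s = LOG_WALK_REPLAY_INODES; s <= LOG_WALK_REPLAY_ALL; s++) {
            wc->stage = s;
            for (int i = 0; i < nitems; i++) {
                int ret = wc->process(wc, items[i]);
                if (ret)
                    return ret;
            }
        }
        return 0;
    }
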
@@ -393,6 +394,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                if (inode_item) {
                        struct btrfs_inode_item *item;
                        u64 nbytes;
+                       u32 mode;
 
                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_inode_item);
@@ -400,9 +402,19 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                        item = btrfs_item_ptr(eb, slot,
                                              struct btrfs_inode_item);
                        btrfs_set_inode_nbytes(eb, item, nbytes);
+
+                       /*
+                        * If this is a directory we need to reset the i_size to
+                        * 0 so that we can set it up properly when replaying
+                        * the rest of the items in this log.
+                        */
+                       mode = btrfs_inode_mode(eb, item);
+                       if (S_ISDIR(mode))
+                               btrfs_set_inode_size(eb, item, 0);
                }
        } else if (inode_item) {
                struct btrfs_inode_item *item;
+               u32 mode;
 
                /*
                 * New inode, set nbytes to 0 so that the nbytes comes out
@@ -410,6 +422,15 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                 */
                item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
                btrfs_set_inode_nbytes(eb, item, 0);
+
+               /*
+                * If this is a directory we need to reset the i_size to 0 so
+                * that we can set it up properly when replaying the rest of
+                * the items in this log.
+                */
+               mode = btrfs_inode_mode(eb, item);
+               if (S_ISDIR(mode))
+                       btrfs_set_inode_size(eb, item, 0);
        }
 insert:
        btrfs_release_path(path);
@@ -1496,6 +1517,7 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
                iput(inode);
                return -EIO;
        }
+
        ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
 
        /* FIXME, put inode into FIXUP list */
@@ -1534,6 +1556,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        u8 log_type;
        int exists;
        int ret = 0;
+       bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
 
        dir = read_one_inode(root, key->objectid);
        if (!dir)
@@ -1604,6 +1627,10 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                goto insert;
 out:
        btrfs_release_path(path);
+       if (!ret && update_size) {
+               btrfs_i_size_write(dir, dir->i_size + name_len * 2);
+               ret = btrfs_update_inode(trans, root, dir);
+       }
        kfree(name);
        iput(dir);
        return ret;
@@ -1614,6 +1641,7 @@ insert:
                              name, name_len, log_type, &log_key);
        if (ret && ret != -ENOENT)
                goto out;
+       update_size = false;
        ret = 0;
        goto out;
 }
@@ -2027,6 +2055,15 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                        if (ret)
                                break;
                }
+
+               if (key.type == BTRFS_DIR_INDEX_KEY &&
+                   wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
+                       ret = replay_one_dir_item(wc->trans, root, path,
+                                                 eb, i, &key);
+                       if (ret)
+                               break;
+               }
+
                if (wc->stage < LOG_WALK_REPLAY_ALL)
                        continue;
 
@@ -2048,8 +2085,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                                                eb, i, &key);
                        if (ret)
                                break;
-               } else if (key.type == BTRFS_DIR_ITEM_KEY ||
-                          key.type == BTRFS_DIR_INDEX_KEY) {
+               } else if (key.type == BTRFS_DIR_ITEM_KEY) {
                        ret = replay_one_dir_item(wc->trans, root, path,
                                                  eb, i, &key);
                        if (ret)
@@ -3805,6 +3841,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
        int ret = 0;
        struct btrfs_root *root;
        struct dentry *old_parent = NULL;
+       struct inode *orig_inode = inode;
 
        /*
         * for regular files, if its inode is already on disk, we don't
@@ -3824,7 +3861,14 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
        }
 
        while (1) {
-               BTRFS_I(inode)->logged_trans = trans->transid;
+               /*
+                * If we are logging a directory then we start with our inode,
+                * not our parent's inode, so we need to skip setting the
+                * logged_trans so that further down in the log code we don't
+                * think this inode has already been logged.
+                */
+               if (inode != orig_inode)
+                       BTRFS_I(inode)->logged_trans = trans->transid;
                smp_mb();
 
                if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
index 0052ca8264d9b37cc171e52e0954b165fd6dad46..043b215769c2c68c538ea147d5cde0721221b833 100644 (file)
@@ -796,7 +796,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                        fs_devices->rotating = 1;
 
                fs_devices->open_devices++;
-               if (device->writeable && !device->is_tgtdev_for_dev_replace) {
+               if (device->writeable &&
+                   device->devid != BTRFS_DEV_REPLACE_DEVID) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
@@ -911,9 +912,9 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        if (disk_super->label[0]) {
                if (disk_super->label[BTRFS_LABEL_SIZE - 1])
                        disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
-               printk(KERN_INFO "device label %s ", disk_super->label);
+               printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
        } else {
-               printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
+               printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
        }
 
        printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
@@ -1715,6 +1716,7 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
                                 struct btrfs_device *srcdev)
 {
        WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
+
        list_del_rcu(&srcdev->dev_list);
        list_del_rcu(&srcdev->dev_alloc_list);
        fs_info->fs_devices->num_devices--;
@@ -1724,9 +1726,13 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
        }
        if (srcdev->can_discard)
                fs_info->fs_devices->num_can_discard--;
-       if (srcdev->bdev)
+       if (srcdev->bdev) {
                fs_info->fs_devices->open_devices--;
 
+               /* zero out the old super */
+               btrfs_scratch_superblock(srcdev);
+       }
+
        call_rcu(&srcdev->rcu, free_device);
 }
 
index 4d7433534f5cd77b7f9b240fba57ac7923df07e6..6024877335caf2a9dfa6af1018c5da19b0e8a2ae 100644 (file)
@@ -1005,9 +1005,19 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
+       gfp_t gfp_mask;
 
-       page = find_or_create_page(inode->i_mapping, index,
-               (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+       gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
+       gfp_mask |= __GFP_MOVABLE;
+       /*
+        * XXX: __getblk_slow() can not really deal with failure and
+        * will endlessly loop on improvised global reclaim.  Prefer
+        * looping in the allocator rather than here, at least that
+        * code knows what it's doing.
+        */
+       gfp_mask |= __GFP_NOFAIL;
+
+       page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;
 
index 43eb5592cdea83c83df854a489edea79d076cfda..00baf1419989f804ad56f1f27ed633f86d2d6628 100644 (file)
@@ -270,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
 #endif
 
        /* delete retired objects */
-       if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
+       if (test_bit(FSCACHE_OBJECT_RETIRED, &object->fscache.flags) &&
            _object != cache->cache.fsdef
            ) {
                _debug("- retire object OBJ%x", object->fscache.debug_id);
index 25badd1aec5c677215f20d9bd970261deab2dd35..f4a08d7fa2f70a58a8513110988cc4928a674bdb 100644 (file)
@@ -56,7 +56,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
                       object->fscache.cookie->parent,
                       object->fscache.cookie->netfs_data,
                       object->fscache.cookie->flags);
-               if (keybuf)
+               if (keybuf && cookie->def)
                        keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
                                                      CACHEFILES_KEYBUF_SIZE);
                else
index 34c88b83e39f1214b0ed7f2c22bc58f5acd00fa4..12b0eef84183be5edb35627b169cbe49a516a961 100644 (file)
@@ -162,8 +162,9 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
 int cachefiles_check_auxdata(struct cachefiles_object *object)
 {
        struct cachefiles_xattr *auxbuf;
+       enum fscache_checkaux validity;
        struct dentry *dentry = object->dentry;
-       unsigned int dlen;
+       ssize_t xlen;
        int ret;
 
        ASSERT(dentry);
@@ -174,22 +175,22 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
        if (!auxbuf)
                return -ENOMEM;
 
-       auxbuf->len = vfs_getxattr(dentry, cachefiles_xattr_cache,
-                                  &auxbuf->type, 512 + 1);
-       if (auxbuf->len < 1)
-               return -ESTALE;
-
-       if (auxbuf->type != object->fscache.cookie->def->type)
-               return -ESTALE;
+       xlen = vfs_getxattr(dentry, cachefiles_xattr_cache,
+                           &auxbuf->type, 512 + 1);
+       ret = -ESTALE;
+       if (xlen < 1 ||
+           auxbuf->type != object->fscache.cookie->def->type)
+               goto error;
 
-       dlen = auxbuf->len - 1;
-       ret = fscache_check_aux(&object->fscache, &auxbuf->data, dlen);
+       xlen--;
+       validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen);
+       if (validity != FSCACHE_CHECKAUX_OKAY)
+               goto error;
 
+       ret = 0;
+error:
        kfree(auxbuf);
-       if (ret != FSCACHE_CHECKAUX_OKAY)
-               return -ESTALE;
-
-       return 0;
+       return ret;
 }
 
 /*
index 6df8bd481425379006912990ee6f9461eaf3cf1b..1cb39e65288619ac4ee88cd6a4bd3ae0aa5ca10d 100644 (file)
@@ -1179,8 +1179,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * never get called.
  */
 static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-                             const struct iovec *iov,
-                             loff_t pos, unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t pos)
 {
        WARN_ON(1);
        return -EINVAL;
index 6bfe65e0b03831280b0e66e96d616be9ee55e6b1..8c44fdd4e1c39f836b2c8a9b2a7a025f1844d3b3 100644 (file)
@@ -68,7 +68,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
 {
        fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                              &ceph_fscache_fsid_object_def,
-                                             fsc);
+                                             fsc, true);
 
        if (fsc->fscache == NULL) {
                pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
@@ -204,7 +204,7 @@ void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
 
        ci->fscache = fscache_acquire_cookie(fsc->fscache,
                                             &ceph_fscache_inode_object_def,
-                                            ci);
+                                            ci, true);
 done:
        mutex_unlock(&inode->i_mutex);
 
@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       if (!PageFsCache(page))
+               return;
+
        fscache_wait_on_page_write(ci->fscache, page);
        fscache_uncache_page(ci->fscache, page);
 }
index 13976c33332ec1fd7ca3999053b15b7079c5ab31..3c0a4bd7499645ca8bf90fd1a6ba16f6831c164c 100644 (file)
@@ -897,7 +897,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
  * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
-void __ceph_remove_cap(struct ceph_cap *cap)
+void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 {
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
@@ -909,6 +909,16 @@ void __ceph_remove_cap(struct ceph_cap *cap)
 
        /* remove from session list */
        spin_lock(&session->s_cap_lock);
+       /*
+        * s_cap_reconnect is protected by s_cap_lock. No one changes
+        * s_cap_gen while session is in the reconnect state.
+        */
+       if (queue_release &&
+           (!session->s_cap_reconnect ||
+            cap->cap_gen == session->s_cap_gen))
+               __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
+                                   cap->mseq, cap->issue_seq);
+
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
@@ -1023,7 +1033,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
        struct ceph_mds_cap_release *head;
        struct ceph_mds_cap_item *item;
 
-       spin_lock(&session->s_cap_lock);
        BUG_ON(!session->s_num_cap_releases);
        msg = list_first_entry(&session->s_cap_releases,
                               struct ceph_msg, list_head);
@@ -1052,7 +1061,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
                     (int)CEPH_CAPS_PER_RELEASE,
                     (int)msg->front.iov_len);
        }
-       spin_unlock(&session->s_cap_lock);
 }
 
 /*
@@ -1067,12 +1075,8 @@ void ceph_queue_caps_release(struct inode *inode)
        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
-               struct ceph_mds_session *session = cap->session;
-
-               __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
-                                   cap->mseq, cap->issue_seq);
                p = rb_next(p);
-               __ceph_remove_cap(cap);
+               __ceph_remove_cap(cap, true);
        }
 }
 
@@ -2791,7 +2795,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
                        }
                        spin_unlock(&mdsc->cap_dirty_lock);
                }
-               __ceph_remove_cap(cap);
+               __ceph_remove_cap(cap, false);
        }
        /* else, we already released it */
 
@@ -2931,9 +2935,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        if (!inode) {
                dout(" i don't have ino %llx\n", vino.ino);
 
-               if (op == CEPH_CAP_OP_IMPORT)
+               if (op == CEPH_CAP_OP_IMPORT) {
+                       spin_lock(&session->s_cap_lock);
                        __queue_cap_release(session, vino.ino, cap_id,
                                            mseq, seq);
+                       spin_unlock(&session->s_cap_lock);
+               }
                goto flush_cap_releases;
        }
 
index 868b61d56cac77f3a8328d5ba4851ec7947fe827..2a0bcaeb189acd18b124aff8d54619667fd97bf2 100644 (file)
@@ -352,8 +352,18 @@ more:
                }
 
                /* note next offset and last dentry name */
+               rinfo = &req->r_reply_info;
+               if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+                       frag = le32_to_cpu(rinfo->dir_dir->frag);
+                       if (ceph_frag_is_leftmost(frag))
+                               fi->next_offset = 2;
+                       else
+                               fi->next_offset = 0;
+                       off = fi->next_offset;
+               }
                fi->offset = fi->next_offset;
                fi->last_readdir = req;
+               fi->frag = frag;
 
                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
@@ -363,7 +373,6 @@ more:
                        else
                                fi->next_offset = 0;
                } else {
-                       rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
index 3de89829e2a162ab6bce2a58296b25aef9235c43..c4419e848a4f89eced64c8a156a560f62bc2cbdd 100644 (file)
@@ -408,51 +408,92 @@ more:
  *
  * If the read spans object boundary, just do multiple reads.
  */
-static ssize_t ceph_sync_read(struct file *file, char __user *data,
-                             unsigned len, loff_t *poff, int *checkeof)
+static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
+                               int *checkeof)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct page **pages;
-       u64 off = *poff;
+       u64 off = iocb->ki_pos;
        int num_pages, ret;
+       size_t len = i->count;
 
-       dout("sync_read on file %p %llu~%u %s\n", file, off, len,
+       dout("sync_read on file %p %llu~%u %s\n", file, off,
+            (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
-
-       if (file->f_flags & O_DIRECT) {
-               num_pages = calc_pages_for((unsigned long)data, len);
-               pages = ceph_get_direct_page_vector(data, num_pages, true);
-       } else {
-               num_pages = calc_pages_for(off, len);
-               pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
-       }
-       if (IS_ERR(pages))
-               return PTR_ERR(pages);
-
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
-       ret = filemap_write_and_wait(inode->i_mapping);
+       ret = filemap_write_and_wait_range(inode->i_mapping, off,
+                                               off + len);
        if (ret < 0)
-               goto done;
+               return ret;
 
-       ret = striped_read(inode, off, len, pages, num_pages, checkeof,
-                          file->f_flags & O_DIRECT,
-                          (unsigned long)data & ~PAGE_MASK);
+       if (file->f_flags & O_DIRECT) {
+               while (iov_iter_count(i)) {
+                       void __user *data = i->iov[0].iov_base + i->iov_offset;
+                       size_t len = i->iov[0].iov_len - i->iov_offset;
+
+                       num_pages = calc_pages_for((unsigned long)data, len);
+                       pages = ceph_get_direct_page_vector(data,
+                                                           num_pages, true);
+                       if (IS_ERR(pages))
+                               return PTR_ERR(pages);
+
+                       ret = striped_read(inode, off, len,
+                                          pages, num_pages, checkeof,
+                                          1, (unsigned long)data & ~PAGE_MASK);
+                       ceph_put_page_vector(pages, num_pages, true);
+
+                       if (ret <= 0)
+                               break;
+                       off += ret;
+                       iov_iter_advance(i, ret);
+                       if (ret < len)
+                               break;
+               }
+       } else {
+               num_pages = calc_pages_for(off, len);
+               pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
+               if (IS_ERR(pages))
+                       return PTR_ERR(pages);
+               ret = striped_read(inode, off, len, pages,
+                                       num_pages, checkeof, 0, 0);
+               if (ret > 0) {
+                       int l, k = 0;
+                       size_t left = len = ret;
+
+                       while (left) {
+                               void __user *data = i->iov[0].iov_base
+                                                       + i->iov_offset;
+                               l = min(i->iov[0].iov_len - i->iov_offset,
+                                       left);
+
+                               ret = ceph_copy_page_vector_to_user(&pages[k],
+                                                                   data, off,
+                                                                   l);
+                               if (ret > 0) {
+                                       iov_iter_advance(i, ret);
+                                       left -= ret;
+                                       off += ret;
+                                       k = calc_pages_for(iocb->ki_pos,
+                                                          len - left + 1) - 1;
+                                       BUG_ON(k >= num_pages && left);
+                               } else
+                                       break;
+                       }
+               }
+               ceph_release_page_vector(pages, num_pages);
+       }
 
-       if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
-               ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
-       if (ret >= 0)
-               *poff = off + ret;
+       if (off > iocb->ki_pos) {
+               ret = off - iocb->ki_pos;
+               iocb->ki_pos = off;
+       }
 
-done:
-       if (file->f_flags & O_DIRECT)
-               ceph_put_page_vector(pages, num_pages, true);
-       else
-               ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
 }
@@ -489,83 +530,79 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
        }
 }
 
+
 /*
- * Synchronous write, straight from __user pointer or user pages (if
- * O_DIRECT).
+ * Synchronous write, straight from __user pointer or user pages.
  *
  * If write spans object boundary, just do multiple writes.  (For a
  * correct atomic write, we should e.g. take write locks on all
  * objects, rollback on failure, etc.)
  */
-static ssize_t ceph_sync_write(struct file *file, const char __user *data,
-                              size_t left, loff_t pos, loff_t *ppos)
+static ssize_t
+ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
+                      unsigned long nr_segs, size_t count)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_snap_context *snapc;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
-       int num_ops = 1;
        struct page **pages;
        int num_pages;
-       u64 len;
        int written = 0;
        int flags;
        int check_caps = 0;
-       int page_align, io_align;
-       unsigned long buf_align;
+       int page_align;
        int ret;
        struct timespec mtime = CURRENT_TIME;
-       bool own_pages = false;
+       loff_t pos = iocb->ki_pos;
+       struct iov_iter i;
 
        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;
 
-       dout("sync_write on file %p %lld~%u %s\n", file, pos,
-            (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+       dout("sync_direct_write on file %p %lld~%u\n", file, pos,
+            (unsigned)count);
 
-       ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
+       ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;
 
        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
-                                           (pos + left) >> PAGE_CACHE_SHIFT);
+                                           (pos + count) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);
 
        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
-       if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
-               flags |= CEPH_OSD_FLAG_ACK;
-       else
-               num_ops++;      /* Also include a 'startsync' command. */
 
-       /*
-        * we may need to do multiple writes here if we span an object
-        * boundary.  this isn't atomic, unfortunately.  :(
-        */
-more:
-       io_align = pos & ~PAGE_MASK;
-       buf_align = (unsigned long)data & ~PAGE_MASK;
-       len = left;
-
-       snapc = ci->i_snap_realm->cached_context;
-       vino = ceph_vino(inode);
-       req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
-                                   vino, pos, &len, num_ops,
-                                   CEPH_OSD_OP_WRITE, flags, snapc,
-                                   ci->i_truncate_seq, ci->i_truncate_size,
-                                   false);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       iov_iter_init(&i, iov, nr_segs, count, 0);
+
+       while (iov_iter_count(&i) > 0) {
+               void __user *data = i.iov->iov_base + i.iov_offset;
+               u64 len = i.iov->iov_len - i.iov_offset;
+
+               page_align = (unsigned long)data & ~PAGE_MASK;
+
+               snapc = ci->i_snap_realm->cached_context;
+               vino = ceph_vino(inode);
+               req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+                                           vino, pos, &len,
+                                           2,/*include a 'startsync' command*/
+                                           CEPH_OSD_OP_WRITE, flags, snapc,
+                                           ci->i_truncate_seq,
+                                           ci->i_truncate_size,
+                                           false);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       goto out;
+               }
 
-       /* write from beginning of first page, regardless of io alignment */
-       page_align = file->f_flags & O_DIRECT ? buf_align : io_align;
-       num_pages = calc_pages_for(page_align, len);
-       if (file->f_flags & O_DIRECT) {
+               num_pages = calc_pages_for(page_align, len);
                pages = ceph_get_direct_page_vector(data, num_pages, false);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
@@ -577,60 +614,175 @@ more:
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
-                                          (pos+len) | (PAGE_CACHE_SIZE-1));
-       } else {
+                                  (pos+len) | (PAGE_CACHE_SIZE-1));
+               osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
+                                               false, false);
+
+               /* BUG_ON(vino.snap != CEPH_NOSNAP); */
+               ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
+
+               ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+               if (!ret)
+                       ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+               ceph_put_page_vector(pages, num_pages, false);
+
+out:
+               ceph_osdc_put_request(req);
+               if (ret == 0) {
+                       pos += len;
+                       written += len;
+                       iov_iter_advance(&i, (size_t)len);
+
+                       if (pos > i_size_read(inode)) {
+                               check_caps = ceph_inode_set_size(inode, pos);
+                               if (check_caps)
+                                       ceph_check_caps(ceph_inode(inode),
+                                                       CHECK_CAPS_AUTHONLY,
+                                                       NULL);
+                       }
+               } else
+                       break;
+       }
+
+       if (ret != -EOLDSNAPC && written > 0) {
+               iocb->ki_pos = pos;
+               ret = written;
+       }
+       return ret;
+}
+
+
+/*
+ * Synchronous write, straight from __user pointer or user pages.
+ *
+ * If write spans object boundary, just do multiple writes.  (For a
+ * correct atomic write, we should e.g. take write locks on all
+ * objects, rollback on failure, etc.)
+ */
+static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
+                              unsigned long nr_segs, size_t count)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_snap_context *snapc;
+       struct ceph_vino vino;
+       struct ceph_osd_request *req;
+       struct page **pages;
+       u64 len;
+       int num_pages;
+       int written = 0;
+       int flags;
+       int check_caps = 0;
+       int ret;
+       struct timespec mtime = CURRENT_TIME;
+       loff_t pos = iocb->ki_pos;
+       struct iov_iter i;
+
+       if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
+               return -EROFS;
+
+       dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
+
+       ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+       if (ret < 0)
+               return ret;
+
+       ret = invalidate_inode_pages2_range(inode->i_mapping,
+                                           pos >> PAGE_CACHE_SHIFT,
+                                           (pos + count) >> PAGE_CACHE_SHIFT);
+       if (ret < 0)
+               dout("invalidate_inode_pages2_range returned %d\n", ret);
+
+       flags = CEPH_OSD_FLAG_ORDERSNAP |
+               CEPH_OSD_FLAG_ONDISK |
+               CEPH_OSD_FLAG_WRITE |
+               CEPH_OSD_FLAG_ACK;
+
+       iov_iter_init(&i, iov, nr_segs, count, 0);
+
+       while ((len = iov_iter_count(&i)) > 0) {
+               size_t left;
+               int n;
+
+               snapc = ci->i_snap_realm->cached_context;
+               vino = ceph_vino(inode);
+               req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+                                           vino, pos, &len, 1,
+                                           CEPH_OSD_OP_WRITE, flags, snapc,
+                                           ci->i_truncate_seq,
+                                           ci->i_truncate_size,
+                                           false);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       goto out;
+               }
+
+               /*
+                * write from beginning of first page,
+                * regardless of io alignment
+                */
+               num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
-               ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
+
+               left = len;
+               for (n = 0; n < num_pages; n++) {
+                       size_t plen = min(left, PAGE_SIZE);
+                       ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+                       if (ret != plen) {
+                               ret = -EFAULT;
+                               break;
+                       }
+                       left -= ret;
+                       iov_iter_advance(&i, ret);
+               }
+
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }
 
-               if ((file->f_flags & O_SYNC) == 0) {
-                       /* get a second commit callback */
-                       req->r_unsafe_callback = ceph_sync_write_unsafe;
-                       req->r_inode = inode;
-                       own_pages = true;
-               }
-       }
-       osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
-                                       false, own_pages);
+               /* get a second commit callback */
+               req->r_unsafe_callback = ceph_sync_write_unsafe;
+               req->r_inode = inode;
 
-       /* BUG_ON(vino.snap != CEPH_NOSNAP); */
-       ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
+               osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
+                                               false, true);
 
-       ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
-       if (!ret)
-               ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+               /* BUG_ON(vino.snap != CEPH_NOSNAP); */
+               ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
 
-       if (file->f_flags & O_DIRECT)
-               ceph_put_page_vector(pages, num_pages, false);
-       else if (file->f_flags & O_SYNC)
-               ceph_release_page_vector(pages, num_pages);
+               ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+               if (!ret)
+                       ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 
 out:
-       ceph_osdc_put_request(req);
-       if (ret == 0) {
-               pos += len;
-               written += len;
-               left -= len;
-               data += len;
-               if (left)
-                       goto more;
+               ceph_osdc_put_request(req);
+               if (ret == 0) {
+                       pos += len;
+                       written += len;
+
+                       if (pos > i_size_read(inode)) {
+                               check_caps = ceph_inode_set_size(inode, pos);
+                               if (check_caps)
+                                       ceph_check_caps(ceph_inode(inode),
+                                                       CHECK_CAPS_AUTHONLY,
+                                                       NULL);
+                       }
+               } else
+                       break;
+       }
 
+       if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
-               *ppos = pos;
-               if (pos > i_size_read(inode))
-                       check_caps = ceph_inode_set_size(inode, pos);
-               if (check_caps)
-                       ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
-                                       NULL);
-       } else if (ret != -EOLDSNAPC && written > 0) {
-               ret = written;
+               iocb->ki_pos = pos;
        }
        return ret;
 }
@@ -647,55 +799,84 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
-       loff_t *ppos = &iocb->ki_pos;
-       size_t len = iov->iov_len;
+       size_t len = iocb->ki_nbytes;
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       void __user *base = iov->iov_base;
        ssize_t ret;
        int want, got = 0;
        int checkeof = 0, read = 0;
 
-       dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
-            inode, ceph_vinop(inode), pos, (unsigned)len, inode);
 again:
+       dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
+            inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
        if (ret < 0)
-               goto out;
-       dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
-            inode, ceph_vinop(inode), pos, (unsigned)len,
-            ceph_cap_string(got));
+               return ret;
 
        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
-           (fi->flags & CEPH_F_SYNC))
+           (fi->flags & CEPH_F_SYNC)) {
+               struct iov_iter i;
+
+               dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+                    inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+                    ceph_cap_string(got));
+
+               if (!read) {
+                       ret = generic_segment_checks(iov, &nr_segs,
+                                                       &len, VERIFY_WRITE);
+                       if (ret)
+                               goto out;
+               }
+
+               iov_iter_init(&i, iov, nr_segs, len, read);
+
                /* hmm, this isn't really async... */
-               ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
-       else
-               ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+               ret = ceph_sync_read(iocb, &i, &checkeof);
+       } else {
+               /*
+                * We can't modify the content of iov,
+                * so we only read from the beginning.
+                */
+               if (read) {
+                       iocb->ki_pos = pos;
+                       len = iocb->ki_nbytes;
+                       read = 0;
+               }
+               dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+                    inode, ceph_vinop(inode), pos, (unsigned)len,
+                    ceph_cap_string(got));
 
+               ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+       }
 out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);
 
        if (checkeof && ret >= 0) {
-               int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
+               int statret = ceph_do_getattr(inode,
+                                             CEPH_STAT_CAP_SIZE);
 
                /* hit EOF or hole? */
-               if (statret == 0 && *ppos < inode->i_size) {
-                       dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
+               if (statret == 0 && iocb->ki_pos < inode->i_size &&
+                       ret < len) {
+                       dout("sync_read hit hole, ppos %lld < size %lld"
+                            ", reading more\n", iocb->ki_pos,
+                            inode->i_size);
+
                        read += ret;
-                       base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }
+
        if (ret >= 0)
                ret += read;
 
@@ -772,11 +953,13 @@ retry_snap:
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
 
        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
-           (iocb->ki_filp->f_flags & O_DIRECT) ||
-           (fi->flags & CEPH_F_SYNC)) {
+           (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
                mutex_unlock(&inode->i_mutex);
-               written = ceph_sync_write(file, iov->iov_base, count,
-                                         pos, &iocb->ki_pos);
+               if (file->f_flags & O_DIRECT)
+                       written = ceph_sync_direct_write(iocb, iov,
+                                                        nr_segs, count);
+               else
+                       written = ceph_sync_write(iocb, iov, nr_segs, count);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u"
                                "got EOLDSNAPC, retrying\n",
index 8549a48115f71b23e1f35ef444caf3eb32dbced3..2ae1381de64a102c89d3fbe9b92dee3893fbbe7a 100644 (file)
@@ -436,6 +436,16 @@ void ceph_destroy_inode(struct inode *inode)
        call_rcu(&inode->i_rcu, ceph_i_callback);
 }
 
+int ceph_drop_inode(struct inode *inode)
+{
+       /*
+        * Positive dentry and corresponding inode are always accompanied
+        * in MDS reply. So no need to keep inode in the cache after
+        * dropping all its aliases.
+        */
+       return 1;
+}
+
 /*
  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
  * careful because either the client or MDS may have more up to date
@@ -577,6 +587,8 @@ static int fill_inode(struct inode *inode,
        int issued = 0, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
+       struct ceph_inode_frag *frag;
+       struct rb_node *rb_node;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;
@@ -751,15 +763,38 @@ no_change:
        /* FIXME: move me up, if/when version reflects fragtree changes */
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
+       rb_node = rb_first(&ci->i_fragtree);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
-               struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
-
-               if (IS_ERR(frag))
-                       continue;
+               frag = NULL;
+               while (rb_node) {
+                       frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+                       if (ceph_frag_compare(frag->frag, id) >= 0) {
+                               if (frag->frag != id)
+                                       frag = NULL;
+                               else
+                                       rb_node = rb_next(rb_node);
+                               break;
+                       }
+                       rb_node = rb_next(rb_node);
+                       rb_erase(&frag->node, &ci->i_fragtree);
+                       kfree(frag);
+                       frag = NULL;
+               }
+               if (!frag) {
+                       frag = __get_or_create_frag(ci, id);
+                       if (IS_ERR(frag))
+                               continue;
+               }
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
+       while (rb_node) {
+               frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+               rb_node = rb_next(rb_node);
+               rb_erase(&frag->node, &ci->i_fragtree);
+               kfree(frag);
+       }
        mutex_unlock(&ci->i_fragtree_mutex);
 
        /* were we issued a capability? */
@@ -1250,8 +1285,20 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
        int err = 0, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
-       u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;
+       u64 r_readdir_offset = req->r_readdir_offset;
+       u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+
+       if (rinfo->dir_dir &&
+           le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+               dout("readdir_prepopulate got new frag %x -> %x\n",
+                    frag, le32_to_cpu(rinfo->dir_dir->frag));
+               frag = le32_to_cpu(rinfo->dir_dir->frag);
+               if (ceph_frag_is_leftmost(frag))
+                       r_readdir_offset = 2;
+               else
+                       r_readdir_offset = 0;
+       }
 
        if (req->r_aborted)
                return readdir_prepopulate_inodes_only(req, session);
@@ -1315,7 +1362,7 @@ retry_lookup:
                }
 
                di = dn->d_fsdata;
-               di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
+               di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
 
                /* inode */
                if (dn->d_inode) {
index b7bda5d9611da031aaf6f104ece9fa6351993070..6d953ab0ac06c08463c6134639d658a7ce90f304 100644 (file)
@@ -43,6 +43,7 @@
  */
 
 struct ceph_reconnect_state {
+       int nr_caps;
        struct ceph_pagelist *pagelist;
        bool flock;
 };
@@ -443,6 +444,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
+       s->s_cap_reconnect = 0;
        s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_releases_done);
@@ -986,7 +988,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
        spin_lock(&ci->i_ceph_lock);
-       __ceph_remove_cap(cap);
+       __ceph_remove_cap(cap, false);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1231,9 +1233,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
-               __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
-                                   cap->mseq, cap->issue_seq);
-               __ceph_remove_cap(cap);
+               __ceph_remove_cap(cap, true);
        } else {
                /* try to drop referring dentries */
                spin_unlock(&ci->i_ceph_lock);
@@ -1416,7 +1416,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
        unsigned num;
 
        dout("discard_cap_releases mds%d\n", session->s_mds);
-       spin_lock(&session->s_cap_lock);
 
        /* zero out the in-progress message */
        msg = list_first_entry(&session->s_cap_releases,
@@ -1443,8 +1442,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
                msg->front.iov_len = sizeof(*head);
                list_add(&msg->list_head, &session->s_cap_releases);
        }
-
-       spin_unlock(&session->s_cap_lock);
 }
 
 /*
@@ -2238,8 +2235,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
        if (err == 0) {
                if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
-                                   req->r_op == CEPH_MDS_OP_LSSNAP) &&
-                   rinfo->dir_nr)
+                                   req->r_op == CEPH_MDS_OP_LSSNAP))
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
        }
@@ -2490,6 +2486,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
        cap->mseq = 0;       /* and migrate_seq */
+       cap->cap_gen = cap->session->s_cap_gen;
 
        if (recon_state->flock) {
                rec.v2.cap_id = cpu_to_le64(cap->cap_id);
@@ -2552,6 +2549,8 @@ encode_again:
        } else {
                err = ceph_pagelist_append(pagelist, &rec, reclen);
        }
+
+       recon_state->nr_caps++;
 out_free:
        kfree(path);
 out_dput:
@@ -2579,6 +2578,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        struct rb_node *p;
        int mds = session->s_mds;
        int err = -ENOMEM;
+       int s_nr_caps;
        struct ceph_pagelist *pagelist;
        struct ceph_reconnect_state recon_state;
 
@@ -2610,20 +2610,38 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        dout("session %p state %s\n", session,
             session_state_name(session->s_state));
 
+       spin_lock(&session->s_gen_ttl_lock);
+       session->s_cap_gen++;
+       spin_unlock(&session->s_gen_ttl_lock);
+
+       spin_lock(&session->s_cap_lock);
+       /*
+        * notify __ceph_remove_cap() that we are composing cap reconnect.
+        * If a cap gets released before being added to the cap reconnect,
+        * __ceph_remove_cap() should skip queuing cap release.
+        */
+       session->s_cap_reconnect = 1;
        /* drop old cap expires; we're about to reestablish that state */
        discard_cap_releases(mdsc, session);
+       spin_unlock(&session->s_cap_lock);
 
        /* traverse this session's caps */
-       err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
+       s_nr_caps = session->s_nr_caps;
+       err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
        if (err)
                goto fail;
 
+       recon_state.nr_caps = 0;
        recon_state.pagelist = pagelist;
        recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
        if (err < 0)
                goto fail;
 
+       spin_lock(&session->s_cap_lock);
+       session->s_cap_reconnect = 0;
+       spin_unlock(&session->s_cap_lock);
+
        /*
         * snaprealms.  we provide mds with the ino, seq (version), and
         * parent for all of our realms.  If the mds has any newer info,
@@ -2646,11 +2664,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
 
        if (recon_state.flock)
                reply->hdr.version = cpu_to_le16(2);
-       if (pagelist->length) {
-               /* set up outbound data if we have any */
-               reply->hdr.data_len = cpu_to_le32(pagelist->length);
-               ceph_msg_data_add_pagelist(reply, pagelist);
+
+       /* raced with cap release? */
+       if (s_nr_caps != recon_state.nr_caps) {
+               struct page *page = list_first_entry(&pagelist->head,
+                                                    struct page, lru);
+               __le32 *addr = kmap_atomic(page);
+               *addr = cpu_to_le32(recon_state.nr_caps);
+               kunmap_atomic(addr);
        }
+
+       reply->hdr.data_len = cpu_to_le32(pagelist->length);
+       ceph_msg_data_add_pagelist(reply, pagelist);
        ceph_con_send(&session->s_con, reply);
 
        mutex_unlock(&session->s_mutex);
index c2a19fbbe5177b619b7a3d7e6132b626df8c8508..4c053d099ae4e60400dbcbdcce21844138ba8a47 100644 (file)
@@ -132,6 +132,7 @@ struct ceph_mds_session {
        struct list_head  s_caps;     /* all caps issued by this session */
        int               s_nr_caps, s_trim_caps;
        int               s_num_cap_releases;
+       int               s_cap_reconnect;
        struct list_head  s_cap_releases; /* waiting cap_release messages */
        struct list_head  s_cap_releases_done; /* ready to send */
        struct ceph_cap  *s_cap_iterator;
index 6a0951e4304441a241ca8fe550aba36cc097c271..e58bd4a23bfb532bd2c119345bb4669be1d9336b 100644 (file)
@@ -686,6 +686,7 @@ static const struct super_operations ceph_super_ops = {
        .alloc_inode    = ceph_alloc_inode,
        .destroy_inode  = ceph_destroy_inode,
        .write_inode    = ceph_write_inode,
+       .drop_inode     = ceph_drop_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .show_options   = ceph_show_options,
index 6014b0a3c405cb12dfb62fdac7887f83a4977b96..8de94b564d670d28a033d6ccfc309a37f799ab57 100644 (file)
@@ -691,6 +691,7 @@ extern const struct inode_operations ceph_file_iops;
 
 extern struct inode *ceph_alloc_inode(struct super_block *sb);
 extern void ceph_destroy_inode(struct inode *inode);
+extern int ceph_drop_inode(struct inode *inode);
 
 extern struct inode *ceph_get_inode(struct super_block *sb,
                                    struct ceph_vino vino);
@@ -741,13 +742,7 @@ extern int ceph_add_cap(struct inode *inode,
                        int fmode, unsigned issued, unsigned wanted,
                        unsigned cap, unsigned seq, u64 realmino, int flags,
                        struct ceph_cap_reservation *caps_reservation);
-extern void __ceph_remove_cap(struct ceph_cap *cap);
-static inline void ceph_remove_cap(struct ceph_cap *cap)
-{
-       spin_lock(&cap->ci->i_ceph_lock);
-       __ceph_remove_cap(cap);
-       spin_unlock(&cap->ci->i_ceph_lock);
-}
+extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
 
index a16b4e58bcc62ee88f9772f75120ef250d0112bd..849f6132b327e5e79e35bd4a178784eea001b267 100644 (file)
@@ -120,14 +120,16 @@ cifs_read_super(struct super_block *sb)
 {
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
+       struct cifs_tcon *tcon;
        int rc = 0;
 
        cifs_sb = CIFS_SB(sb);
+       tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
                sb->s_flags |= MS_POSIXACL;
 
-       if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
+       if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
        else
                sb->s_maxbytes = MAX_NON_LFS;
@@ -147,7 +149,7 @@ cifs_read_super(struct super_block *sb)
                goto out_no_root;
        }
 
-       if (cifs_sb_master_tcon(cifs_sb)->nocase)
+       if (tcon->nocase)
                sb->s_d_op = &cifs_ci_dentry_ops;
        else
                sb->s_d_op = &cifs_dentry_ops;
@@ -860,7 +862,7 @@ const struct inode_operations cifs_file_inode_ops = {
 const struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
-       .put_link = cifs_put_link,
+       .put_link = kfree_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
index ea723a5e8226231d64cd85eab065c7f91801a3a0..26a754f49ba1933259c0711d1e37ece069b3030c 100644 (file)
@@ -115,8 +115,6 @@ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
 
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry,
-                         struct nameidata *nd, void *);
 extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
                         int buflen);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
@@ -132,5 +130,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.01"
+#define CIFS_VERSION   "2.02"
 #endif                         /* _CIFSFS_H */
index cfa14c80ef3b6a76f5a1cfe1de5c8a6878d988e3..a67cf12a1c01b873a6d824445e2d5c8eeca61258 100644 (file)
@@ -278,6 +278,8 @@ struct smb_version_operations {
        /* set attributes */
        int (*set_file_info)(struct inode *, const char *, FILE_BASIC_INFO *,
                             const unsigned int);
+       int (*set_compression)(const unsigned int, struct cifs_tcon *,
+                              struct cifsFileInfo *);
        /* check if we can send an echo or not */
        bool (*can_echo)(struct TCP_Server_Info *);
        /* send echo request */
@@ -379,6 +381,9 @@ struct smb_version_operations {
        char * (*create_lease_buf)(u8 *, u8);
        /* parse lease context buffer and return oplock/epoch info */
        __u8 (*parse_lease_buf)(void *, unsigned int *);
+       int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
+                       struct cifsFileInfo *target_file, u64 src_off, u64 len,
+                       u64 dest_off);
 };
 
 struct smb_version_values {
@@ -547,9 +552,6 @@ struct TCP_Server_Info {
        unsigned int max_rw;    /* maxRw specifies the maximum */
        /* message size the server can send or receive for */
        /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
-       unsigned int max_vcs;   /* maximum number of smb sessions, at least
-                                  those that can be specified uniquely with
-                                  vcnumbers */
        unsigned int capabilities; /* selective disabling of caps by smb sess */
        int timeAdj;  /* Adjust for difference in server time zone in sec */
        __u64 CurrentMid;         /* multiplex id - rotating counter */
@@ -715,7 +717,6 @@ struct cifs_ses {
        enum statusEnum status;
        unsigned overrideSecFlg;  /* if non-zero override global sec flags */
        __u16 ipc_tid;          /* special tid for connection to IPC share */
-       __u16 vcnum;
        char *serverOS;         /* name of operating system underlying server */
        char *serverNOS;        /* name of network operating system of server */
        char *serverDomain;     /* security realm of server */
@@ -832,6 +833,8 @@ struct cifs_tcon {
        __u32 maximal_access;
        __u32 vol_serial_number;
        __le64 vol_create_time;
+       __u32 ss_flags;         /* sector size flags */
+       __u32 perf_sector_size; /* best sector size for perf */
 #endif /* CONFIG_CIFS_SMB2 */
 #ifdef CONFIG_CIFS_FSCACHE
        u64 resource_id;                /* server resource id */
@@ -1272,6 +1275,7 @@ struct dfs_info3_param {
 #define CIFS_FATTR_DELETE_PENDING      0x2
 #define CIFS_FATTR_NEED_REVAL          0x4
 #define CIFS_FATTR_INO_COLLISION       0x8
+#define CIFS_FATTR_UNKNOWN_NLINK       0x10
 
 struct cifs_fattr {
        u32             cf_flags;
index 948676db8e2ea5f65d276535caa087c86792b18c..f9bb4974161abf730bd9f3517c5c6d511553a956 100644 (file)
@@ -1352,6 +1352,35 @@ typedef struct smb_com_transaction_ioctl_req {
        __u8 Data[1];
 } __attribute__((packed)) TRANSACT_IOCTL_REQ;
 
+typedef struct smb_com_transaction_compr_ioctl_req {
+       struct smb_hdr hdr;     /* wct = 23 */
+       __u8 MaxSetupCount;
+       __u16 Reserved;
+       __le32 TotalParameterCount;
+       __le32 TotalDataCount;
+       __le32 MaxParameterCount;
+       __le32 MaxDataCount;
+       __le32 ParameterCount;
+       __le32 ParameterOffset;
+       __le32 DataCount;
+       __le32 DataOffset;
+       __u8 SetupCount; /* four setup words follow subcommand */
+       /* SNIA spec incorrectly included spurious pad here */
+       __le16 SubCommand; /* 2 = IOCTL/FSCTL */
+       __le32 FunctionCode;
+       __u16 Fid;
+       __u8 IsFsctl;  /* 1 = File System Control, 0 = device control (IOCTL) */
+       __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
+       __le16 ByteCount;
+       __u8 Pad[3];
+       __le16 compression_state;  /* See below for valid flags */
+} __attribute__((packed)) TRANSACT_COMPR_IOCTL_REQ;
+
+/* compression state flags */
+#define COMPRESSION_FORMAT_NONE                0x0000
+#define COMPRESSION_FORMAT_DEFAULT     0x0001
+#define COMPRESSION_FORMAT_LZNT1       0x0002
+
 typedef struct smb_com_transaction_ioctl_rsp {
        struct smb_hdr hdr;     /* wct = 19 */
        __u8 Reserved[3];
@@ -1491,15 +1520,30 @@ struct file_notify_information {
        __u8  FileName[0];
 } __attribute__((packed));
 
-struct reparse_data {
-       __u32   ReparseTag;
-       __u16   ReparseDataLength;
+/* For IO_REPARSE_TAG_SYMLINK */
+struct reparse_symlink_data {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
        __u16   Reserved;
-       __u16   SubstituteNameOffset;
-       __u16   SubstituteNameLength;
-       __u16   PrintNameOffset;
-       __u16   PrintNameLength;
-       __u32   Flags;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __le32  Flags;
+       char    PathBuffer[0];
+} __attribute__((packed));
+
+/* For IO_REPARSE_TAG_NFS */
+#define NFS_SPECFILE_LNK       0x00000000014B4E4C
+#define NFS_SPECFILE_CHR       0x0000000000524843
+#define NFS_SPECFILE_BLK       0x00000000004B4C42
+#define NFS_SPECFILE_FIFO      0x000000004F464946
+#define NFS_SPECFILE_SOCK      0x000000004B434F53
+struct reparse_posix_data {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le64  InodeType; /* LNK, FIFO, CHR etc. */
        char    PathBuffer[0];
 } __attribute__((packed));
 
@@ -2200,6 +2244,9 @@ typedef struct {
        __le32 DeviceCharacteristics;
 } __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
 
+/* minimum includes first three fields, and empty FS Name */
+#define MIN_FS_ATTR_INFO_SIZE 12
+
 typedef struct {
        __le32 Attributes;
        __le32 MaxPathNameComponentLength;
@@ -2652,26 +2699,7 @@ typedef struct file_xattr_info {
 } __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
                                              level 0x205 */
 
-
-/* flags for chattr command */
-#define EXT_SECURE_DELETE              0x00000001 /* EXT3_SECRM_FL */
-#define EXT_ENABLE_UNDELETE            0x00000002 /* EXT3_UNRM_FL */
-/* Reserved for compress file 0x4 */
-#define EXT_SYNCHRONOUS                        0x00000008 /* EXT3_SYNC_FL */
-#define EXT_IMMUTABLE_FL               0x00000010 /* EXT3_IMMUTABLE_FL */
-#define EXT_OPEN_APPEND_ONLY           0x00000020 /* EXT3_APPEND_FL */
-#define EXT_DO_NOT_BACKUP              0x00000040 /* EXT3_NODUMP_FL */
-#define EXT_NO_UPDATE_ATIME            0x00000080 /* EXT3_NOATIME_FL */
-/* 0x100 through 0x800 reserved for compression flags and are GET-ONLY */
-#define EXT_HASH_TREE_INDEXED_DIR      0x00001000 /* GET-ONLY EXT3_INDEX_FL */
-/* 0x2000 reserved for IMAGIC_FL */
-#define EXT_JOURNAL_THIS_FILE  0x00004000 /* GET-ONLY EXT3_JOURNAL_DATA_FL */
-/* 0x8000 reserved for EXT3_NOTAIL_FL */
-#define EXT_SYNCHRONOUS_DIR            0x00010000 /* EXT3_DIRSYNC_FL */
-#define EXT_TOPDIR                     0x00020000 /* EXT3_TOPDIR_FL */
-
-#define EXT_SET_MASK                   0x000300FF
-#define EXT_GET_MASK                   0x0003DFFF
+/* the flags for the lsattr and chflags commands were removed; they are now in uapi/linux/fs.h */
 
 typedef struct file_chattr_info {
        __le64  mask; /* list of all possible attribute bits */
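
The reparse_posix_data InodeType constants added above are mostly ASCII tags packed into a little-endian 64-bit field (for example 0x000000004F464946 spells "FIFO"). The standalone program below is an illustration only, not part of the patch; it prints the byte view of each constant to make that encoding visible.

#include <stdio.h>
#include <stdint.h>

/* print the low-to-high bytes of a little-endian InodeType value */
static void show(const char *name, uint64_t v)
{
	printf("%-22s ", name);
	for (int i = 0; i < 8; i++) {
		unsigned char c = (unsigned char)((v >> (8 * i)) & 0xff);
		putchar((c >= 0x20 && c < 0x7f) ? c : '.');
	}
	putchar('\n');
}

int main(void)
{
	show("NFS_SPECFILE_LNK",  0x00000000014B4E4CULL);
	show("NFS_SPECFILE_CHR",  0x0000000000524843ULL);
	show("NFS_SPECFILE_BLK",  0x00000000004B4C42ULL);
	show("NFS_SPECFILE_FIFO", 0x000000004F464946ULL);
	show("NFS_SPECFILE_SOCK", 0x000000004B434F53ULL);
	return 0;
}
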
index b5ec2a268f560c77424744c55266480f25f3c8bb..aa3397620342d20beba92abc732845273d3663ca 100644 (file)
@@ -360,6 +360,8 @@ extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                               __u16 fid, char **symlinkinfo,
                               const struct nls_table *nls_codepage);
+extern int CIFSSMB_set_compression(const unsigned int xid,
+                                  struct cifs_tcon *tcon, __u16 fid);
 extern int CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
                        const char *fileName, const int disposition,
                        const int access_flags, const int omode,
index a3d74fea16233ef9d4c32a0179b4b96cccb8fa2b..93b29474714a0cdba1356c85c049e874dbc195dd 100644 (file)
@@ -463,7 +463,6 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
                               cifs_max_pending);
        set_credits(server, server->maxReq);
        server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
-       server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
        /* even though we do not use raw we might as well set this
        accurately, in case we ever find a need for it */
        if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -3089,7 +3088,8 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
        bool is_unicode;
        unsigned int sub_len;
        char *sub_start;
-       struct reparse_data *reparse_buf;
+       struct reparse_symlink_data *reparse_buf;
+       struct reparse_posix_data *posix_buf;
        __u32 data_offset, data_count;
        char *end_of_smb;
 
@@ -3138,20 +3138,47 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                goto qreparse_out;
        }
        end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
-       reparse_buf = (struct reparse_data *)
+       reparse_buf = (struct reparse_symlink_data *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
        if ((char *)reparse_buf >= end_of_smb) {
                rc = -EIO;
                goto qreparse_out;
        }
-       if ((reparse_buf->PathBuffer + reparse_buf->PrintNameOffset +
-                               reparse_buf->PrintNameLength) > end_of_smb) {
+       if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
+               cifs_dbg(FYI, "NFS style reparse tag\n");
+               posix_buf =  (struct reparse_posix_data *)reparse_buf;
+
+               if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
+                       cifs_dbg(FYI, "unsupported file type 0x%llx\n",
+                                le64_to_cpu(posix_buf->InodeType));
+                       rc = -EOPNOTSUPP;
+                       goto qreparse_out;
+               }
+               is_unicode = true;
+               sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
+               if (posix_buf->PathBuffer + sub_len > end_of_smb) {
+                       cifs_dbg(FYI, "reparse buf beyond SMB\n");
+                       rc = -EIO;
+                       goto qreparse_out;
+               }
+               *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
+                               sub_len, is_unicode, nls_codepage);
+               goto qreparse_out;
+       } else if (reparse_buf->ReparseTag !=
+                       cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
+               rc = -EOPNOTSUPP;
+               goto qreparse_out;
+       }
+
+       /* Reparse tag is NTFS symlink */
+       sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
+                               reparse_buf->PathBuffer;
+       sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
+       if (sub_start + sub_len > end_of_smb) {
                cifs_dbg(FYI, "reparse buf beyond SMB\n");
                rc = -EIO;
                goto qreparse_out;
        }
-       sub_start = reparse_buf->SubstituteNameOffset + reparse_buf->PathBuffer;
-       sub_len = reparse_buf->SubstituteNameLength;
        if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
                is_unicode = true;
        else
@@ -3172,6 +3199,60 @@ qreparse_out:
        return rc;
 }
 
+int
+CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                   __u16 fid)
+{
+       int rc = 0;
+       int bytes_returned;
+       struct smb_com_transaction_compr_ioctl_req *pSMB;
+       struct smb_com_transaction_ioctl_rsp *pSMBr;
+
+       cifs_dbg(FYI, "Set compression for %u\n", fid);
+       rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+                     (void **) &pSMBr);
+       if (rc)
+               return rc;
+
+       pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+
+       pSMB->TotalParameterCount = 0;
+       pSMB->TotalDataCount = __constant_cpu_to_le32(2);
+       pSMB->MaxParameterCount = 0;
+       pSMB->MaxDataCount = 0;
+       pSMB->MaxSetupCount = 4;
+       pSMB->Reserved = 0;
+       pSMB->ParameterOffset = 0;
+       pSMB->DataCount = __constant_cpu_to_le32(2);
+       pSMB->DataOffset =
+               cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req,
+                               compression_state) - 4);  /* 84 */
+       pSMB->SetupCount = 4;
+       pSMB->SubCommand = __constant_cpu_to_le16(NT_TRANSACT_IOCTL);
+       pSMB->ParameterCount = 0;
+       pSMB->FunctionCode = __constant_cpu_to_le32(FSCTL_SET_COMPRESSION);
+       pSMB->IsFsctl = 1; /* FSCTL */
+       pSMB->IsRootFlag = 0;
+       pSMB->Fid = fid; /* file handle always le */
+       /* 3 byte pad, followed by 2 byte compress state */
+       pSMB->ByteCount = __constant_cpu_to_le16(5);
+       inc_rfc1001_len(pSMB, 5);
+
+       rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+                        (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+       if (rc)
+               cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc);
+
+       cifs_buf_release(pSMB);
+
+       /*
+        * Note: On -EAGAIN error only caller can retry on handle based calls
+        * since file handle passed in no longer valid.
+        */
+       return rc;
+}
+
+
 #ifdef CONFIG_CIFS_POSIX
 
 /*Convert an Access Control Entry from wire format to local POSIX xattr format*/
index eb955b525e55307a18c1b534e95e5bfbe8462764..cf6aedc59c218ecde3c778b3bdd3cc8ef15e971c 100644 (file)
@@ -2737,8 +2737,8 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
                /* go while there's data to be copied and no errors */
                if (copy && !rc) {
                        pdata = kmap(page);
-                       rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
-                                               (int)copy);
+                       rc = memcpy_toiovecend(iov_iter_iovec(&ii), pdata,
+                                              ii.iov_offset, (int)copy);
                        kunmap(page);
                        if (!rc) {
                                *copied += copy;
@@ -3254,6 +3254,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        /*
         * Reads as many pages as possible from fscache. Returns -ENOBUFS
         * immediately if the cookie is negative
+        *
+        * After this point, every page in the list might have PG_fscache set,
+        * so we will need to clean that up off of every page we don't use.
         */
        rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
                                         &num_pages);
@@ -3376,6 +3379,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                kref_put(&rdata->refcount, cifs_readdata_release);
        }
 
+       /* Any pages that have been shown to fscache but didn't get added to
+        * the pagecache must be uncached before they get returned to the
+        * allocator.
+        */
+       cifs_fscache_readpages_cancel(mapping->host, page_list);
        return rc;
 }
 
index 2f4bc5a58054e65475b675c4e591a72bbfb706b0..8d4b7bc8ae914edf292b16da534d700f6d870b1e 100644 (file)
@@ -27,7 +27,7 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
 {
        server->fscache =
                fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
-                               &cifs_fscache_server_index_def, server);
+                               &cifs_fscache_server_index_def, server, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
                 __func__, server, server->fscache);
 }
@@ -46,7 +46,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
 
        tcon->fscache =
                fscache_acquire_cookie(server->fscache,
-                               &cifs_fscache_super_index_def, tcon);
+                               &cifs_fscache_super_index_def, tcon, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
                 __func__, server->fscache, tcon->fscache);
 }
@@ -69,7 +69,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
                cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
-                               &cifs_fscache_inode_object_def, cifsi);
+                               &cifs_fscache_inode_object_def, cifsi, true);
                cifs_dbg(FYI, "%s: got FH cookie (0x%p/0x%p)\n",
                         __func__, tcon->fscache, cifsi->fscache);
        }
@@ -119,7 +119,7 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
                cifsi->fscache = fscache_acquire_cookie(
                                        cifs_sb_master_tcon(cifs_sb)->fscache,
                                        &cifs_fscache_inode_object_def,
-                                       cifsi);
+                                       cifsi, true);
                cifs_dbg(FYI, "%s: new cookie 0x%p oldcookie 0x%p\n",
                         __func__, cifsi->fscache, old);
        }
@@ -223,6 +223,13 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
                fscache_uncache_page(CIFS_I(inode)->fscache, page);
 }
 
+void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages)
+{
+       cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n",
+                __func__, CIFS_I(inode)->fscache, inode);
+       fscache_readpages_cancel(CIFS_I(inode)->fscache, pages);
+}
+
 void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
 {
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
index 63539323e0b960f3ecf9788eca6e67ba0ec8ce67..24794b6cd8ec5d18ce9248a6469315a1d8eebf08 100644 (file)
@@ -54,6 +54,7 @@ extern int __cifs_readpages_from_fscache(struct inode *,
                                         struct address_space *,
                                         struct list_head *,
                                         unsigned *);
+extern void __cifs_fscache_readpages_cancel(struct inode *, struct list_head *);
 
 extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
 
@@ -91,6 +92,13 @@ static inline void cifs_readpage_to_fscache(struct inode *inode,
                __cifs_readpage_to_fscache(inode, page);
 }
 
+static inline void cifs_fscache_readpages_cancel(struct inode *inode,
+                                                struct list_head *pages)
+{
+       if (CIFS_I(inode)->fscache)
+               return __cifs_fscache_readpages_cancel(inode, pages);
+}
+
 #else /* CONFIG_CIFS_FSCACHE */
 static inline int cifs_fscache_register(void) { return 0; }
 static inline void cifs_fscache_unregister(void) {}
@@ -131,6 +139,11 @@ static inline int cifs_readpages_from_fscache(struct inode *inode,
 static inline void cifs_readpage_to_fscache(struct inode *inode,
                        struct page *page) {}
 
+static inline void cifs_fscache_readpages_cancel(struct inode *inode,
+                                                struct list_head *pages)
+{
+}
+
 #endif /* CONFIG_CIFS_FSCACHE */
 
 #endif /* _CIFS_FSCACHE_H */
index f9ff9c173f78ad6a8abcc6afb56849109dbfd515..867b7cdc794a221a6eb21257d20bdc3f9e516248 100644 (file)
@@ -120,6 +120,33 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
        cifs_i->invalid_mapping = true;
 }
 
+/*
+ * copy nlink to the inode, unless it wasn't provided.  Provide
+ * sane values if we don't have an existing one and none was provided
+ */
+static void
+cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+{
+       /*
+        * if we're in a situation where we can't trust what we
+        * got from the server (readdir, some non-unix cases)
+        * fake reasonable values
+        */
+       if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
+               /* only provide fake values on a new inode */
+               if (inode->i_state & I_NEW) {
+                       if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
+                               set_nlink(inode, 2);
+                       else
+                               set_nlink(inode, 1);
+               }
+               return;
+       }
+
+       /* we trust the server, so update it */
+       set_nlink(inode, fattr->cf_nlink);
+}
+
 /* populate an inode with info from a cifs_fattr struct */
 void
 cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
@@ -134,7 +161,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
        inode->i_mtime = fattr->cf_mtime;
        inode->i_ctime = fattr->cf_ctime;
        inode->i_rdev = fattr->cf_rdev;
-       set_nlink(inode, fattr->cf_nlink);
+       cifs_nlink_fattr_to_inode(inode, fattr);
        inode->i_uid = fattr->cf_uid;
        inode->i_gid = fattr->cf_gid;
 
@@ -541,6 +568,7 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
        fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
        fattr->cf_createtime = le64_to_cpu(info->CreationTime);
 
+       fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
        if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
                fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
                fattr->cf_dtype = DT_DIR;
@@ -548,7 +576,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
                 * Server can return wrong NumberOfLinks value for directories
                 * when Unix extensions are disabled - fake it.
                 */
-               fattr->cf_nlink = 2;
+               if (!tcon->unix_ext)
+                       fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
        } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
                fattr->cf_mode = S_IFLNK;
                fattr->cf_dtype = DT_LNK;
@@ -561,11 +590,15 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
                if (fattr->cf_cifsattrs & ATTR_READONLY)
                        fattr->cf_mode &= ~(S_IWUGO);
 
-               fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
-               if (fattr->cf_nlink < 1) {
-                       cifs_dbg(1, "replacing bogus file nlink value %u\n",
+               /*
+                * Don't accept zero nlink from non-unix servers unless
+                * delete is pending.  Instead mark it as unknown.
+                */
+               if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
+                   !info->DeletePending) {
+                       cifs_dbg(1, "bogus file nlink value %u\n",
                                fattr->cf_nlink);
-                       fattr->cf_nlink = 1;
+                       fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
                }
        }
 
index 3e0845585853e52c3f6be35c442b55991d71c5c2..d353f6cc55aad32b02a322f70f9cf1f539285433 100644 (file)
@@ -3,7 +3,7 @@
  *
  *   vfs operations that deal with io control
  *
- *   Copyright (C) International Business Machines  Corp., 2005,2007
+ *   Copyright (C) International Business Machines  Corp., 2005,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
  */
 
 #include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/btrfs.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifs_debug.h"
 #include "cifsfs.h"
 
+static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+                       unsigned long srcfd, u64 off, u64 len, u64 destoff)
+{
+       int rc;
+       struct cifsFileInfo *smb_file_target = dst_file->private_data;
+       struct inode *target_inode = file_inode(dst_file);
+       struct cifs_tcon *target_tcon;
+       struct fd src_file;
+       struct cifsFileInfo *smb_file_src;
+       struct inode *src_inode;
+       struct cifs_tcon *src_tcon;
+
+       cifs_dbg(FYI, "ioctl clone range\n");
+       /* the destination must be opened for writing */
+       if (!(dst_file->f_mode & FMODE_WRITE)) {
+               cifs_dbg(FYI, "file target not open for write\n");
+               return -EINVAL;
+       }
+
+       /* check if target volume is readonly and take reference */
+       rc = mnt_want_write_file(dst_file);
+       if (rc) {
+               cifs_dbg(FYI, "mnt_want_write failed with rc %d\n", rc);
+               return rc;
+       }
+
+       src_file = fdget(srcfd);
+       if (!src_file.file) {
+               rc = -EBADF;
+               goto out_drop_write;
+       }
+
+       if ((!src_file.file->private_data) || (!dst_file->private_data)) {
+               rc = -EBADF;
+               cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+               goto out_fput;
+       }
+
+       rc = -EXDEV;
+       smb_file_target = dst_file->private_data;
+       smb_file_src = src_file.file->private_data;
+       src_tcon = tlink_tcon(smb_file_src->tlink);
+       target_tcon = tlink_tcon(smb_file_target->tlink);
+
+       /* check if source and target are on same tree connection */
+       if (src_tcon != target_tcon) {
+               cifs_dbg(VFS, "file copy src and target on different volume\n");
+               goto out_fput;
+       }
+
+       src_inode = src_file.file->f_dentry->d_inode;
+
+       /* Note: the cifs case is easier than btrfs since the server is responsible */
+       /* for checking for proper open modes and file types, and if it wants, the */
+       /* server could even support copying a range where source == target */
+
+       /* so we do not deadlock racing two ioctls on same files */
+       /* btrfs does a similar check */
+       if (target_inode < src_inode) {
+               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
+               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
+       } else {
+               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
+               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
+       }
+
+       /* determine range to clone */
+       rc = -EINVAL;
+       if (off + len > src_inode->i_size || off + len < off)
+               goto out_unlock;
+       if (len == 0)
+               len = src_inode->i_size - off;
+
+       cifs_dbg(FYI, "about to flush pages\n");
+       /* should we flush the first and last pages first? */
+       truncate_inode_pages_range(&target_inode->i_data, destoff,
+                                  PAGE_CACHE_ALIGN(destoff + len)-1);
+
+       if (target_tcon->ses->server->ops->clone_range)
+               rc = target_tcon->ses->server->ops->clone_range(xid,
+                       smb_file_src, smb_file_target, off, len, destoff);
+
+       /* force revalidate of size and timestamps of target file now
+          that target is updated on the server */
+       CIFS_I(target_inode)->time = 0;
+out_unlock:
+       mutex_unlock(&src_inode->i_mutex);
+       mutex_unlock(&target_inode->i_mutex);
+out_fput:
+       fdput(src_file);
+out_drop_write:
+       mnt_drop_write_file(dst_file);
+       return rc;
+}
+
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 {
        struct inode *inode = file_inode(filep);
        int rc = -ENOTTY; /* strange error - but the precedent */
        unsigned int xid;
        struct cifs_sb_info *cifs_sb;
-#ifdef CONFIG_CIFS_POSIX
        struct cifsFileInfo *pSMBFile = filep->private_data;
        struct cifs_tcon *tcon;
        __u64   ExtAttrBits = 0;
-       __u64   ExtAttrMask = 0;
        __u64   caps;
-#endif /* CONFIG_CIFS_POSIX */
 
        xid = get_xid();
 
@@ -49,13 +146,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        cifs_sb = CIFS_SB(inode->i_sb);
 
        switch (command) {
-#ifdef CONFIG_CIFS_POSIX
                case FS_IOC_GETFLAGS:
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
                        caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+#ifdef CONFIG_CIFS_POSIX
                        if (CIFS_UNIX_EXTATTR_CAP & caps) {
+                               __u64   ExtAttrMask = 0;
                                rc = CIFSGetExtAttr(xid, tcon,
                                                    pSMBFile->fid.netfid,
                                                    &ExtAttrBits, &ExtAttrMask);
@@ -63,29 +161,53 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                        rc = put_user(ExtAttrBits &
                                                FS_FL_USER_VISIBLE,
                                                (int __user *)arg);
+                               if (rc != EOPNOTSUPP)
+                                       break;
+                       }
+#endif /* CONFIG_CIFS_POSIX */
+                       rc = 0;
+                       if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
+                               /* add in the compressed bit */
+                               ExtAttrBits = FS_COMPR_FL;
+                               rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE,
+                                             (int __user *)arg);
                        }
                        break;
-
                case FS_IOC_SETFLAGS:
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
                        caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
-                       if (CIFS_UNIX_EXTATTR_CAP & caps) {
-                               if (get_user(ExtAttrBits, (int __user *)arg)) {
-                                       rc = -EFAULT;
-                                       break;
-                               }
-                               /*
-                                * rc = CIFSGetExtAttr(xid, tcon,
-                                *                     pSMBFile->fid.netfid,
-                                *                     extAttrBits,
-                                *                     &ExtAttrMask);
-                                */
+
+                       if (get_user(ExtAttrBits, (int __user *)arg)) {
+                               rc = -EFAULT;
+                               break;
+                       }
+
+                       /*
+                        * if (CIFS_UNIX_EXTATTR_CAP & caps)
+                        *      rc = CIFSSetExtAttr(xid, tcon,
+                        *                     pSMBFile->fid.netfid,
+                        *                     extAttrBits,
+                        *                     &ExtAttrMask);
+                        * if (rc != EOPNOTSUPP)
+                        *      break;
+                        */
+
+                       /* Currently only flag we can set is compressed flag */
+                       if ((ExtAttrBits & FS_COMPR_FL) == 0)
+                               break;
+
+                       /* Try to set compress flag */
+                       if (tcon->ses->server->ops->set_compression) {
+                               rc = tcon->ses->server->ops->set_compression(
+                                                       xid, tcon, pSMBFile);
+                               cifs_dbg(FYI, "set compress flag rc %d\n", rc);
                        }
-                       cifs_dbg(FYI, "set flags not implemented yet\n");
                        break;
-#endif /* CONFIG_CIFS_POSIX */
+               case BTRFS_IOC_CLONE:
+                       rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0);
+                       break;
                default:
                        cifs_dbg(FYI, "unsupported ioctl\n");
                        break;
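
For context on the two ioctls this file now handles, here is a minimal user-space sketch, not part of the patch, that exercises them on files assumed to live on a cifs mount: FS_IOC_SETFLAGS with FS_COMPR_FL asks the server to mark the file compressed (via the set_compression operation wired up above), and BTRFS_IOC_CLONE asks the server to clone the whole source file into the destination (only effective when the mounted dialect provides clone_range). Error handling is deliberately minimal.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>      /* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_COMPR_FL */
#include <linux/btrfs.h>   /* BTRFS_IOC_CLONE */

int main(int argc, char **argv)
{
	int src, dst, flags = 0;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <src-on-cifs> <dst-on-cifs>\n", argv[0]);
		return 1;
	}

	src = open(argv[1], O_RDONLY);
	dst = open(argv[2], O_WRONLY | O_CREAT, 0644);
	if (src < 0 || dst < 0) {
		perror("open");
		return 1;
	}

	/* ask the server to mark the destination compressed */
	if (ioctl(dst, FS_IOC_GETFLAGS, &flags) == 0) {
		flags |= FS_COMPR_FL;
		if (ioctl(dst, FS_IOC_SETFLAGS, &flags))
			perror("FS_IOC_SETFLAGS");
	}

	/* server-side clone of the whole source file into the destination */
	if (ioctl(dst, BTRFS_IOC_CLONE, src))
		perror("BTRFS_IOC_CLONE");

	close(src);
	close(dst);
	return 0;
}
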
index 7e36ceba0c7a72d797a798de500847d4fa6ac66d..cc0234710ddbb780cae5037b717a93d8a7d3d54d 100644 (file)
@@ -621,10 +621,3 @@ symlink_exit:
        free_xid(xid);
        return rc;
 }
-
-void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
-{
-       char *p = nd_get_link(nd);
-       if (!IS_ERR(p))
-               kfree(p);
-}
index af847e1cf1c1985f5ddadd07e10dbc0e38a162a2..651a5279607b968a255a528e5411e4f4b39ca438 100644 (file)
@@ -780,7 +780,9 @@ static const struct {
        ERRDOS, ERRnoaccess, 0xc0000290}, {
        ERRDOS, ERRbadfunc, 0xc000029c}, {
        ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
-       ERRDOS, ERRinvlevel, 0x007c0001}, };
+       ERRDOS, ERRinvlevel, 0x007c0001}, {
+       0, 0, 0 }
+};
 
 /*****************************************************************************
  Print an error message from the status code
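
The hunk above terminates the NT-status-to-DOS-error mapping table with an all-zero entry. The sketch below shows, in generic form, why such a sentinel matters; the struct name, entries, and lookup function are hypothetical and not taken from netmisc.c. A loop that walks the table until it reaches the zero entry no longer needs the array size and cannot read past the end.

#include <stdio.h>

struct status_map { int err_class; int err_code; unsigned int nt_status; };

static const struct status_map map[] = {
	{ 1, 5, 0xc0000022 },	/* example entries only */
	{ 1, 2, 0xc0000034 },
	{ 0, 0, 0 },		/* all-zero sentinel terminates the table */
};

static int map_nt_status(unsigned int nt_status, int *err_class, int *err_code)
{
	for (const struct status_map *m = map; m->nt_status != 0; m++) {
		if (m->nt_status == nt_status) {
			*err_class = m->err_class;
			*err_code = m->err_code;
			return 0;
		}
	}
	return -1; /* not found */
}

int main(void)
{
	int cls, code;

	if (!map_nt_status(0xc0000034, &cls, &code))
		printf("class %d code %d\n", cls, code);
	return 0;
}
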
index 42ef03be089f2bc9aaabd2e3f40d2b6d6ac207a8..53a75f3d0179231d395611f61f2897c9a52f33de 100644 (file)
@@ -180,6 +180,9 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
                fattr->cf_dtype = DT_REG;
        }
 
+       /* non-unix readdir doesn't provide nlink */
+       fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
+
        if (fattr->cf_cifsattrs & ATTR_READONLY)
                fattr->cf_mode &= ~S_IWUGO;
 
index 5f99b7f19e7870d72aa6f1945550c41ca34e173d..e87387dbf39fa1d24b19f245e86da4c907b0fdaf 100644 (file)
 #include <linux/slab.h>
 #include "cifs_spnego.h"
 
-/*
- * Checks if this is the first smb session to be reconnected after
- * the socket has been reestablished (so we know whether to use vc 0).
- * Called while holding the cifs_tcp_ses_lock, so do not block
- */
-static bool is_first_ses_reconnect(struct cifs_ses *ses)
-{
-       struct list_head *tmp;
-       struct cifs_ses *tmp_ses;
-
-       list_for_each(tmp, &ses->server->smb_ses_list) {
-               tmp_ses = list_entry(tmp, struct cifs_ses,
-                                    smb_ses_list);
-               if (tmp_ses->need_reconnect == false)
-                       return false;
-       }
-       /* could not find a session that was already connected,
-          this must be the first one we are reconnecting */
-       return true;
-}
-
-/*
- *     vc number 0 is treated specially by some servers, and should be the
- *      first one we request.  After that we can use vcnumbers up to maxvcs,
- *     one for each smb session (some Windows versions set maxvcs incorrectly
- *     so maxvc=1 can be ignored).  If we have too many vcs, we can reuse
- *     any vc but zero (some servers reset the connection on vcnum zero)
- *
- */
-static __le16 get_next_vcnum(struct cifs_ses *ses)
-{
-       __u16 vcnum = 0;
-       struct list_head *tmp;
-       struct cifs_ses *tmp_ses;
-       __u16 max_vcs = ses->server->max_vcs;
-       __u16 i;
-       int free_vc_found = 0;
-
-       /* Quoting the MS-SMB specification: "Windows-based SMB servers set this
-       field to one but do not enforce this limit, which allows an SMB client
-       to establish more virtual circuits than allowed by this value ... but
-       other server implementations can enforce this limit." */
-       if (max_vcs < 2)
-               max_vcs = 0xFFFF;
-
-       spin_lock(&cifs_tcp_ses_lock);
-       if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
-                       goto get_vc_num_exit;  /* vcnum will be zero */
-       for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
-               if (i == 0) /* this is the only connection, use vc 0 */
-                       break;
-
-               free_vc_found = 1;
-
-               list_for_each(tmp, &ses->server->smb_ses_list) {
-                       tmp_ses = list_entry(tmp, struct cifs_ses,
-                                            smb_ses_list);
-                       if (tmp_ses->vcnum == i) {
-                               free_vc_found = 0;
-                               break; /* found duplicate, try next vcnum */
-                       }
-               }
-               if (free_vc_found)
-                       break; /* we found a vcnumber that will work - use it */
-       }
-
-       if (i == 0)
-               vcnum = 0; /* for most common case, ie if one smb session, use
-                             vc zero.  Also for case when no free vcnum, zero
-                             is safest to send (some clients only send zero) */
-       else if (free_vc_found == 0)
-               vcnum = 1;  /* we can not reuse vc=0 safely, since some servers
-                               reset all uids on that, but 1 is ok. */
-       else
-               vcnum = i;
-       ses->vcnum = vcnum;
-get_vc_num_exit:
-       spin_unlock(&cifs_tcp_ses_lock);
-
-       return cpu_to_le16(vcnum);
-}
-
 static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
 {
        __u32 capabilities = 0;
@@ -128,7 +46,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
                                        CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
                                        USHRT_MAX));
        pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
-       pSMB->req.VcNumber = get_next_vcnum(ses);
+       pSMB->req.VcNumber = __constant_cpu_to_le16(1);
 
        /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
 
@@ -582,9 +500,9 @@ select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
                                return NTLMv2;
                        if (global_secflags & CIFSSEC_MAY_NTLM)
                                return NTLM;
-                       /* Fallthrough */
                default:
-                       return Unspecified;
+                       /* Fallthrough to attempt LANMAN authentication next */
+                       break;
                }
        case CIFS_NEGFLAVOR_LANMAN:
                switch (requested) {
index 8233b174de3d62c6e5a3223919e83db8f7c57016..ea99efe0ae3d5de4206add5153d68d9b4aede996 100644 (file)
@@ -806,6 +806,13 @@ out:
        return rc;
 }
 
+static int
+cifs_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                  struct cifsFileInfo *cfile)
+{
+       return CIFSSMB_set_compression(xid, tcon, cfile->fid.netfid);
+}
+
 static int
 cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                     const char *path, struct cifs_sb_info *cifs_sb,
@@ -956,6 +963,7 @@ struct smb_version_operations smb1_operations = {
        .set_path_size = CIFSSMBSetEOF,
        .set_file_size = CIFSSMBSetFileSize,
        .set_file_info = smb_set_file_info,
+       .set_compression = cifs_set_compression,
        .echo = CIFSSMBEcho,
        .mkdir = CIFSSMBMkDir,
        .mkdir_setinfo = cifs_mkdir_setinfo,
index 861b332141440c35c3a1b56ec1587b0166399006..11dde4b24f8aa1dce05354cef65fa93feb82735f 100644 (file)
@@ -209,6 +209,94 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
+#ifdef CONFIG_CIFS_STATS2
+static int
+SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
+{
+       int rc;
+       unsigned int ret_data_len = 0;
+       struct network_interface_info_ioctl_rsp *out_buf;
+
+       rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+                       FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
+                       NULL /* no data input */, 0 /* no data input */,
+                       (char **)&out_buf, &ret_data_len);
+
+       if ((rc == 0)  && (ret_data_len > 0)) {
+               /* Dump info on first interface */
+               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
+                       le32_to_cpu(out_buf->Capability));
+               cifs_dbg(FYI, "Link Speed %lld\n",
+                       le64_to_cpu(out_buf->LinkSpeed));
+       } else
+               cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+
+       return rc;
+}
+#endif /* STATS2 */
+
+static void
+smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+       int rc;
+       __le16 srch_path = 0; /* Null - open root of share */
+       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       struct cifs_open_parms oparms;
+       struct cifs_fid fid;
+
+       oparms.tcon = tcon;
+       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.disposition = FILE_OPEN;
+       oparms.create_options = 0;
+       oparms.fid = &fid;
+       oparms.reconnect = false;
+
+       rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+       if (rc)
+               return;
+
+#ifdef CONFIG_CIFS_STATS2
+       SMB3_request_interfaces(xid, tcon);
+#endif /* STATS2 */
+
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_ATTRIBUTE_INFORMATION);
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_DEVICE_INFORMATION);
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
+       SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+       return;
+}
+
+static void
+smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+       int rc;
+       __le16 srch_path = 0; /* Null - open root of share */
+       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       struct cifs_open_parms oparms;
+       struct cifs_fid fid;
+
+       oparms.tcon = tcon;
+       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.disposition = FILE_OPEN;
+       oparms.create_options = 0;
+       oparms.fid = &fid;
+       oparms.reconnect = false;
+
+       rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+       if (rc)
+               return;
+
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_ATTRIBUTE_INFORMATION);
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_DEVICE_INFORMATION);
+       SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+       return;
+}
+
 static int
 smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
                        struct cifs_sb_info *cifs_sb, const char *full_path)
@@ -304,7 +392,19 @@ smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
                seq_puts(m, " ASYMMETRIC,");
        if (tcon->capabilities == 0)
                seq_puts(m, " None");
+       if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
+               seq_puts(m, " Aligned,");
+       if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
+               seq_puts(m, " Partition Aligned,");
+       if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
+               seq_puts(m, " SSD,");
+       if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
+               seq_puts(m, " TRIM-support,");
+
        seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
+       if (tcon->perf_sector_size)
+               seq_printf(m, "\tOptimal sector size: 0x%x",
+                          tcon->perf_sector_size);
 }
 
 static void
@@ -393,6 +493,85 @@ smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
        SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 }
 
+static int
+SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
+                    u64 persistent_fid, u64 volatile_fid,
+                    struct copychunk_ioctl *pcchunk)
+{
+       int rc;
+       unsigned int ret_data_len;
+       struct resume_key_req *res_key;
+
+       rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+                       FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
+                       NULL, 0 /* no input */,
+                       (char **)&res_key, &ret_data_len);
+
+       if (rc) {
+               cifs_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
+               goto req_res_key_exit;
+       }
+       if (ret_data_len < sizeof(struct resume_key_req)) {
+               cifs_dbg(VFS, "Invalid refcopy resume key length\n");
+               rc = -EINVAL;
+               goto req_res_key_exit;
+       }
+       memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
+
+req_res_key_exit:
+       kfree(res_key);
+       return rc;
+}
+
+static int
+smb2_clone_range(const unsigned int xid,
+                       struct cifsFileInfo *srcfile,
+                       struct cifsFileInfo *trgtfile, u64 src_off,
+                       u64 len, u64 dest_off)
+{
+       int rc;
+       unsigned int ret_data_len;
+       struct copychunk_ioctl *pcchunk;
+       char *retbuf = NULL;
+
+       pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
+
+       if (pcchunk == NULL)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+       /* Request a key from the server to identify the source of the copy */
+       rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
+                               srcfile->fid.persistent_fid,
+                               srcfile->fid.volatile_fid, pcchunk);
+
+       /* Note: request_res_key sets res_key null only if rc !=0 */
+       if (rc)
+               return rc;
+
+       /* For now array only one chunk long, will make more flexible later */
+       pcchunk->ChunkCount = __constant_cpu_to_le32(1);
+       pcchunk->Reserved = 0;
+       pcchunk->SourceOffset = cpu_to_le64(src_off);
+       pcchunk->TargetOffset = cpu_to_le64(dest_off);
+       pcchunk->Length = cpu_to_le32(len);
+       pcchunk->Reserved2 = 0;
+
+       /* Request that server copy to target from src file identified by key */
+       rc = SMB2_ioctl(xid, tlink_tcon(trgtfile->tlink),
+                       trgtfile->fid.persistent_fid,
+                       trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+                       true /* is_fsctl */, (char *)pcchunk,
+                       sizeof(struct copychunk_ioctl), &retbuf, &ret_data_len);
+
+       /* BB need to special case rc = EINVAL to alter chunk size */
+
+       cifs_dbg(FYI, "rc %d data length out %d\n", rc, ret_data_len);
+
+       kfree(pcchunk);
+       return rc;
+}
+
 static int
 smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
                struct cifs_fid *fid)
@@ -445,6 +624,14 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
                            cfile->fid.volatile_fid, cfile->pid, &eof);
 }
 
+static int
+smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                  struct cifsFileInfo *cfile)
+{
+       return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
+                           cfile->fid.volatile_fid);
+}
+
 static int
 smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                     const char *path, struct cifs_sb_info *cifs_sb,
@@ -865,6 +1052,7 @@ struct smb_version_operations smb20_operations = {
        .logoff = SMB2_logoff,
        .tree_connect = SMB2_tcon,
        .tree_disconnect = SMB2_tdis,
+       .qfs_tcon = smb2_qfs_tcon,
        .is_path_accessible = smb2_is_path_accessible,
        .can_echo = smb2_can_echo,
        .echo = SMB2_echo,
@@ -874,6 +1062,7 @@ struct smb_version_operations smb20_operations = {
        .set_path_size = smb2_set_path_size,
        .set_file_size = smb2_set_file_size,
        .set_file_info = smb2_set_file_info,
+       .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
        .rmdir = smb2_rmdir,
@@ -907,6 +1096,7 @@ struct smb_version_operations smb20_operations = {
        .set_oplock_level = smb2_set_oplock_level,
        .create_lease_buf = smb2_create_lease_buf,
        .parse_lease_buf = smb2_parse_lease_buf,
+       .clone_range = smb2_clone_range,
 };
 
 struct smb_version_operations smb21_operations = {
@@ -936,6 +1126,7 @@ struct smb_version_operations smb21_operations = {
        .logoff = SMB2_logoff,
        .tree_connect = SMB2_tcon,
        .tree_disconnect = SMB2_tdis,
+       .qfs_tcon = smb2_qfs_tcon,
        .is_path_accessible = smb2_is_path_accessible,
        .can_echo = smb2_can_echo,
        .echo = SMB2_echo,
@@ -945,6 +1136,7 @@ struct smb_version_operations smb21_operations = {
        .set_path_size = smb2_set_path_size,
        .set_file_size = smb2_set_file_size,
        .set_file_info = smb2_set_file_info,
+       .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
        .rmdir = smb2_rmdir,
@@ -978,6 +1170,7 @@ struct smb_version_operations smb21_operations = {
        .set_oplock_level = smb21_set_oplock_level,
        .create_lease_buf = smb2_create_lease_buf,
        .parse_lease_buf = smb2_parse_lease_buf,
+       .clone_range = smb2_clone_range,
 };
 
 struct smb_version_operations smb30_operations = {
@@ -1008,6 +1201,7 @@ struct smb_version_operations smb30_operations = {
        .logoff = SMB2_logoff,
        .tree_connect = SMB2_tcon,
        .tree_disconnect = SMB2_tdis,
+       .qfs_tcon = smb3_qfs_tcon,
        .is_path_accessible = smb2_is_path_accessible,
        .can_echo = smb2_can_echo,
        .echo = SMB2_echo,
@@ -1017,6 +1211,7 @@ struct smb_version_operations smb30_operations = {
        .set_path_size = smb2_set_path_size,
        .set_file_size = smb2_set_file_size,
        .set_file_info = smb2_set_file_info,
+       .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
        .rmdir = smb2_rmdir,
@@ -1051,6 +1246,7 @@ struct smb_version_operations smb30_operations = {
        .set_oplock_level = smb3_set_oplock_level,
        .create_lease_buf = smb3_create_lease_buf,
        .parse_lease_buf = smb3_parse_lease_buf,
+       .clone_range = smb2_clone_range,
 };
 
 struct smb_version_values smb20_values = {
index eba0efde66d70ae15974eef74b10ff6d83ca44d8..8ab05b0d6778f99343d5740ff037ccbfad7bbef8 100644 (file)
@@ -687,6 +687,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        else
                return -EIO;
 
+       /* no need to send SMB logoff if uid already closed due to reconnect */
+       if (ses->need_reconnect)
+               goto smb2_session_already_dead;
+
        rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
        if (rc)
                return rc;
@@ -701,6 +705,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
         * No tcon so can't do
         * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
         */
+
+smb2_session_already_dead:
        return rc;
 }
 
@@ -1131,6 +1137,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        cifs_dbg(FYI, "SMB2 IOCTL\n");
 
+       *out_data = NULL;
        /* zero out returned data len, in case of error */
        if (plen)
                *plen = 0;
@@ -1176,11 +1183,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
                req->Flags = 0;
 
        iov[0].iov_base = (char *)req;
-       /* 4 for rfc1002 length field */
-       iov[0].iov_len = get_rfc1002_length(req) + 4;
 
-       if (indatalen)
-               inc_rfc1001_len(req, indatalen);
+       /*
+        * If no input data, the size of ioctl struct in
+        * protocol spec still includes a 1 byte data buffer,
+        * but if input data passed to ioctl, we do not
+        * want to double count this, so we do not send
+        * the dummy one byte of data in iovec[0] if sending
+        * input data (in iovec[1]). We also must add 4 bytes
+        * in first iovec to allow for rfc1002 length field.
+        */
+
+       if (indatalen) {
+               iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+               inc_rfc1001_len(req, indatalen - 1);
+       } else
+               iov[0].iov_len = get_rfc1002_length(req) + 4;
+
 
        rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
        rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
@@ -1228,6 +1247,33 @@ ioctl_exit:
        return rc;
 }
 
+/*
+ *   Individual callers to ioctl worker function follow
+ */
+
+int
+SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                    u64 persistent_fid, u64 volatile_fid)
+{
+       int rc;
+       char *res_key = NULL;
+       struct  compress_ioctl fsctl_input;
+       char *ret_data = NULL;
+
+       fsctl_input.CompressionState =
+                       __constant_cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+
+       rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+                       FSCTL_SET_COMPRESSION, true /* is_fsctl */,
+                       (char *)&fsctl_input /* data input */,
+                       2 /* in data len */, &ret_data /* out data */, NULL);
+
+       cifs_dbg(FYI, "set compression rc %d\n", rc);
+       kfree(res_key);
+
+       return rc;
+}
+
 int
 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
           u64 persistent_fid, u64 volatile_fid)
@@ -2293,7 +2339,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
-               goto qinf_exit;
+               goto qfsinf_exit;
        }
        rsp = (struct smb2_query_info_rsp *)iov.iov_base;
 
@@ -2305,7 +2351,70 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (!rc)
                copy_fs_info_to_kstatfs(info, fsdata);
 
-qinf_exit:
+qfsinf_exit:
+       free_rsp_buf(resp_buftype, iov.iov_base);
+       return rc;
+}
+
+int
+SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+             u64 persistent_fid, u64 volatile_fid, int level)
+{
+       struct smb2_query_info_rsp *rsp = NULL;
+       struct kvec iov;
+       int rc = 0;
+       int resp_buftype, max_len, min_len;
+       struct cifs_ses *ses = tcon->ses;
+       unsigned int rsp_len, offset;
+
+       if (level == FS_DEVICE_INFORMATION) {
+               max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+               min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+       } else if (level == FS_ATTRIBUTE_INFORMATION) {
+               max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
+               min_len = MIN_FS_ATTR_INFO_SIZE;
+       } else if (level == FS_SECTOR_SIZE_INFORMATION) {
+               max_len = sizeof(struct smb3_fs_ss_info);
+               min_len = sizeof(struct smb3_fs_ss_info);
+       } else {
+               cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
+               return -EINVAL;
+       }
+
+       rc = build_qfs_info_req(&iov, tcon, level, max_len,
+                               persistent_fid, volatile_fid);
+       if (rc)
+               return rc;
+
+       rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
+       if (rc) {
+               cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+               goto qfsattr_exit;
+       }
+       rsp = (struct smb2_query_info_rsp *)iov.iov_base;
+
+       rsp_len = le32_to_cpu(rsp->OutputBufferLength);
+       offset = le16_to_cpu(rsp->OutputBufferOffset);
+       rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
+       if (rc)
+               goto qfsattr_exit;
+
+       if (level == FS_ATTRIBUTE_INFORMATION)
+               memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
+                       + (char *)&rsp->hdr, min_t(unsigned int,
+                       rsp_len, max_len));
+       else if (level == FS_DEVICE_INFORMATION)
+               memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
+                       + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
+       else if (level == FS_SECTOR_SIZE_INFORMATION) {
+               struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
+                       (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
+               tcon->ss_flags = le32_to_cpu(ss_info->Flags);
+               tcon->perf_sector_size =
+                       le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
+       }
+
+qfsattr_exit:
        free_rsp_buf(resp_buftype, iov.iov_base);
        return rc;
 }
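
The SMB2_ioctl hunk above adjusts the first iovec and the RFC1002 length so the 1-byte dummy Buffer already counted in the fixed-size request is not counted twice when real input data is sent in a second iovec. The helper below is a hypothetical, self-contained restatement of that arithmetic; the request length used in main is a placeholder, not the real size of an smb2_ioctl_req.

#include <stdio.h>

struct lengths { unsigned int iov0_len; unsigned int rfc1002_delta; };

/* mirror of the length bookkeeping in the SMB2_ioctl hunk above */
static struct lengths smb2_ioctl_lengths(unsigned int fixed_req_len,
					 unsigned int indatalen)
{
	struct lengths l;

	if (indatalen) {
		l.iov0_len = fixed_req_len + 4 - 1; /* +4 RFC1002 field, -1 dummy byte */
		l.rfc1002_delta = indatalen - 1;    /* one byte already accounted for */
	} else {
		l.iov0_len = fixed_req_len + 4;
		l.rfc1002_delta = 0;
	}
	return l;
}

int main(void)
{
	unsigned int req_len = 120; /* placeholder: stands in for get_rfc1002_length(req) */
	struct lengths l = smb2_ioctl_lengths(req_len, 2); /* e.g. 2 bytes of FSCTL input */

	printf("iov[0] = %u bytes, rfc1002 length grows by %u\n",
	       l.iov0_len, l.rfc1002_delta);
	return 0;
}
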
index b83d0118a7577256ee4084d8a3abce4666e90bb7..b50a129572cd58149eacb2c2d93fa125f34ef9ff 100644 (file)
@@ -534,9 +534,16 @@ struct create_durable {
        } Data;
 } __packed;
 
+#define COPY_CHUNK_RES_KEY_SIZE        24
+struct resume_key_req {
+       char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
+       __le32  ContextLength;  /* MBZ */
+       char    Context[0];     /* ignored, Windows sets to 4 bytes of zero */
+} __packed;
+
 /* this goes in the ioctl buffer when doing a copychunk request */
 struct copychunk_ioctl {
-       char SourceKey[24];
+       char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
        __le32 ChunkCount; /* we are only sending 1 */
        __le32 Reserved;
        /* array will only be one chunk long for us */
@@ -546,6 +553,12 @@ struct copychunk_ioctl {
        __u32 Reserved2;
 } __packed;
 
+struct copychunk_ioctl_rsp {
+       __le32 ChunksWritten;
+       __le32 ChunkBytesWritten;
+       __le32 TotalBytesWritten;
+} __packed;
+
 /* Response and Request are the same format */
 struct validate_negotiate_info {
        __le32 Capabilities;
@@ -569,6 +582,10 @@ struct network_interface_info_ioctl_rsp {
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
 
+struct compress_ioctl {
+       __le16 CompressionState; /* See cifspdu.h for possible flag values */
+} __packed;
+
 struct smb2_ioctl_req {
        struct smb2_hdr hdr;
        __le16 StructureSize;   /* Must be 57 */
@@ -584,7 +601,7 @@ struct smb2_ioctl_req {
        __le32 MaxOutputResponse;
        __le32 Flags;
        __u32  Reserved2;
-       char   Buffer[0];
+       __u8   Buffer[0];
 } __packed;
 
 struct smb2_ioctl_rsp {
@@ -870,14 +887,16 @@ struct smb2_lease_ack {
 
 /* File System Information Classes */
 #define FS_VOLUME_INFORMATION          1 /* Query */
-#define FS_LABEL_INFORMATION           2 /* Set */
+#define FS_LABEL_INFORMATION           2 /* Local only */
 #define FS_SIZE_INFORMATION            3 /* Query */
 #define FS_DEVICE_INFORMATION          4 /* Query */
 #define FS_ATTRIBUTE_INFORMATION       5 /* Query */
 #define FS_CONTROL_INFORMATION         6 /* Query, Set */
 #define FS_FULL_SIZE_INFORMATION       7 /* Query */
 #define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
+#define FS_DRIVER_PATH_INFORMATION     9 /* Local only */
+#define FS_VOLUME_FLAGS_INFORMATION    10 /* Local only */
+#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
 
 struct smb2_fs_full_size_info {
        __le64 TotalAllocationUnits;
@@ -887,6 +906,22 @@ struct smb2_fs_full_size_info {
        __le32 BytesPerSector;
 } __packed;
 
+#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+       __le32 LogicalBytesPerSector;
+       __le32 PhysicalBytesPerSectorForAtomicity;
+       __le32 PhysicalBytesPerSectorForPerf;
+       __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
+       __le32 Flags;
+       __le32 ByteOffsetForSectorAlignment;
+       __le32 ByteOffsetForPartitionAlignment;
+} __packed;
+
 /* partial list of QUERY INFO levels */
 #define FILE_DIRECTORY_INFORMATION     1
 #define FILE_FULL_DIRECTORY_INFORMATION 2
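
Editorial note: the smb3_fs_ss_info structure and the SSINFO_FLAGS_* bits above carry the sector-size properties an SMB3 server reports for FS_SECTOR_SIZE_INFORMATION. A rough sketch of decoding the reply follows; the function name is made up for illustration and only the fields/flags defined above are assumed.

/* Illustrative only: decode the Flags word of a sector size info reply. */
static void sketch_report_sector_info(const struct smb3_fs_ss_info *ss)
{
	u32 flags = le32_to_cpu(ss->Flags);

	pr_info("logical sector %u, physical (perf) %u\n",
		le32_to_cpu(ss->LogicalBytesPerSector),
		le32_to_cpu(ss->PhysicalBytesPerSectorForPerf));

	if (flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		pr_info("device reports aligned sectors\n");
	if (flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		pr_info("no seek penalty (likely SSD)\n");
	if (flags & SSINFO_FLAGS_TRIM_ENABLED)
		pr_info("TRIM enabled\n");
}
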
index e3fb4801ee969295484fd8324bec855fc9600e0b..313813e4c19b300357bfc5f6db225a3be85d1121 100644 (file)
@@ -142,12 +142,16 @@ extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                         u64 persistent_fid, u64 volatile_fid,
                         FILE_BASIC_INFO *buf);
+extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                               u64 persistent_fid, u64 volatile_fid);
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                             const u64 persistent_fid, const u64 volatile_fid,
                             const __u8 oplock_level);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
                         u64 persistent_file_id, u64 volatile_file_id,
                         struct kstatfs *FSData);
+extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+                        u64 persistent_file_id, u64 volatile_file_id, int lvl);
 extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
                     const __u64 persist_fid, const __u64 volatile_fid,
                     const __u32 pid, const __u64 length, const __u64 offset,
index d952ee48f4dcc629a5d1e91c3470cbd2bd6d1aec..a4b2391fe66e4e11cea93e7396b987c335d43823 100644 (file)
 #define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */
 #define FSCTL_SRV_READ_HASH          0x001441BB /* BB add struct */
 
+/* See MS-FSCC 2.1.2.5 */
 #define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
 #define IO_REPARSE_TAG_HSM           0xC0000004
 #define IO_REPARSE_TAG_SIS           0x80000007
+#define IO_REPARSE_TAG_HSM2          0x80000006
+#define IO_REPARSE_TAG_DRIVER_EXTENDER 0x80000005
+/* Used by the DFS filter. See MS-DFSC */
+#define IO_REPARSE_TAG_DFS           0x8000000A
+/* Used by the DFS filter. See MS-DFSC */
+#define IO_REPARSE_TAG_DFSR          0x80000012
+#define IO_REPARSE_TAG_FILTER_MANAGER 0x8000000B
+/* See section MS-FSCC 2.1.2.4 */
+#define IO_REPARSE_TAG_SYMLINK       0xA000000C
+#define IO_REPARSE_TAG_DEDUP         0x80000013
+#define IO_REPARSE_APPXSTREAM       0xC0000014
+/* NFS symlinks, Win 8/SMB3 and later */
+#define IO_REPARSE_TAG_NFS           0x80000014
 
 /* fsctl flags */
 /* If Flags is set to this value, the request is an FSCTL not ioctl request */
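
Editorial note: the IO_REPARSE_TAG_* values added above follow the MS-FSCC reparse tag layout. Below is a minimal, hypothetical helper that classifies a tag read from a reparse point; it covers only the tags defined in this hunk and is a sketch, not code from the patch.

/* Hypothetical helper: name a few of the reparse tags defined above. */
static const char *sketch_reparse_tag_name(u32 tag)
{
	switch (tag) {
	case IO_REPARSE_TAG_MOUNT_POINT:
		return "mount point (junction)";
	case IO_REPARSE_TAG_SYMLINK:
		return "NTFS symlink";
	case IO_REPARSE_TAG_NFS:
		return "NFS symlink (Win 8/SMB3 and later)";
	case IO_REPARSE_TAG_DFS:
	case IO_REPARSE_TAG_DFSR:
		return "DFS referral";
	case IO_REPARSE_TAG_DEDUP:
		return "deduplicated file";
	default:
		return "other/unknown reparse tag";
	}
}
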
index 6fdcb1b4a106747779ae77ed365116256e79edec..800b938e4061768f5f55ee414b45afa56e6d6aa2 100644 (file)
@@ -410,8 +410,13 @@ static int
 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
                      const int optype)
 {
-       return wait_for_free_credits(server, timeout,
-                               server->ops->get_credits_field(server, optype));
+       int *val;
+
+       val = server->ops->get_credits_field(server, optype);
+       /* Since an echo is already inflight, no need to wait to send another */
+       if (*val <= 0 && optype == CIFS_ECHO_OP)
+               return -EAGAIN;
+       return wait_for_free_credits(server, timeout, val);
 }
 
 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
index 41000305d716ea51c47ed52ddb5abe024045e958..d70df2e0e0da8a6b41d9bfeba798fb4cc9481a80 100644 (file)
@@ -1331,14 +1331,6 @@ rename_retry:
  * list is non-empty and continue searching.
  */
 
-/**
- * have_submounts - check for mounts over a dentry
- * @parent: dentry to check.
- *
- * Return true if the parent or its subdirectories contain
- * a mount point
- */
-
 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
 {
        int *ret = data;
@@ -1349,6 +1341,13 @@ static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
        return D_WALK_CONTINUE;
 }
 
+/**
+ * have_submounts - check for mounts over a dentry
+ * @parent: dentry to check.
+ *
+ * Return true if the parent or its subdirectories contain
+ * a mount point
+ */
 int have_submounts(struct dentry *parent)
 {
        int ret = 0;
@@ -1801,6 +1800,32 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
 
 EXPORT_SYMBOL(d_instantiate_unique);
 
+/**
+ * d_instantiate_no_diralias - instantiate a non-aliased dentry
+ * @entry: dentry to complete
+ * @inode: inode to attach to this dentry
+ *
+ * Fill in inode information in the entry.  If a directory alias is found, then
+ * return an error.  Together with d_materialise_unique() this guarantees that a
+ * directory inode may never have more than one alias.
+ */
+int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
+{
+       BUG_ON(!hlist_unhashed(&entry->d_alias));
+
+       spin_lock(&inode->i_lock);
+       if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
+               spin_unlock(&inode->i_lock);
+               return -EBUSY;
+       }
+       __d_instantiate(entry, inode);
+       spin_unlock(&inode->i_lock);
+       security_d_instantiate(entry, inode);
+
+       return 0;
+}
+EXPORT_SYMBOL(d_instantiate_no_diralias);
+
 struct dentry *d_make_root(struct inode *root_inode)
 {
        struct dentry *res = NULL;
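
Editorial note: d_instantiate_no_diralias(), documented above, lets a filesystem refuse to attach a second alias to a directory inode. A hedged sketch of the expected calling pattern follows; the surrounding mkdir helper, fs_new_inode(), and the error handling are invented for illustration, only the d_instantiate_no_diralias() call itself comes from this change.

/* Sketch only: how a filesystem might consume d_instantiate_no_diralias(). */
static int sketch_fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = fs_new_inode(dir, S_IFDIR | mode); /* hypothetical */
	int err;

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	err = d_instantiate_no_diralias(dentry, inode);
	if (err) {
		/* -EBUSY: the directory inode already has an alias */
		iput(inode);
		return err;
	}
	return 0;
}
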
index 0e04142d5962312fcb055738479247b2364a252e..a142314710a3a9d020e3d3d7a4cbbf2c168af72a 100644 (file)
@@ -127,6 +127,7 @@ struct dio {
        spinlock_t bio_lock;            /* protects BIO fields below */
        int page_errors;                /* errno from get_user_pages() */
        int is_async;                   /* is IO async ? */
+       int should_dirty;               /* should we mark read pages dirty? */
        bool defer_completion;          /* defer AIO completion to workqueue? */
        int io_error;                   /* IO error in completion path */
        unsigned long refcount;         /* direct_io_worker() and bios */
@@ -403,7 +404,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
        dio->refcount++;
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-       if (dio->is_async && dio->rw == READ)
+       if (dio->is_async && dio->rw == READ && dio->should_dirty)
                bio_set_pages_dirty(bio);
 
        if (sdio->submit_io)
@@ -474,13 +475,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
        if (!uptodate)
                dio->io_error = -EIO;
 
-       if (dio->is_async && dio->rw == READ) {
+       if (dio->is_async && dio->rw == READ && dio->should_dirty) {
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;
 
-                       if (dio->rw == READ && !PageCompound(page))
+                       if (dio->rw == READ && !PageCompound(page) &&
+                           dio->should_dirty)
                                set_page_dirty_lock(page);
                        page_cache_release(page);
                }
@@ -1081,6 +1083,101 @@ static inline int drop_refcount(struct dio *dio)
        return ret2;
 }
 
+static ssize_t direct_IO_iovec(const struct iovec *iov, unsigned long nr_segs,
+                              struct dio *dio, struct dio_submit *sdio,
+                              unsigned blkbits, struct buffer_head *map_bh)
+{
+       size_t bytes;
+       ssize_t retval = 0;
+       int seg;
+       unsigned long user_addr;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               user_addr = (unsigned long)iov[seg].iov_base;
+               sdio->pages_in_io +=
+                       ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
+                               PAGE_SIZE - user_addr / PAGE_SIZE);
+       }
+
+       dio->should_dirty = 1;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               user_addr = (unsigned long)iov[seg].iov_base;
+               sdio->size += bytes = iov[seg].iov_len;
+
+               /* Index into the first page of the first block */
+               sdio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
+               sdio->final_block_in_request = sdio->block_in_file +
+                                               (bytes >> blkbits);
+               /* Page fetching state */
+               sdio->head = 0;
+               sdio->tail = 0;
+               sdio->curr_page = 0;
+
+               sdio->total_pages = 0;
+               if (user_addr & (PAGE_SIZE-1)) {
+                       sdio->total_pages++;
+                       bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
+               }
+               sdio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+               sdio->curr_user_address = user_addr;
+
+               retval = do_direct_IO(dio, sdio, map_bh);
+
+               dio->result += iov[seg].iov_len -
+                       ((sdio->final_block_in_request - sdio->block_in_file) <<
+                                       blkbits);
+
+               if (retval) {
+                       dio_cleanup(dio, sdio);
+                       break;
+               }
+       } /* end iovec loop */
+
+       return retval;
+}
+
+static ssize_t direct_IO_bvec(struct bio_vec *bvec, unsigned long nr_segs,
+                             struct dio *dio, struct dio_submit *sdio,
+                             unsigned blkbits, struct buffer_head *map_bh)
+{
+       ssize_t retval = 0;
+       int seg;
+
+       sdio->pages_in_io += nr_segs;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               sdio->size += bvec[seg].bv_len;
+
+               /* Index into the first page of the first block */
+               sdio->first_block_in_page = bvec[seg].bv_offset >> blkbits;
+               sdio->final_block_in_request = sdio->block_in_file +
+                                               (bvec[seg].bv_len  >> blkbits);
+               /* Page fetching state */
+               sdio->curr_page = 0;
+               page_cache_get(bvec[seg].bv_page);
+               dio->pages[0] = bvec[seg].bv_page;
+               sdio->head = 0;
+               sdio->tail = 1;
+
+               sdio->total_pages = 1;
+               sdio->curr_user_address = 0;
+
+               retval = do_direct_IO(dio, sdio, map_bh);
+
+               dio->result += bvec[seg].bv_len -
+                       ((sdio->final_block_in_request - sdio->block_in_file) <<
+                                       blkbits);
+
+               if (retval) {
+                       dio_cleanup(dio, sdio);
+                       break;
+               }
+       }
+
+       return retval;
+}
+
 /*
  * This is a library function for use by filesystem drivers.
  *
@@ -1108,9 +1205,9 @@ static inline int drop_refcount(struct dio *dio)
  */
 static inline ssize_t
 do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset, 
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags)
 {
        int seg;
        size_t size;
@@ -1122,10 +1219,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        loff_t end = offset;
        struct dio *dio;
        struct dio_submit sdio = { 0, };
-       unsigned long user_addr;
-       size_t bytes;
        struct buffer_head map_bh = { 0, };
        struct blk_plug plug;
+       unsigned long nr_segs = iter->nr_segs;
 
        if (rw & WRITE)
                rw = WRITE_ODIRECT;
@@ -1144,20 +1240,49 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        }
 
        /* Check the memory alignment.  Blocks cannot straddle pages */
-       for (seg = 0; seg < nr_segs; seg++) {
-               addr = (unsigned long)iov[seg].iov_base;
-               size = iov[seg].iov_len;
-               end += size;
-               if (unlikely((addr & blocksize_mask) ||
-                            (size & blocksize_mask))) {
-                       if (bdev)
-                               blkbits = blksize_bits(
-                                        bdev_logical_block_size(bdev));
-                       blocksize_mask = (1 << blkbits) - 1;
-                       if ((addr & blocksize_mask) || (size & blocksize_mask))
-                               goto out;
+       if (iov_iter_has_iovec(iter)) {
+               const struct iovec *iov = iov_iter_iovec(iter);
+
+               for (seg = 0; seg < nr_segs; seg++) {
+                       addr = (unsigned long)iov[seg].iov_base;
+                       size = iov[seg].iov_len;
+                       end += size;
+                       if (unlikely((addr & blocksize_mask) ||
+                                    (size & blocksize_mask))) {
+                               if (bdev)
+                                       blkbits = blksize_bits(
+                                                bdev_logical_block_size(bdev));
+                               blocksize_mask = (1 << blkbits) - 1;
+                               if ((addr & blocksize_mask) ||
+                                   (size & blocksize_mask))
+                                       goto out;
+                       }
                }
-       }
+       } else if (iov_iter_has_bvec(iter)) {
+               /*
+                * Is this necessary, or can we trust the in-kernel
+                * caller? Can we replace this with
+                *      end += iov_iter_count(iter); ?
+                */
+               struct bio_vec *bvec = iov_iter_bvec(iter);
+
+               for (seg = 0; seg < nr_segs; seg++) {
+                       addr = bvec[seg].bv_offset;
+                       size = bvec[seg].bv_len;
+                       end += size;
+                       if (unlikely((addr & blocksize_mask) ||
+                                    (size & blocksize_mask))) {
+                               if (bdev)
+                                       blkbits = blksize_bits(
+                                                bdev_logical_block_size(bdev));
+                               blocksize_mask = (1 << blkbits) - 1;
+                               if ((addr & blocksize_mask) ||
+                                   (size & blocksize_mask))
+                                       goto out;
+                       }
+               }
+       } else
+               BUG();
 
        /* watch out for a 0 len io from a tricksy fs */
        if (rw == READ && end == offset)
@@ -1251,47 +1376,14 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        if (unlikely(sdio.blkfactor))
                sdio.pages_in_io = 2;
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               user_addr = (unsigned long)iov[seg].iov_base;
-               sdio.pages_in_io +=
-                       ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
-                               PAGE_SIZE - user_addr / PAGE_SIZE);
-       }
-
        blk_start_plug(&plug);
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               user_addr = (unsigned long)iov[seg].iov_base;
-               sdio.size += bytes = iov[seg].iov_len;
-
-               /* Index into the first page of the first block */
-               sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
-               sdio.final_block_in_request = sdio.block_in_file +
-                                               (bytes >> blkbits);
-               /* Page fetching state */
-               sdio.head = 0;
-               sdio.tail = 0;
-               sdio.curr_page = 0;
-
-               sdio.total_pages = 0;
-               if (user_addr & (PAGE_SIZE-1)) {
-                       sdio.total_pages++;
-                       bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
-               }
-               sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
-               sdio.curr_user_address = user_addr;
-
-               retval = do_direct_IO(dio, &sdio, &map_bh);
-
-               dio->result += iov[seg].iov_len -
-                       ((sdio.final_block_in_request - sdio.block_in_file) <<
-                                       blkbits);
-
-               if (retval) {
-                       dio_cleanup(dio, &sdio);
-                       break;
-               }
-       } /* end iovec loop */
+       if (iov_iter_has_iovec(iter))
+               retval = direct_IO_iovec(iov_iter_iovec(iter), nr_segs, dio,
+                                        &sdio, blkbits, &map_bh);
+       else
+               retval = direct_IO_bvec(iov_iter_bvec(iter), nr_segs, dio,
+                                       &sdio, blkbits, &map_bh);
 
        if (retval == -ENOTBLK) {
                /*
@@ -1360,9 +1452,9 @@ out:
 
 ssize_t
 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset,
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags)
 {
        /*
         * The block device state is needed in the end to finally
@@ -1376,9 +1468,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        prefetch(bdev->bd_queue);
        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-                                    nr_segs, get_block, end_io,
-                                    submit_io, flags);
+       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
+                                    get_block, end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
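
Editorial note: the iovec path factored out above pre-computes how many pages each user segment spans and how many pages it will pin. The standalone sketch below walks through that arithmetic with a concrete example, assuming 4 KiB pages; it mirrors direct_IO_iovec() but the helper name is illustrative and, like the original, it assumes the segment extends past the first page boundary when the address is unaligned.

/* Sketch of the per-segment page accounting done in direct_IO_iovec().
 * Worked example: user_addr = 0x1000f00, len = 0x2200, PAGE_SIZE = 0x1000
 *   spanned pages = (0x1003100 + 0xfff)/0x1000 - 0x1000f00/0x1000
 *                 = 0x1004 - 0x1000 = 4 pages
 */
static unsigned long sketch_pages_for_segment(unsigned long user_addr, size_t len)
{
	unsigned long total_pages = 0;
	size_t bytes = len;

	if (user_addr & (PAGE_SIZE - 1)) {
		/* the first page is only partially covered */
		total_pages++;
		bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
	}
	total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
	return total_pages; /* also 4 for the example above */
}
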
index 88556dc0458ee045659ed5a91875fb51d7ac2264..d5abafd56a6d69e53242f61fd487b6c04798abb5 100644 (file)
@@ -706,9 +706,7 @@ static int lkb_idr_is_local(int id, void *p, void *data)
 {
        struct dlm_lkb *lkb = p;
 
-       if (!lkb->lkb_nodeid)
-               return 1;
-       return 0;
+       return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
 }
 
 static int lkb_idr_is_any(int id, void *p, void *data)
index bf12ba5dd223befe93ce786c3a3848cdf5e38fcd..4000f6b3a7504505bb46d3d5ef2946b53e3d5cc9 100644 (file)
  */
 static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       struct dentry *lower_dentry;
-       int rc = 1;
+       struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+       int rc;
+
+       if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
+               return 1;
 
        if (flags & LOOKUP_RCU)
                return -ECHILD;
 
-       lower_dentry = ecryptfs_dentry_to_lower(dentry);
-       if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
-               goto out;
        rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
        if (dentry->d_inode) {
                struct inode *lower_inode =
@@ -60,12 +60,17 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
                fsstack_copy_attr_all(dentry->d_inode, lower_inode);
        }
-out:
        return rc;
 }
 
 struct kmem_cache *ecryptfs_dentry_info_cache;
 
+static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
+{
+       kmem_cache_free(ecryptfs_dentry_info_cache,
+               container_of(head, struct ecryptfs_dentry_info, rcu));
+}
+
 /**
  * ecryptfs_d_release
  * @dentry: The ecryptfs dentry
@@ -74,15 +79,11 @@ struct kmem_cache *ecryptfs_dentry_info_cache;
  */
 static void ecryptfs_d_release(struct dentry *dentry)
 {
-       if (ecryptfs_dentry_to_private(dentry)) {
-               if (ecryptfs_dentry_to_lower(dentry)) {
-                       dput(ecryptfs_dentry_to_lower(dentry));
-                       mntput(ecryptfs_dentry_to_lower_mnt(dentry));
-               }
-               kmem_cache_free(ecryptfs_dentry_info_cache,
-                               ecryptfs_dentry_to_private(dentry));
+       struct ecryptfs_dentry_info *p = dentry->d_fsdata;
+       if (p) {
+               path_put(&p->lower_path);
+               call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
        }
-       return;
 }
 
 const struct dentry_operations ecryptfs_dops = {
index df19d34a033b9521de8623bb33ff2de6c0d9361a..90d1882b306face4c53d10ed08c44e3ec77b726a 100644 (file)
@@ -261,7 +261,10 @@ struct ecryptfs_inode_info {
  * vfsmount too. */
 struct ecryptfs_dentry_info {
        struct path lower_path;
-       struct ecryptfs_crypt_stat *crypt_stat;
+       union {
+               struct ecryptfs_crypt_stat *crypt_stat;
+               struct rcu_head rcu;
+       };
 };
 
 /**
@@ -512,13 +515,6 @@ ecryptfs_dentry_to_lower(struct dentry *dentry)
        return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
 }
 
-static inline void
-ecryptfs_set_dentry_lower(struct dentry *dentry, struct dentry *lower_dentry)
-{
-       ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry =
-               lower_dentry;
-}
-
 static inline struct vfsmount *
 ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
 {
@@ -531,13 +527,6 @@ ecryptfs_dentry_to_lower_path(struct dentry *dentry)
        return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
 }
 
-static inline void
-ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
-{
-       ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.mnt =
-               lower_mnt;
-}
-
 #define ecryptfs_printk(type, fmt, arg...) \
         __ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
 __printf(1, 2)
index 992cf95830b5792a5d510f7d588049d27c139a94..3ed6e5f5bb4b73711792494b2a6edf4c415dd5a1 100644 (file)
 /**
  * ecryptfs_read_update_atime
  *
- * generic_file_read updates the atime of upper layer inode.  But, it
+ * generic_file_read_iter updates the atime of upper layer inode.  But, it
  * doesn't give us a chance to update the atime of the lower layer
- * inode.  This function is a wrapper to generic_file_read.  It
- * updates the atime of the lower level inode if generic_file_read
+ * inode.  This function is a wrapper to generic_file_read_iter.  It
+ * updates the atime of the lower level inode if generic_file_read_iter
  * returns without any errors. This is to be used only for file reads.
  * The function to be used for directory reads is ecryptfs_read.
  */
 static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
-                               const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos)
+                               struct iov_iter *iter, loff_t pos)
 {
        ssize_t rc;
        struct path *path;
        struct file *file = iocb->ki_filp;
 
-       rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
+       rc = generic_file_read_iter(iocb, iter, pos);
        /*
         * Even though this is a async interface, we need to wait
         * for IO to finish to update atime
@@ -357,9 +356,9 @@ const struct file_operations ecryptfs_dir_fops = {
 const struct file_operations ecryptfs_main_fops = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
-       .aio_read = ecryptfs_read_update_atime,
+       .read_iter = ecryptfs_read_update_atime,
        .write = do_sync_write,
-       .aio_write = generic_file_aio_write,
+       .write_iter = generic_file_write_iter,
        .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
index 67e9b6339691f9cc01a378564fa5aa0527da1977..0f9b66eaa7677ce920d8488e5afd49fc684c4ac7 100644 (file)
@@ -361,8 +361,8 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
        BUG_ON(!d_count(lower_dentry));
 
        ecryptfs_set_dentry_private(dentry, dentry_info);
-       ecryptfs_set_dentry_lower(dentry, lower_dentry);
-       ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
+       dentry_info->lower_path.mnt = lower_mnt;
+       dentry_info->lower_path.dentry = lower_dentry;
 
        if (!lower_dentry->d_inode) {
                /* We want to add because we couldn't find in lower */
@@ -703,16 +703,6 @@ out:
        return NULL;
 }
 
-static void
-ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
-{
-       char *buf = nd_get_link(nd);
-       if (!IS_ERR(buf)) {
-               /* Free the char* */
-               kfree(buf);
-       }
-}
-
 /**
  * upper_size_to_lower_size
  * @crypt_stat: Crypt_stat associated with file
@@ -1121,7 +1111,7 @@ out:
 const struct inode_operations ecryptfs_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = ecryptfs_follow_link,
-       .put_link = ecryptfs_put_link,
+       .put_link = kfree_put_link,
        .permission = ecryptfs_permission,
        .setattr = ecryptfs_setattr,
        .getattr = ecryptfs_getattr_link,
index 7d52806c21197206a5932b08a68e7dc9d6899253..4725a07f003cf3279fa81813748442afc24a053c 100644 (file)
@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        struct ecryptfs_msg_ctx *msg_ctx;
        struct ecryptfs_message *msg = NULL;
        char *auth_tok_sig;
-       char *payload;
+       char *payload = NULL;
        size_t payload_len = 0;
        int rc;
 
@@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        }
 out:
        kfree(msg);
+       kfree(payload);
        return rc;
 }
 
index eb1c5979ecaf673d7e984b6b78219217e63c9bc4..1b119d3bf924d16eea7f91f52449b122ecb5dcb1 100644 (file)
@@ -585,8 +585,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 
        /* ->kill_sb() will take care of root_info */
        ecryptfs_set_dentry_private(s->s_root, root_info);
-       ecryptfs_set_dentry_lower(s->s_root, path.dentry);
-       ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+       root_info->lower_path = path;
 
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
index 491c6c078e7f5e0ac420646288452d93cca86ce2..20564f8a358a8e9809f5cb086c1900edeea9ffcf 100644 (file)
@@ -69,8 +69,8 @@ const struct file_operations exofs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
        .release        = exofs_release_file,
index a5b3a5db31206f8f3c84781362f7c45e919ddf0e..6af043bab460b0225a2f08a3f9b9b6513c97d9f6 100644 (file)
@@ -64,8 +64,8 @@ const struct file_operations ext2_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext2_compat_ioctl,
index c260de6d7b6df9e5350ecfc019f41ed6def75470..cf91b336e3dfc7e953747d1f84f5c8d3caff36c4 100644 (file)
@@ -848,18 +848,16 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                ext2_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block);
        if (ret < 0 && (rw & WRITE))
-               ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               ext2_write_failed(mapping, offset + iov_iter_count(iter));
        return ret;
 }
 
index 25cb413277e906edb1f037ba625ece7aa92903bb..a79677188b54128121a04b4bf671f48b7dd90901 100644 (file)
@@ -52,8 +52,8 @@ const struct file_operations ext3_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext3_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext3_compat_ioctl,
index 2bd85486b87974b390892a2280676a49d8eb274e..85bd13b8b7589b8522921ff7bd10bc89fcdfb270 100644 (file)
@@ -1862,8 +1862,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * VFS code falls back into buffered path in that case so we are safe.
  */
 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1871,10 +1870,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+       trace_ext3_direct_IO_enter(inode, offset, count, rw);
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -1898,15 +1897,14 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                ext3_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + count;
 
                if (end > isize)
                        ext3_truncate_failed_direct_write(inode);
@@ -1949,8 +1947,7 @@ retry:
                        ret = err;
        }
 out:
-       trace_ext3_direct_IO_exit(inode, offset,
-                               iov_length(iov, nr_segs), rw, ret);
+       trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
        return ret;
 }
 
index 1194b1f0f8396c934ab6a382deadab6c0a6c4c2f..f8cde46de9cd77c3047182e94164bdfdac4a317d 100644 (file)
@@ -1783,7 +1783,7 @@ retry:
                d_tmpfile(dentry, inode);
                err = ext3_orphan_add(handle, inode);
                if (err)
-                       goto err_drop_inode;
+                       goto err_unlock_inode;
                mark_inode_dirty(inode);
                unlock_new_inode(inode);
        }
@@ -1791,10 +1791,9 @@ retry:
        if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
-err_drop_inode:
+err_unlock_inode:
        ext3_journal_stop(handle);
        unlock_new_inode(inode);
-       iput(inode);
        return err;
 }
 
index af815ea9d7cc4b6e209e28eea6f9bc97a6f247ce..850bf979beb00ab6917893cbe10bcc8b89afd9b7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <linux/ratelimit.h>
 #include <crypto/hash.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
@@ -1314,6 +1315,11 @@ struct ext4_sb_info {
        unsigned long s_es_last_sorted;
        struct percpu_counter s_extent_cache_cnt;
        spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+
+       /* Ratelimit ext4 messages. */
+       struct ratelimit_state s_err_ratelimit_state;
+       struct ratelimit_state s_warning_ratelimit_state;
+       struct ratelimit_state s_msg_ratelimit_state;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2117,8 +2123,7 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_map_blocks *map, int flags);
 extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                               const struct iovec *iov, loff_t offset,
-                               unsigned long nr_segs);
+                               struct iov_iter *iter, loff_t offset);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
index 3da21945ff1fff3e16b8b70b332a0a6ee330c93e..2ab3dcb741df6f5b758565401534fc018f3f1119 100644 (file)
@@ -74,12 +74,11 @@ void ext4_unwritten_wait(struct inode *inode)
  * or one thread will zero the other's data, causing corruption.
  */
 static int
-ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
-                  unsigned long nr_segs, loff_t pos)
+ext4_unaligned_aio(struct inode *inode, struct iov_iter *iter, loff_t pos)
 {
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        loff_t final_size = pos + count;
 
        if (pos >= inode->i_size)
@@ -92,8 +91,8 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
 }
 
 static ssize_t
-ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
-                   unsigned long nr_segs, loff_t pos)
+ext4_file_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+                   loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -101,11 +100,11 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
        int unaligned_aio = 0;
        ssize_t ret;
        int overwrite = 0;
-       size_t length = iov_length(iov, nr_segs);
+       size_t length = iov_iter_count(iter);
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb))
-               unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
+               unaligned_aio = ext4_unaligned_aio(inode, iter, pos);
 
        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
@@ -146,7 +145,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                        overwrite = 1;
        }
 
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0) {
@@ -165,8 +164,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 
 static ssize_t
-ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
@@ -178,22 +176,24 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-               size_t length = iov_length(iov, nr_segs);
+               size_t length = iov_iter_count(iter);
 
                if ((pos > sbi->s_bitmap_maxbytes ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)))
                        return -EFBIG;
 
                if (pos + length > sbi->s_bitmap_maxbytes) {
-                       nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
-                                             sbi->s_bitmap_maxbytes - pos);
+                       ret = iov_iter_shorten(iter,
+                                              sbi->s_bitmap_maxbytes - pos);
+                       if (ret)
+                               return ret;
                }
        }
 
        if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
-               ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
+               ret = ext4_file_dio_write(iocb, iter, pos);
        else
-               ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+               ret = generic_file_write_iter(iocb, iter, pos);
 
        return ret;
 }
@@ -594,8 +594,8 @@ const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = ext4_file_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
index 594009f5f523f0fd2228a72f1aa593a6f3ebf66f..8026469aa1fb400cc1a2e1dfda89b4cacceb8978 100644 (file)
@@ -639,8 +639,7 @@ out:
  * VFS code falls back into buffered path in that case so we are safe.
  */
 ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                          const struct iovec *iov, loff_t offset,
-                          unsigned long nr_segs)
+                          struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -648,7 +647,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int retries = 0;
 
        if (rw == WRITE) {
@@ -687,18 +686,17 @@ retry:
                        goto locked;
                }
                ret = __blockdev_direct_IO(rw, iocb, inode,
-                                inode->i_sb->s_bdev, iov,
-                                offset, nr_segs,
-                                ext4_get_block, NULL, NULL, 0);
+                                inode->i_sb->s_bdev, iter,
+                                offset, ext4_get_block, NULL, NULL, 0);
                inode_dio_done(inode);
        } else {
 locked:
-               ret = blockdev_direct_IO(rw, iocb, inode, iov,
-                                offset, nr_segs, ext4_get_block);
+               ret = blockdev_direct_IO(rw, iocb, inode, iter,
+                                offset, ext4_get_block);
 
                if (unlikely((rw & WRITE) && ret < 0)) {
                        loff_t isize = i_size_read(inode);
-                       loff_t end = offset + iov_length(iov, nr_segs);
+                       loff_t end = offset + iov_iter_count(iter);
 
                        if (end > isize)
                                ext4_truncate_failed_write(inode);
index 0d424d7ac02b0a30f98e713bb10403e90ced51b5..05599cd23a1bd7cd054d72cbb8f53163f68ac7d9 100644 (file)
@@ -2178,6 +2178,9 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
  *
  * @handle - handle for journal operations
  * @mpd - extent to map
+ * @give_up_on_write - we set this to true iff there is a fatal error and there
+ *                     is no hope of writing the data. The caller should discard
+ *                     dirty pages to avoid infinite loops.
  *
  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
  * delayed, blocks are allocated, if it is unwritten, we may need to convert
@@ -2295,6 +2298,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
        struct address_space *mapping = mpd->inode->i_mapping;
        struct pagevec pvec;
        unsigned int nr_pages;
+       long left = mpd->wbc->nr_to_write;
        pgoff_t index = mpd->first_page;
        pgoff_t end = mpd->last_page;
        int tag;
@@ -2330,6 +2334,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                        if (page->index > end)
                                goto out;
 
+                       /*
+                        * Accumulated enough dirty pages? This doesn't apply
+                        * to WB_SYNC_ALL mode. For integrity sync we have to
+                        * keep going because someone may be concurrently
+                        * dirtying pages, and we might have synced a lot of
+                        * newly appeared dirty pages, but have not synced all
+                        * of the old dirty pages.
+                        */
+                       if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+                               goto out;
+
                        /* If we can't merge this page, we are done. */
                        if (mpd->map.m_len > 0 && mpd->next_page != page->index)
                                goto out;
@@ -2364,19 +2379,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                        if (err <= 0)
                                goto out;
                        err = 0;
-
-                       /*
-                        * Accumulated enough dirty pages? This doesn't apply
-                        * to WB_SYNC_ALL mode. For integrity sync we have to
-                        * keep going because someone may be concurrently
-                        * dirtying pages, and we might have synced a lot of
-                        * newly appeared dirty pages, but have not synced all
-                        * of the old dirty pages.
-                        */
-                       if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
-                           mpd->next_page - mpd->first_page >=
-                                                       mpd->wbc->nr_to_write)
-                               goto out;
+                       left--;
                }
                pagevec_release(&pvec);
                cond_resched();
@@ -2563,7 +2566,7 @@ retry:
                        break;
        }
        blk_finish_plug(&plug);
-       if (!ret && !cycled) {
+       if (!ret && !cycled && wbc->nr_to_write > 0) {
                cycled = 1;
                mpd.last_page = writeback_index - 1;
                mpd.first_page = 0;
@@ -3067,13 +3070,12 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
  *
  */
 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int overwrite = 0;
        get_block_t *get_block_func = NULL;
        int dio_flags = 0;
@@ -3082,7 +3084,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 
        /* Use the old path for reads and writes beyond i_size. */
        if (rw != WRITE || final_size > inode->i_size)
-               return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+               return ext4_ind_direct_IO(rw, iocb, iter, offset);
 
        BUG_ON(iocb->private == NULL);
 
@@ -3149,8 +3151,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                dio_flags = DIO_LOCKING;
        }
        ret = __blockdev_direct_IO(rw, iocb, inode,
-                                  inode->i_sb->s_bdev, iov,
-                                  offset, nr_segs,
+                                  inode->i_sb->s_bdev, iter,
+                                  offset,
                                   get_block_func,
                                   ext4_end_io_dio,
                                   NULL,
@@ -3204,8 +3206,7 @@ retake_lock:
 }
 
 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3221,13 +3222,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        if (ext4_has_inline_data(inode))
                return 0;
 
-       trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+       trace_ext4_direct_IO_enter(inode, offset, iov_iter_count(iter), rw);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+               ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
        else
-               ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
-       trace_ext4_direct_IO_exit(inode, offset,
-                               iov_length(iov, nr_segs), rw, ret);
+               ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
+       trace_ext4_direct_IO_exit(inode, offset, iov_iter_count(iter), rw, ret);
        return ret;
 }
 
index 1bec5a5c1e45a29e9ead318987ec03d803104c5a..5a0408d7b1147094c3e82b6d11750b33396b7732 100644 (file)
@@ -2319,7 +2319,7 @@ retry:
                d_tmpfile(dentry, inode);
                err = ext4_orphan_add(handle, inode);
                if (err)
-                       goto err_drop_inode;
+                       goto err_unlock_inode;
                mark_inode_dirty(inode);
                unlock_new_inode(inode);
        }
@@ -2328,10 +2328,9 @@ retry:
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
-err_drop_inode:
+err_unlock_inode:
        ext4_journal_stop(handle);
        unlock_new_inode(inode);
-       iput(inode);
        return err;
 }
 
index d7d0c7b46ed40feec818701641fce792569dcec9..d488f80ee32df1137e91df0aed72bef2f61b49ac 100644 (file)
@@ -197,14 +197,15 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
 static void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
+       struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;
 
        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
-       WARN_ON(!io_end->handle);
+       WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+       wq = sbi->rsv_conversion_wq;
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
index 2c2e6cbc6bedc549938262e50be8e118d1cb6de7..d3a857bfae47631db0ab56f765f1028a244261ea 100644 (file)
@@ -411,20 +411,26 @@ static void ext4_handle_error(struct super_block *sb)
                        sb->s_id);
 }
 
+#define ext4_error_ratelimit(sb)                                       \
+               ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),     \
+                            "EXT4-fs error")
+
 void __ext4_error(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
 
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
-              sb->s_id, function, line, current->comm, &vaf);
-       va_end(args);
+       if (ext4_error_ratelimit(sb)) {
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               printk(KERN_CRIT
+                      "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+                      sb->s_id, function, line, current->comm, &vaf);
+               va_end(args);
+       }
        save_error_info(sb, function, line);
-
        ext4_handle_error(sb);
 }
 
@@ -438,22 +444,23 @@ void __ext4_error_inode(struct inode *inode, const char *function,
 
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        es->s_last_error_block = cpu_to_le64(block);
+       if (ext4_error_ratelimit(inode->i_sb)) {
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               if (block)
+                       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+                              "inode #%lu: block %llu: comm %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              block, current->comm, &vaf);
+               else
+                       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+                              "inode #%lu: comm %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              current->comm, &vaf);
+               va_end(args);
+       }
        save_error_info(inode->i_sb, function, line);
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       if (block)
-               printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-                      "inode #%lu: block %llu: comm %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      block, current->comm, &vaf);
-       else
-               printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-                      "inode #%lu: comm %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      current->comm, &vaf);
-       va_end(args);
-
        ext4_handle_error(inode->i_sb);
 }
 
@@ -469,27 +476,28 @@ void __ext4_error_file(struct file *file, const char *function,
 
        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+       if (ext4_error_ratelimit(inode->i_sb)) {
+               path = d_path(&(file->f_path), pathname, sizeof(pathname));
+               if (IS_ERR(path))
+                       path = "(unknown)";
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               if (block)
+                       printk(KERN_CRIT
+                              "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+                              "block %llu: comm %s: path %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              block, current->comm, path, &vaf);
+               else
+                       printk(KERN_CRIT
+                              "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+                              "comm %s: path %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              current->comm, path, &vaf);
+               va_end(args);
+       }
        save_error_info(inode->i_sb, function, line);
-       path = d_path(&(file->f_path), pathname, sizeof(pathname));
-       if (IS_ERR(path))
-               path = "(unknown)";
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       if (block)
-               printk(KERN_CRIT
-                      "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-                      "block %llu: comm %s: path %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      block, current->comm, path, &vaf);
-       else
-               printk(KERN_CRIT
-                      "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-                      "comm %s: path %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      current->comm, path, &vaf);
-       va_end(args);
-
        ext4_handle_error(inode->i_sb);
 }
 
@@ -543,11 +551,13 @@ void __ext4_std_error(struct super_block *sb, const char *function,
            (sb->s_flags & MS_RDONLY))
                return;
 
-       errstr = ext4_decode_error(sb, errno, nbuf);
-       printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
-              sb->s_id, function, line, errstr);
-       save_error_info(sb, function, line);
+       if (ext4_error_ratelimit(sb)) {
+               errstr = ext4_decode_error(sb, errno, nbuf);
+               printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+                      sb->s_id, function, line, errstr);
+       }
 
+       save_error_info(sb, function, line);
        ext4_handle_error(sb);
 }
 
@@ -597,6 +607,9 @@ void __ext4_msg(struct super_block *sb,
        struct va_format vaf;
        va_list args;
 
+       if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
+               return;
+
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
@@ -610,6 +623,10 @@ void __ext4_warning(struct super_block *sb, const char *function,
        struct va_format vaf;
        va_list args;
 
+       if (!___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+                         "EXT4-fs warning"))
+               return;
+
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
@@ -633,18 +650,20 @@ __acquires(bitlock)
        es->s_last_error_block = cpu_to_le64(block);
        __save_error_info(sb, function, line);
 
-       va_start(args, fmt);
-
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
-              sb->s_id, function, line, grp);
-       if (ino)
-               printk(KERN_CONT "inode %lu: ", ino);
-       if (block)
-               printk(KERN_CONT "block %llu:", (unsigned long long) block);
-       printk(KERN_CONT "%pV\n", &vaf);
-       va_end(args);
+       if (ext4_error_ratelimit(sb)) {
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
+                      sb->s_id, function, line, grp);
+               if (ino)
+                       printk(KERN_CONT "inode %lu: ", ino);
+               if (block)
+                       printk(KERN_CONT "block %llu:",
+                              (unsigned long long) block);
+               printk(KERN_CONT "%pV\n", &vaf);
+               va_end(args);
+       }
 
        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
@@ -2606,6 +2625,12 @@ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
 
 static struct attribute *ext4_attrs[] = {
        ATTR_LIST(delayed_allocation_blocks),
@@ -2623,6 +2648,12 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(max_writeback_mb_bump),
        ATTR_LIST(extent_max_zeroout_kb),
        ATTR_LIST(trigger_fs_error),
+       ATTR_LIST(err_ratelimit_interval_ms),
+       ATTR_LIST(err_ratelimit_burst),
+       ATTR_LIST(warning_ratelimit_interval_ms),
+       ATTR_LIST(warning_ratelimit_burst),
+       ATTR_LIST(msg_ratelimit_interval_ms),
+       ATTR_LIST(msg_ratelimit_burst),
        NULL,
 };
 
@@ -4118,6 +4149,11 @@ no_journal:
        if (es->s_error_count)
                mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
+       /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
+       ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
+       ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
+       ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
+
        kfree(orig_data);
        return 0;
 
index c081e34f717f6903492acd3c4bc92d26dc888e7e..03e9bebba1989ef20263fbd1656959b764927b03 100644 (file)
@@ -1350,6 +1350,8 @@ retry:
                                    s_min_extra_isize) {
                                        tried_min_extra_isize++;
                                        new_extra_isize = s_min_extra_isize;
+                                       kfree(is); is = NULL;
+                                       kfree(bs); bs = NULL;
                                        goto retry;
                                }
                                error = -1;
index bb312201ca950114782f6a811725102da3baa718..5649a9d8e94225bf0330b30b84e23819465e0903 100644 (file)
@@ -81,7 +81,7 @@ static int f2fs_write_meta_page(struct page *page,
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
        /* Should not write any meta pages if any IO error has occurred */
-       if (wbc->for_reclaim ||
+       if (wbc->for_reclaim || sbi->por_doing ||
                        is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
                dec_page_count(sbi, F2FS_DIRTY_META);
                wbc->pages_skipped++;
@@ -206,6 +206,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
 void release_orphan_inode(struct f2fs_sb_info *sbi)
 {
        mutex_lock(&sbi->orphan_inode_mutex);
+       BUG_ON(sbi->n_orphans == 0);
        sbi->n_orphans--;
        mutex_unlock(&sbi->orphan_inode_mutex);
 }
@@ -225,12 +226,8 @@ void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
                        break;
                orphan = NULL;
        }
-retry:
-       new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
+
+       new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
        new->ino = ino;
 
        /* add new_oentry into list which is sorted by inode number */
@@ -253,6 +250,7 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
                if (orphan->ino == ino) {
                        list_del(&orphan->list);
                        kmem_cache_free(orphan_entry_slab, orphan);
+                       BUG_ON(sbi->n_orphans == 0);
                        sbi->n_orphans--;
                        break;
                }
@@ -277,7 +275,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return 0;
 
-       sbi->por_doing = 1;
+       sbi->por_doing = true;
        start_blk = __start_cp_addr(sbi) + 1;
        orphan_blkaddr = __start_sum_addr(sbi) - 1;
 
@@ -294,7 +292,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        }
        /* clear Orphan Flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-       sbi->por_doing = 0;
+       sbi->por_doing = false;
        return 0;
 }
 
@@ -469,9 +467,7 @@ static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
                        return -EEXIST;
        }
        list_add_tail(&new->list, head);
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->n_dirty_dirs++;
-#endif
+       stat_inc_dirty_dir(sbi);
        return 0;
 }
 
@@ -482,12 +478,8 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
 
        if (!S_ISDIR(inode->i_mode))
                return;
-retry:
-       new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
+
+       new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);
 
@@ -504,13 +496,9 @@ retry:
 void add_dirty_dir_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct dir_inode_entry *new;
-retry:
-       new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
+       struct dir_inode_entry *new =
+                       f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);
 
@@ -541,9 +529,7 @@ void remove_dirty_dir_inode(struct inode *inode)
                if (entry->inode == inode) {
                        list_del(&entry->list);
                        kmem_cache_free(inode_entry_slab, entry);
-#ifdef CONFIG_F2FS_STAT_FS
-                       sbi->n_dirty_dirs--;
-#endif
+                       stat_dec_dirty_dir(sbi);
                        break;
                }
        }
@@ -617,11 +603,10 @@ static void block_operations(struct f2fs_sb_info *sbi)
        blk_start_plug(&plug);
 
 retry_flush_dents:
-       mutex_lock_all(sbi);
-
+       f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
-               mutex_unlock_all(sbi);
+               f2fs_unlock_all(sbi);
                sync_dirty_dir_inodes(sbi);
                goto retry_flush_dents;
        }
@@ -644,7 +629,7 @@ retry_flush_nodes:
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
        mutex_unlock(&sbi->node_write);
-       mutex_unlock_all(sbi);
+       f2fs_unlock_all(sbi);
 }
 
 static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
@@ -756,8 +741,15 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        f2fs_put_page(cp_page, 1);
 
        /* wait for previous submitted node/meta pages writeback */
-       while (get_pages(sbi, F2FS_WRITEBACK))
-               congestion_wait(BLK_RW_ASYNC, HZ / 50);
+       sbi->cp_task = current;
+       while (get_pages(sbi, F2FS_WRITEBACK)) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (!get_pages(sbi, F2FS_WRITEBACK))
+                       break;
+               io_schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+       sbi->cp_task = NULL;
 
        filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
        filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);
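The do_checkpoint() change above replaces polling with congestion_wait() by a direct sleep/wake handshake: the checkpoint task publishes itself in sbi->cp_task, re-checks the F2FS_WRITEBACK count after marking itself TASK_UNINTERRUPTIBLE (so a wakeup cannot be lost), and the write-completion path further down (f2fs_end_io_write in segment.c) wakes it once the count reaches zero. A hedged sketch of the generic pattern, with hypothetical demo_* names:

#include <linux/sched.h>
#include <linux/atomic.h>

struct demo_ctx {                            /* hypothetical */
        struct task_struct *waiter;          /* non-NULL while someone waits */
        atomic_t inflight;                   /* pages still under writeback */
};

static void demo_wait_for_inflight(struct demo_ctx *ctx)
{
        ctx->waiter = current;
        while (atomic_read(&ctx->inflight)) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&ctx->inflight))
                        break;               /* recheck closes the wakeup race */
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);
        ctx->waiter = NULL;
}

/* called from the I/O completion side */
static void demo_inflight_done(struct demo_ctx *ctx)
{
        if (atomic_dec_and_test(&ctx->inflight) && ctx->waiter)
                wake_up_process(ctx->waiter);
}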
index 941f9b9ca3a5b41fa9208f1d929b20212e08dfcb..d42a1bf993a84921f645ff04a70fdb90a6a43689 100644 (file)
@@ -68,9 +68,6 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                                        struct buffer_head *bh_result)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
-#ifdef CONFIG_F2FS_STAT_FS
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-#endif
        pgoff_t start_fofs, end_fofs;
        block_t start_blkaddr;
 
@@ -80,9 +77,8 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                return 0;
        }
 
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->total_hit_ext++;
-#endif
+       stat_inc_hit_ext(inode->i_sb);
+
        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
        start_blkaddr = fi->ext.blk_addr;
@@ -100,9 +96,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                else
                        bh_result->b_size = UINT_MAX;
 
-#ifdef CONFIG_F2FS_STAT_FS
-               sbi->read_hit_ext++;
-#endif
+               stat_inc_hit_ext(inode->i_sb);
                read_unlock(&fi->ext.ext_lock);
                return 1;
        }
@@ -560,9 +554,9 @@ write:
                inode_dec_dirty_dents(inode);
                err = do_write_data_page(page);
        } else {
-               int ilock = mutex_lock_op(sbi);
+               f2fs_lock_op(sbi);
                err = do_write_data_page(page);
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                need_balance_fs = true;
        }
        if (err == -ENOENT)
@@ -641,7 +635,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
        struct dnode_of_data dn;
        int err = 0;
-       int ilock;
 
        f2fs_balance_fs(sbi);
 repeat:
@@ -650,7 +643,7 @@ repeat:
                return -ENOMEM;
        *pagep = page;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, ALLOC_NODE);
@@ -664,7 +657,7 @@ repeat:
        if (err)
                goto err;
 
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
                return 0;
@@ -700,7 +693,7 @@ out:
        return 0;
 
 err:
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        f2fs_put_page(page, 1);
        return err;
 }
@@ -727,7 +720,7 @@ static int f2fs_write_end(struct file *file,
 }
 
 static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -736,7 +729,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                return 0;
 
        /* Needs synchronization with the cleaner */
-       return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       return blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                                  get_data_block_ro);
 }
 
index 608f0df5b9190f8e8b301dd61e085fa70c66c5db..590a09efce4a3a8fde298f6d9999212616ac4eb5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/crc32.h>
 #include <linux/magic.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
 
 /*
  * For mount options
@@ -317,14 +318,6 @@ enum count_type {
        NR_COUNT_TYPE,
 };
 
-/*
- * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
- * The checkpoint procedure blocks all the locks in this fs_lock array.
- * Some FS operations grab free locks, and if there is no free lock,
- * then wait to grab a lock in a round-robin manner.
- */
-#define NR_GLOBAL_LOCKS        8
-
 /*
  * Below are the page types of bios used in submit_bio().
  * The available types are:
@@ -365,12 +358,12 @@ struct f2fs_sb_info {
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
-       struct mutex fs_lock[NR_GLOBAL_LOCKS];  /* blocking FS operations */
+       struct rw_semaphore cp_rwsem;           /* blocking FS operations */
        struct mutex node_write;                /* locking node writes */
        struct mutex writepages;                /* mutex for writepages() */
-       unsigned char next_lock_num;            /* round-robin global locks */
-       int por_doing;                          /* recovery is doing or not */
-       int on_build_free_nids;                 /* build_free_nids is doing */
+       bool por_doing;                         /* recovery is doing or not */
+       bool on_build_free_nids;                /* build_free_nids is doing */
+       struct task_struct *cp_task;            /* checkpoint task */
 
        /* for orphan inode management */
        struct list_head orphan_inode_list;     /* orphan inode list */
@@ -520,48 +513,24 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 }
 
-static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-       int i;
-
-       for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
-               /*
-                * This is the only time we take multiple fs_lock[]
-                * instances; the order is immaterial since we
-                * always hold cp_mutex, which serializes multiple
-                * such operations.
-                */
-               mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
-       }
+       down_read(&sbi->cp_rwsem);
 }
 
-static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-       int i = 0;
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               mutex_unlock(&sbi->fs_lock[i]);
+       up_read(&sbi->cp_rwsem);
 }
 
-static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-       unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
-       int i = 0;
-
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               if (mutex_trylock(&sbi->fs_lock[i]))
-                       return i;
-
-       mutex_lock(&sbi->fs_lock[next_lock]);
-       sbi->next_lock_num++;
-       return next_lock;
+       down_write_nest_lock(&sbi->cp_rwsem, &sbi->cp_mutex);
 }
 
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-       if (ilock < 0)
-               return;
-       BUG_ON(ilock >= NR_GLOBAL_LOCKS);
-       mutex_unlock(&sbi->fs_lock[ilock]);
+       up_write(&sbi->cp_rwsem);
 }
 
 /*
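The f2fs.h hunk above collapses the old array of eight fs_lock mutexes into a single rw_semaphore: ordinary operations take the read side (many can run concurrently), while block_operations() on the checkpoint path takes the write side via down_write_nest_lock(), which drains all readers and keeps lockdep happy under cp_mutex. A hedged usage sketch — the caller functions are hypothetical, the helpers are the ones defined above:

static void demo_fs_op(struct f2fs_sb_info *sbi)      /* hypothetical caller */
{
        f2fs_lock_op(sbi);              /* shared: concurrent with other ops */
        /* ... allocate blocks, update dnodes, add dentries ... */
        f2fs_unlock_op(sbi);
}

static void demo_checkpoint(struct f2fs_sb_info *sbi)  /* hypothetical caller */
{
        mutex_lock(&sbi->cp_mutex);
        f2fs_lock_all(sbi);             /* exclusive: waits for all readers */
        /* ... flush dirty dentries/nodes, then write the checkpoint ... */
        f2fs_unlock_all(sbi);
        mutex_unlock(&sbi->cp_mutex);
}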
@@ -819,6 +788,20 @@ static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
        return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
 }
 
+static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+                                               gfp_t flags)
+{
+       void *entry;
+retry:
+       entry = kmem_cache_alloc(cachep, flags);
+       if (!entry) {
+               cond_resched();
+               goto retry;
+       }
+
+       return entry;
+}
+
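f2fs_kmem_cache_alloc() above centralises the "allocate, cond_resched(), retry" loops that several call sites in checkpoint.c, gc.c and node.c open-coded before this change; the helper never returns NULL but can spin under memory pressure, so it only suits small, short-lived objects. A hedged usage sketch with hypothetical demo_* names:

struct demo_entry {                          /* hypothetical */
        struct list_head list;
        nid_t ino;
};

static struct demo_entry *demo_alloc_entry(struct kmem_cache *slab, nid_t ino)
{
        /* never NULL: the helper retries until the slab allocation succeeds */
        struct demo_entry *e = f2fs_kmem_cache_alloc(slab, GFP_NOFS);

        e->ino = ino;
        INIT_LIST_HEAD(&e->list);
        return e;
}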
 #define RAW_IS_INODE(p)        ((p)->footer.nid == (p)->footer.ino)
 
 static inline bool IS_INODE(struct page *page)
@@ -1172,7 +1155,13 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
        return (struct f2fs_stat_info*)sbi->stat_info;
 }
 
-#define stat_inc_call_count(si)        ((si)->call_count++)
+#define stat_inc_call_count(si)                ((si)->call_count++)
+#define stat_inc_bggc_count(sbi)       ((sbi)->bg_gc++)
+#define stat_inc_dirty_dir(sbi)                ((sbi)->n_dirty_dirs++)
+#define stat_dec_dirty_dir(sbi)                ((sbi)->n_dirty_dirs--)
+#define stat_inc_hit_ext(sb)           ((F2FS_SB(sb))->total_hit_ext++)
+#define stat_inc_alloc_type(sbi, curseg)                               \
+               ((sbi)->segment_count[(curseg)->alloc_type]++)
 
 #define stat_inc_seg_count(sbi, type)                                  \
        do {                                                            \
@@ -1201,12 +1190,18 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
                si->node_blks += (blks);                                \
        } while (0)
 
+
 int f2fs_build_stats(struct f2fs_sb_info *);
 void f2fs_destroy_stats(struct f2fs_sb_info *);
 void __init f2fs_create_root_stats(void);
 void f2fs_destroy_root_stats(void);
 #else
 #define stat_inc_call_count(si)
+#define stat_inc_bggc_count(si)
+#define stat_inc_dirty_dir(sbi)
+#define stat_dec_dirty_dir(sbi)
+#define stat_inc_hit_ext(sb)
+#define stat_inc_alloc_type(sbi, curseg)
 #define stat_inc_seg_count(si, type)
 #define stat_inc_tot_blk_count(si, blks)
 #define stat_inc_data_blk_count(si, blks)
index 02c906971cc6311f43527a6adfb9f75ee77eae9e..f27eb0b725375696f16cbf2085a012172df0558e 100644 (file)
@@ -35,18 +35,18 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        block_t old_blk_addr;
        struct dnode_of_data dn;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
        sb_start_pagefault(inode->i_sb);
 
        /* block allocation */
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
        if (err) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                goto out;
        }
 
@@ -56,12 +56,12 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                err = reserve_new_block(&dn);
                if (err) {
                        f2fs_put_dnode(&dn);
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                        goto out;
                }
        }
        f2fs_put_dnode(&dn);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        file_update_time(vma->vm_file);
        lock_page(page);
@@ -270,7 +270,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct dnode_of_data dn;
        pgoff_t free_from;
-       int count = 0, ilock = -1;
+       int count = 0;
        int err;
 
        trace_f2fs_truncate_blocks_enter(inode, from);
@@ -278,13 +278,13 @@ static int truncate_blocks(struct inode *inode, u64 from)
        free_from = (pgoff_t)
                        ((from + blocksize - 1) >> (sbi->log_blocksize));
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                trace_f2fs_truncate_blocks_exit(inode, err);
                return err;
        }
@@ -305,7 +305,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
        f2fs_put_dnode(&dn);
 free_next:
        err = truncate_inode_blocks(inode, free_from);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        /* lastly zero out the first data page */
        truncate_partial_data_page(inode, from);
@@ -416,16 +416,15 @@ static void fill_zero(struct inode *inode, pgoff_t index,
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
-       int ilock;
 
        if (!len)
                return;
 
        f2fs_balance_fs(sbi);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        page = get_new_data_page(inode, NULL, index, false);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        if (!IS_ERR(page)) {
                wait_on_page_writeback(page);
@@ -484,7 +483,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-                       int ilock;
 
                        f2fs_balance_fs(sbi);
 
@@ -493,9 +491,9 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
 
-                       ilock = mutex_lock_op(sbi);
+                       f2fs_lock_op(sbi);
                        ret = truncate_hole(inode, pg_start, pg_end);
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                }
        }
 
@@ -529,13 +527,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
        for (index = pg_start; index <= pg_end; index++) {
                struct dnode_of_data dn;
-               int ilock;
 
-               ilock = mutex_lock_op(sbi);
+               f2fs_lock_op(sbi);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
                if (ret) {
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                        break;
                }
 
@@ -543,12 +540,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
                        ret = reserve_new_block(&dn);
                        if (ret) {
                                f2fs_put_dnode(&dn);
-                               mutex_unlock_op(sbi, ilock);
+                               f2fs_unlock_op(sbi);
                                break;
                        }
                }
                f2fs_put_dnode(&dn);
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
 
                if (pg_start == pg_end)
                        new_size = offset + len;
@@ -685,8 +682,8 @@ const struct file_operations f2fs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .open           = generic_file_open,
        .mmap           = f2fs_file_mmap,
        .fsync          = f2fs_sync_file,
index 2f157e883687d5edb07f7cd53745d0ab6571cb6a..cb286d7b02b284a9cc6d89ca942f087db8f8be9b 100644 (file)
@@ -77,9 +77,7 @@ static int gc_thread_func(void *data)
                else
                        wait_ms = increase_sleep_time(gc_th, wait_ms);
 
-#ifdef CONFIG_F2FS_STAT_FS
-               sbi->bg_gc++;
-#endif
+               stat_inc_bggc_count(sbi);
 
                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
@@ -236,8 +234,8 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 }
 
-static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
-                                       struct victim_sel_policy *p)
+static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
+                       unsigned int segno, struct victim_sel_policy *p)
 {
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
@@ -293,7 +291,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        }
                        break;
                }
-               p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+
+               p.offset = segno + p.ofs_unit;
+               if (p.ofs_unit > 1)
+                       p.offset -= segno % p.ofs_unit;
+
                secno = GET_SECNO(sbi, segno);
 
                if (sec_usage_check(sbi, secno))
@@ -306,10 +308,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
-               }
-
-               if (cost == max_cost)
+               } else if (unlikely(cost == max_cost)) {
                        continue;
+               }
 
                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
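The two forms of the cursor update in get_victim_by_default() above are arithmetically equivalent: both round segno down to its section (ofs_unit) boundary and step to the next one. For example (illustrative numbers only), with ofs_unit = 4 and segno = 9 the old expression gives (9 / 4) * 4 + 4 = 12 and the new one gives 9 + 4 - (9 % 4) = 12; with ofs_unit = 1 both reduce to segno + 1, and the new form skips the modulo entirely.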
@@ -358,12 +359,8 @@ static void add_gc_inode(struct inode *inode, struct list_head *ilist)
                iput(inode);
                return;
        }
-repeat:
-       new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
-       if (!new_ie) {
-               cond_resched();
-               goto repeat;
-       }
+
+       new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
        new_ie->inode = inode;
        list_add_tail(&new_ie->list, ilist);
 }
index 9339cd292047b897bbc7bb0c5ef5ff337f9190ab..7377ca3ce5c5b1a1a401b3f2a4e6f47492ab6433 100644 (file)
@@ -37,6 +37,31 @@ void f2fs_set_inode_flags(struct inode *inode)
                inode->i_flags |= S_DIRSYNC;
 }
 
+static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+                       S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+               if (ri->i_addr[0])
+                       inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+               else
+                       inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
+       }
+}
+
+static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+               if (old_valid_dev(inode->i_rdev)) {
+                       ri->i_addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+                       ri->i_addr[1] = 0;
+               } else {
+                       ri->i_addr[0] = 0;
+                       ri->i_addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+                       ri->i_addr[2] = 0;
+               }
+       }
+}
+
 static int do_read_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -73,10 +98,6 @@ static int do_read_inode(struct inode *inode)
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);
-       if (ri->i_addr[0])
-               inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
-       else
-               inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
 
        fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -84,8 +105,13 @@ static int do_read_inode(struct inode *inode)
        fi->flags = 0;
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
+
        get_extent_info(&fi->ext, ri->i_ext);
        get_inline_info(fi, ri);
+
+       /* get rdev by using inline_info */
+       __get_inode_rdev(inode, ri);
+
        f2fs_put_page(node_page, 1);
        return 0;
 }
@@ -179,21 +205,10 @@ void update_inode(struct inode *inode, struct page *node_page)
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
 
-       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-               if (old_valid_dev(inode->i_rdev)) {
-                       ri->i_addr[0] =
-                               cpu_to_le32(old_encode_dev(inode->i_rdev));
-                       ri->i_addr[1] = 0;
-               } else {
-                       ri->i_addr[0] = 0;
-                       ri->i_addr[1] =
-                               cpu_to_le32(new_encode_dev(inode->i_rdev));
-                       ri->i_addr[2] = 0;
-               }
-       }
-
+       __set_inode_rdev(inode, ri);
        set_cold_node(inode, node_page);
        set_page_dirty(node_page);
+
        clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
 }
 
@@ -214,7 +229,7 @@ int update_inode_page(struct inode *inode)
 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int ret, ilock;
+       int ret;
 
        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
@@ -227,9 +242,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
         * We need to lock here to prevent from producing dirty node pages
         * during the urgent cleaning time when running out of free sections.
         */
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        ret = update_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        if (wbc)
                f2fs_balance_fs(sbi);
@@ -243,7 +258,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 void f2fs_evict_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int ilock;
 
        trace_f2fs_evict_inode(inode);
        truncate_inode_pages(&inode->i_data, 0);
@@ -265,9 +279,9 @@ void f2fs_evict_inode(struct inode *inode)
        if (F2FS_HAS_BLOCKS(inode))
                f2fs_truncate(inode);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        remove_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        sb_end_intwrite(inode->i_sb);
 no_delete:
index 2a5359c990fc09b0141982a2e26b345033848245..575adac17f8be2b55935498b0a49a15b78daddcf 100644 (file)
@@ -27,19 +27,19 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
        nid_t ino;
        struct inode *inode;
        bool nid_free = false;
-       int err, ilock;
+       int err;
 
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        if (!alloc_nid(sbi, &ino)) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                err = -ENOSPC;
                goto fail;
        }
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        inode->i_uid = current_fsuid();
 
@@ -115,7 +115,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        nid_t ino = 0;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -131,9 +131,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
        ino = inode->i_ino;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -157,7 +157,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        struct inode *inode = old_dentry->d_inode;
        struct super_block *sb = dir->i_sb;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -165,9 +165,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        ihold(inode);
 
        set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -220,7 +220,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        struct f2fs_dir_entry *de;
        struct page *page;
        int err = -ENOENT;
-       int ilock;
 
        trace_f2fs_unlink_enter(dir, dentry);
        f2fs_balance_fs(sbi);
@@ -229,16 +228,16 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        if (!de)
                goto fail;
 
+       f2fs_lock_op(sbi);
        err = acquire_orphan_inode(sbi);
        if (err) {
+               f2fs_unlock_op(sbi);
                kunmap(page);
                f2fs_put_page(page, 0);
                goto fail;
        }
-
-       ilock = mutex_lock_op(sbi);
        f2fs_delete_entry(de, page, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        /* In order to evict this inode, we set it dirty */
        mark_inode_dirty(inode);
@@ -254,7 +253,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        size_t symlen = strlen(symname) + 1;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -265,9 +264,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
        inode->i_op = &f2fs_symlink_inode_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -290,7 +289,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
        struct inode *inode;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -304,9 +303,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
 
        set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out_fail;
 
@@ -342,7 +341,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        int err = 0;
-       int ilock;
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -356,9 +354,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
        init_special_inode(inode, inode->i_mode, rdev);
        inode->i_op = &f2fs_special_inode_operations;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -387,7 +385,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct f2fs_dir_entry *old_dir_entry = NULL;
        struct f2fs_dir_entry *old_entry;
        struct f2fs_dir_entry *new_entry;
-       int err = -ENOENT, ilock = -1;
+       int err = -ENOENT;
 
        f2fs_balance_fs(sbi);
 
@@ -402,7 +400,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto out_old;
        }
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
 
        if (new_inode) {
 
@@ -467,7 +465,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                update_inode_page(old_dir);
        }
 
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        return 0;
 
 put_out_dir:
@@ -477,7 +475,7 @@ out_dir:
                kunmap(old_dir_page);
                f2fs_put_page(old_dir_page, 0);
        }
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 out_old:
        kunmap(old_page);
        f2fs_put_page(old_page, 0);
index 51ef2789443322ea0e3a2d683d305b753997aeae..cc119b65a0d3ff9dc6b96283a703497c591e7cce 100644 (file)
@@ -1156,6 +1156,9 @@ static int f2fs_write_node_page(struct page *page,
        block_t new_addr;
        struct node_info ni;
 
+       if (sbi->por_doing)
+               goto redirty_out;
+
        wait_on_page_writeback(page);
 
        /* get old block addr of this node page */
@@ -1171,12 +1174,8 @@ static int f2fs_write_node_page(struct page *page,
                return 0;
        }
 
-       if (wbc->for_reclaim) {
-               dec_page_count(sbi, F2FS_DIRTY_NODES);
-               wbc->pages_skipped++;
-               set_page_dirty(page);
-               return AOP_WRITEPAGE_ACTIVATE;
-       }
+       if (wbc->for_reclaim)
+               goto redirty_out;
 
        mutex_lock(&sbi->node_write);
        set_page_writeback(page);
@@ -1186,6 +1185,12 @@ static int f2fs_write_node_page(struct page *page,
        mutex_unlock(&sbi->node_write);
        unlock_page(page);
        return 0;
+
+redirty_out:
+       dec_page_count(sbi, F2FS_DIRTY_NODES);
+       wbc->pages_skipped++;
+       set_page_dirty(page);
+       return AOP_WRITEPAGE_ACTIVATE;
 }
 
 /*
@@ -1291,23 +1296,18 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
        if (nid == 0)
                return 0;
 
-       if (!build)
-               goto retry;
-
-       /* do not add allocated nids */
-       read_lock(&nm_i->nat_tree_lock);
-       ne = __lookup_nat_cache(nm_i, nid);
-       if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
-               allocated = true;
-       read_unlock(&nm_i->nat_tree_lock);
-       if (allocated)
-               return 0;
-retry:
-       i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
-       if (!i) {
-               cond_resched();
-               goto retry;
+       if (build) {
+               /* do not add allocated nids */
+               read_lock(&nm_i->nat_tree_lock);
+               ne = __lookup_nat_cache(nm_i, nid);
+               if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+                       allocated = true;
+               read_unlock(&nm_i->nat_tree_lock);
+               if (allocated)
+                       return 0;
        }
+
+       i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
        i->nid = nid;
        i->state = NID_NEW;
 
@@ -1439,9 +1439,9 @@ retry:
 
        /* Let's scan nat pages and its caches to get free nids */
        mutex_lock(&nm_i->build_lock);
-       sbi->on_build_free_nids = 1;
+       sbi->on_build_free_nids = true;
        build_free_nids(sbi);
-       sbi->on_build_free_nids = 0;
+       sbi->on_build_free_nids = false;
        mutex_unlock(&nm_i->build_lock);
        goto retry;
 }
index 51ef5eec33d7fec07503e6eb9c90b86256b1dc3a..b278c68b3e0801d0a14454f7e96b74076507ba62 100644 (file)
@@ -64,24 +64,31 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
        name.name = raw_inode->i_name;
 retry:
        de = f2fs_find_entry(dir, &name, &page);
-       if (de && inode->i_ino == le32_to_cpu(de->ino)) {
-               kunmap(page);
-               f2fs_put_page(page, 0);
-               goto out;
-       }
+       if (de && inode->i_ino == le32_to_cpu(de->ino))
+               goto out_unmap_put;
        if (de) {
                einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        if (PTR_ERR(einode) == -ENOENT)
                                err = -EEXIST;
-                       goto out;
+                       goto out_unmap_put;
+               }
+               err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
+               if (err) {
+                       iput(einode);
+                       goto out_unmap_put;
                }
                f2fs_delete_entry(de, page, einode);
                iput(einode);
                goto retry;
        }
        err = __f2fs_add_link(dir, &name, inode);
+       goto out;
+
+out_unmap_put:
+       kunmap(page);
+       f2fs_put_page(page, 0);
 out:
        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
                        "ino = %x, name = %s, dir = %lx, err = %d",
@@ -285,7 +292,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        struct f2fs_summary sum;
        struct node_info ni;
        int err = 0, recovered = 0;
-       int ilock;
 
        start = start_bidx_of_node(ofs_of_node(page), fi);
        if (IS_INODE(page))
@@ -293,12 +299,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        else
                end = start + ADDRS_PER_BLOCK;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
 
        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                return err;
        }
 
@@ -349,7 +355,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 err:
        f2fs_put_dnode(&dn);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
                        "recovered_data = %d blocks, err = %d",
@@ -419,6 +425,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 {
        struct list_head inode_list;
        int err;
+       bool need_writecp = false;
 
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry), NULL);
@@ -428,7 +435,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
        INIT_LIST_HEAD(&inode_list);
 
        /* step #1: find fsynced inode numbers */
-       sbi->por_doing = 1;
+       sbi->por_doing = true;
        err = find_fsync_dnodes(sbi, &inode_list);
        if (err)
                goto out;
@@ -436,14 +443,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
        if (list_empty(&inode_list))
                goto out;
 
+       need_writecp = true;
+
        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
        BUG_ON(!list_empty(&inode_list));
 out:
        destroy_fsync_dnodes(&inode_list);
        kmem_cache_destroy(fsync_entry_slab);
-       sbi->por_doing = 0;
-       if (!err)
+       sbi->por_doing = false;
+       if (!err && need_writecp)
                write_checkpoint(sbi, false);
        return err;
 }
index 09af9c7b0f52673fff92f00be5a37a6030b72668..c9c276e5316931eb461f0c79ff49e4d91b5f6782 100644 (file)
@@ -78,10 +78,14 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
        if (dirty_type == DIRTY) {
                enum dirty_type t = DIRTY_HOT_DATA;
 
-               /* clear all the bitmaps */
-               for (; t <= DIRTY_COLD_NODE; t++)
-                       if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+               /* clear its dirty bitmap */
+               for (; t <= DIRTY_COLD_NODE; t++) {
+                       if (test_and_clear_bit(segno,
+                                               dirty_i->dirty_segmap[t])) {
                                dirty_i->nr_dirty[t]--;
+                               break;
+                       }
+               }
 
                if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
                        clear_bit(GET_SECNO(sbi, segno),
@@ -550,9 +554,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                change_curseg(sbi, type, true);
        else
                new_curseg(sbi, type, false);
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->segment_count[curseg->alloc_type]++;
-#endif
+
+       stat_inc_alloc_type(sbi, curseg);
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -597,6 +600,10 @@ static void f2fs_end_io_write(struct bio *bio, int err)
 
        if (p->is_sync)
                complete(p->wait);
+
+       if (!get_pages(p->sbi, F2FS_WRITEBACK) && p->sbi->cp_task)
+               wake_up_process(p->sbi->cp_task);
+
        kfree(p);
        bio_put(bio);
 }
@@ -657,6 +664,7 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
                                block_t blk_addr, enum page_type type)
 {
        struct block_device *bdev = sbi->sb->s_bdev;
+       int bio_blocks;
 
        verify_block_addr(sbi, blk_addr);
 
@@ -676,7 +684,8 @@ retry:
                        goto retry;
                }
 
-               sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
+               bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+               sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
                sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
                sbi->bio[type]->bi_private = priv;
                /*
@@ -801,9 +810,8 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
 
        mutex_lock(&sit_i->sentry_lock);
        __refresh_next_blkoff(sbi, curseg);
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->block_count[curseg->alloc_type]++;
-#endif
+
+       stat_inc_alloc_type(sbi, curseg);
 
        /*
         * SIT information should be updated before segment allocation,
@@ -1271,9 +1279,9 @@ static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
                        __mark_sit_entry_dirty(sbi, segno);
                }
                update_sits_in_cursum(sum, -sits_in_cursum(sum));
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
 /*
index bdd10eab8c40d7c25fc24eebbeed2a0f705e902b..7f94d78cda3dea186b142970223f0a3863781cab 100644 (file)
@@ -90,6 +90,8 @@
        (blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
 #define SECTOR_TO_BLOCK(sbi, sectors)                                  \
        (sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define MAX_BIO_BLOCKS(max_hw_blocks)                                  \
+       (min((int)max_hw_blocks, BIO_MAX_PAGES))
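MAX_BIO_BLOCKS() exists because a single bio cannot describe more than BIO_MAX_PAGES vectors, whatever max_hw_blocks the device reports; submit_write_page() above now passes the clamped value to f2fs_bio_alloc(). With assumed numbers: if max_hw_blocks were 1024 and BIO_MAX_PAGES is 256, MAX_BIO_BLOCKS(1024) evaluates to min(1024, 256) = 256, and the (int) cast keeps min()'s strict type check satisfied when max_hw_blocks is an unsigned expression.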
 
 /* during checkpoint, bio_private is used to synchronize the last bio */
 struct bio_private {
index 13d0a0fe49dd413ed70d3a10dc864d27bea5c79a..9a094596fd21a53961e66ef7670f10833079ceda 100644 (file)
@@ -43,7 +43,9 @@ enum {
        Opt_disable_roll_forward,
        Opt_discard,
        Opt_noheap,
+       Opt_user_xattr,
        Opt_nouser_xattr,
+       Opt_acl,
        Opt_noacl,
        Opt_active_logs,
        Opt_disable_ext_identify,
@@ -56,7 +58,9 @@ static match_table_t f2fs_tokens = {
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_discard, "discard"},
        {Opt_noheap, "no_heap"},
+       {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
+       {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
@@ -237,6 +241,9 @@ static int parse_options(struct super_block *sb, char *options)
                        set_opt(sbi, NOHEAP);
                        break;
 #ifdef CONFIG_F2FS_FS_XATTR
+               case Opt_user_xattr:
+                       set_opt(sbi, XATTR_USER);
+                       break;
                case Opt_nouser_xattr:
                        clear_opt(sbi, XATTR_USER);
                        break;
@@ -244,6 +251,10 @@ static int parse_options(struct super_block *sb, char *options)
                        set_opt(sbi, INLINE_XATTR);
                        break;
 #else
+               case Opt_user_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "user_xattr options not supported");
+                       break;
                case Opt_nouser_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "nouser_xattr options not supported");
@@ -254,10 +265,16 @@ static int parse_options(struct super_block *sb, char *options)
                        break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
+               case Opt_acl:
+                       set_opt(sbi, POSIX_ACL);
+                       break;
                case Opt_noacl:
                        clear_opt(sbi, POSIX_ACL);
                        break;
 #else
+               case Opt_acl:
+                       f2fs_msg(sb, KERN_INFO, "acl options not supported");
+                       break;
                case Opt_noacl:
                        f2fs_msg(sb, KERN_INFO, "noacl options not supported");
                        break;
@@ -355,7 +372,9 @@ static void f2fs_put_super(struct super_block *sb)
        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);
 
-       write_checkpoint(sbi, true);
+       /* We don't need to do checkpoint when it's clean */
+       if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+               write_checkpoint(sbi, true);
 
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
@@ -727,30 +746,47 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
                atomic_set(&sbi->nr_pages[i], 0);
 }
 
-static int validate_superblock(struct super_block *sb,
-               struct f2fs_super_block **raw_super,
-               struct buffer_head **raw_super_buf, sector_t block)
+/*
+ * Read f2fs raw super block.
+ * Because we keep two copies of the super block, read the first one first;
+ * if it is invalid, fall back to reading the second one.
+ */
+static int read_raw_super_block(struct super_block *sb,
+                       struct f2fs_super_block **raw_super,
+                       struct buffer_head **raw_super_buf)
 {
-       const char *super = (block == 0 ? "first" : "second");
+       int block = 0;
 
-       /* read f2fs raw super block */
+retry:
        *raw_super_buf = sb_bread(sb, block);
        if (!*raw_super_buf) {
-               f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
-                               super);
-               return -EIO;
+               f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+                               block + 1);
+               if (block == 0) {
+                       block++;
+                       goto retry;
+               } else {
+                       return -EIO;
+               }
        }
 
        *raw_super = (struct f2fs_super_block *)
                ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
 
        /* sanity checking of raw super */
-       if (!sanity_check_raw_super(sb, *raw_super))
-               return 0;
+       if (sanity_check_raw_super(sb, *raw_super)) {
+               brelse(*raw_super_buf);
+               f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
+                               "in %dth superblock", block + 1);
+               if (block == 0) {
+                       block++;
+                       goto retry;
+               } else {
+                       return -EINVAL;
+               }
+       }
 
-       f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
-                               "in %s superblock", super);
-       return -EINVAL;
+       return 0;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -760,7 +796,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        struct buffer_head *raw_super_buf;
        struct inode *root;
        long err = -EINVAL;
-       int i;
 
        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
@@ -773,14 +808,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_sbi;
        }
 
-       err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
-       if (err) {
-               brelse(raw_super_buf);
-               /* check secondary superblock when primary failed */
-               err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
-               if (err)
-                       goto free_sb_buf;
-       }
+       err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
+       if (err)
+               goto free_sbi;
+
        sb->s_fs_info = sbi;
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;
@@ -818,12 +849,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
-       for (i = 0; i < NR_GLOBAL_LOCKS; i++)
-               mutex_init(&sbi->fs_lock[i]);
        mutex_init(&sbi->node_write);
-       sbi->por_doing = 0;
+       sbi->por_doing = false;
        spin_lock_init(&sbi->stat_lock);
        init_rwsem(&sbi->bio_sem);
+       init_rwsem(&sbi->cp_rwsem);
        init_sb_info(sbi);
 
        /* get an inode for meta space */
index 1ac8a5f6e38096a702c62d2520b5b001a87f9fa2..f685138dd49607fce91e259c5d09872aece53bf5 100644 (file)
@@ -154,6 +154,9 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
 }
 
 #ifdef CONFIG_F2FS_FS_SECURITY
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+                       const char *name, const void *value, size_t value_len,
+                       struct page *ipage);
 static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
                void *page)
 {
@@ -161,7 +164,7 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
        int err = 0;
 
        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
+               err = __f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
                                xattr->name, xattr->value,
                                xattr->value_len, (struct page *)page);
                if (err < 0)
@@ -469,16 +472,15 @@ cleanup:
        return error;
 }
 
-int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-                       const void *value, size_t value_len, struct page *ipage)
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+                       const char *name, const void *value, size_t value_len,
+                       struct page *ipage)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_xattr_entry *here, *last;
        void *base_addr;
        int found, newsize;
        size_t name_len;
-       int ilock;
        __u32 new_hsize;
        int error = -ENOMEM;
 
@@ -493,10 +495,6 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode))
                return -ERANGE;
 
-       f2fs_balance_fs(sbi);
-
-       ilock = mutex_lock_op(sbi);
-
        base_addr = read_all_xattrs(inode, ipage);
        if (!base_addr)
                goto exit;
@@ -578,7 +576,21 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        else
                update_inode_page(inode);
 exit:
-       mutex_unlock_op(sbi, ilock);
        kzfree(base_addr);
        return error;
 }
+
+int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+                       const void *value, size_t value_len, struct page *ipage)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       int err;
+
+       f2fs_balance_fs(sbi);
+
+       f2fs_lock_op(sbi);
+       err = __f2fs_setxattr(inode, name_index, name, value, value_len, ipage);
+       f2fs_unlock_op(sbi);
+
+       return err;
+}
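The xattr.c change above splits f2fs_setxattr() into an unlocked __f2fs_setxattr() plus a thin locking wrapper, so that f2fs_initxattrs() — invoked from inode-creation paths that already sit under f2fs_lock_op() — can call the internal variant without re-taking the lock. A hedged sketch of the same split with hypothetical demo_* names:

static int __demo_op(struct inode *inode)    /* caller holds f2fs_lock_op() */
{
        /* ... the real work ... */
        return 0;
}

int demo_op(struct inode *inode)             /* public, takes the lock itself */
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        int err;

        f2fs_lock_op(sbi);
        err = __demo_op(inode);
        f2fs_unlock_op(sbi);
        return err;
}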
index 9b104f543056238016c683ef822046a784169f50..33711ff2b4a3e495a886ef09c229431fbead4598 100644 (file)
@@ -172,8 +172,8 @@ const struct file_operations fat_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = fat_file_release,
        .unlocked_ioctl = fat_generic_ioctl,
index 0062da21dd8b7995aa1764bfd943cde9d2e22364..3134d1ede2925830fc16a6ae8d4bd3c9c5f59fae 100644 (file)
@@ -185,8 +185,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-                            const struct iovec *iov,
-                            loff_t offset, unsigned long nr_segs)
+                            struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -203,7 +202,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
                 *
                 * Return 0, and fallback to normal buffered write.
                 */
-               loff_t size = offset + iov_length(iov, nr_segs);
+               loff_t size = offset + iov_iter_count(iter);
                if (MSDOS_I(inode)->mmu_private < size)
                        return 0;
        }
@@ -212,10 +211,9 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
         * FAT need to use the DIO_LOCKING for avoiding the race
         * condition of fat_get_block() and ->truncate().
         */
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                fat_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
        if (ret < 0 && (rw & WRITE))
-               fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               fat_write_failed(mapping, offset + iov_iter_count(iter));
 
        return ret;
 }
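
For context, fat_direct_IO() now receives an iov_iter instead of an iovec array plus segment count. A minimal sketch of how a caller would set one up over a single user buffer, using the same iov_iter_init() signature that appears in the fuse/cuse hunks later in this diff (illustrative only; buf and count are placeholders):

	struct iovec iov = { .iov_base = buf, .iov_len = count };
	struct iov_iter ii;

	/* one segment, count bytes, nothing consumed yet */
	iov_iter_init(&ii, &iov, 1, count, 0);

	/* iov_iter_count(&ii) reports the bytes remaining, which is what
	 * replaces iov_length(iov, nr_segs) inside ->direct_IO(). */
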
index b2a86e324aac05f7bf64b7ce0d0e2c30d91f3d68..29d7feb62cf7a5784a3828d9e6b19db4c30ceb01 100644 (file)
@@ -58,15 +58,16 @@ void fscache_cookie_init_once(void *_cookie)
 struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
-       void *netfs_data)
+       void *netfs_data,
+       bool enable)
 {
        struct fscache_cookie *cookie;
 
        BUG_ON(!def);
 
-       _enter("{%s},{%s},%p",
+       _enter("{%s},{%s},%p,%u",
               parent ? (char *) parent->def->name : "<no-parent>",
-              def->name, netfs_data);
+              def->name, netfs_data, enable);
 
        fscache_stat(&fscache_n_acquires);
 
@@ -106,7 +107,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
        cookie->def             = def;
        cookie->parent          = parent;
        cookie->netfs_data      = netfs_data;
-       cookie->flags           = 0;
+       cookie->flags           = (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
@@ -124,16 +125,22 @@ struct fscache_cookie *__fscache_acquire_cookie(
                break;
        }
 
-       /* if the object is an index then we need do nothing more here - we
-        * create indices on disk when we need them as an index may exist in
-        * multiple caches */
-       if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
-               if (fscache_acquire_non_index_cookie(cookie) < 0) {
-                       atomic_dec(&parent->n_children);
-                       __fscache_cookie_put(cookie);
-                       fscache_stat(&fscache_n_acquires_nobufs);
-                       _leave(" = NULL");
-                       return NULL;
+       if (enable) {
+               /* if the object is an index then we need do nothing more here
+                * - we create indices on disk when we need them as an index
+                * may exist in multiple caches */
+               if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+                       if (fscache_acquire_non_index_cookie(cookie) == 0) {
+                               set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+                       } else {
+                               atomic_dec(&parent->n_children);
+                               __fscache_cookie_put(cookie);
+                               fscache_stat(&fscache_n_acquires_nobufs);
+                               _leave(" = NULL");
+                               return NULL;
+                       }
+               } else {
+                       set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
                }
        }
 
@@ -143,6 +150,39 @@ struct fscache_cookie *__fscache_acquire_cookie(
 }
 EXPORT_SYMBOL(__fscache_acquire_cookie);
 
+/*
+ * Enable a cookie to permit it to accept new operations.
+ */
+void __fscache_enable_cookie(struct fscache_cookie *cookie,
+                            bool (*can_enable)(void *data),
+                            void *data)
+{
+       _enter("%p", cookie);
+
+       wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
+                        fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+
+       if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
+               goto out_unlock;
+
+       if (can_enable && !can_enable(data)) {
+               /* The netfs decided it didn't want to enable after all */
+       } else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+               /* Wait for outstanding disablement to complete */
+               __fscache_wait_on_invalidate(cookie);
+
+               if (fscache_acquire_non_index_cookie(cookie) == 0)
+                       set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+       } else {
+               set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+       }
+
+out_unlock:
+       clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
+       wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+}
+EXPORT_SYMBOL(__fscache_enable_cookie);
+
 /*
  * acquire a non-index cookie
  * - this must make sure the index chain is instantiated and instantiate the
@@ -157,7 +197,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
        _enter("");
 
-       cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;
+       set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
        /* now we need to see whether the backing objects for this cookie yet
         * exist, if not there'll be nothing to search */
@@ -180,9 +220,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
        _debug("cache %s", cache->tag->name);
 
-       cookie->flags =
-               (1 << FSCACHE_COOKIE_LOOKING_UP) |
-               (1 << FSCACHE_COOKIE_NO_DATA_YET);
+       set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
 
        /* ask the cache to allocate objects for this cookie and its parent
         * chain */
@@ -398,7 +436,8 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
        if (!hlist_empty(&cookie->backing_objects)) {
                spin_lock(&cookie->lock);
 
-               if (!hlist_empty(&cookie->backing_objects) &&
+               if (fscache_cookie_enabled(cookie) &&
+                   !hlist_empty(&cookie->backing_objects) &&
                    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
                                      &cookie->flags)) {
                        object = hlist_entry(cookie->backing_objects.first,
@@ -452,10 +491,14 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
 
        spin_lock(&cookie->lock);
 
-       /* update the index entry on disk in each cache backing this cookie */
-       hlist_for_each_entry(object,
-                            &cookie->backing_objects, cookie_link) {
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+       if (fscache_cookie_enabled(cookie)) {
+               /* update the index entry on disk in each cache backing this
+                * cookie.
+                */
+               hlist_for_each_entry(object,
+                                    &cookie->backing_objects, cookie_link) {
+                       fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+               }
        }
 
        spin_unlock(&cookie->lock);
@@ -464,28 +507,14 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
 EXPORT_SYMBOL(__fscache_update_cookie);
 
 /*
- * release a cookie back to the cache
- * - the object will be marked as recyclable on disk if retire is true
- * - all dependents of this cookie must have already been unregistered
- *   (indices/files/pages)
+ * Disable a cookie to stop it from accepting new requests from the netfs.
  */
-void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 {
        struct fscache_object *object;
+       bool awaken = false;
 
-       fscache_stat(&fscache_n_relinquishes);
-       if (retire)
-               fscache_stat(&fscache_n_relinquishes_retire);
-
-       if (!cookie) {
-               fscache_stat(&fscache_n_relinquishes_null);
-               _leave(" [no cookie]");
-               return;
-       }
-
-       _enter("%p{%s,%p,%d},%d",
-              cookie, cookie->def->name, cookie->netfs_data,
-              atomic_read(&cookie->n_active), retire);
+       _enter("%p,%u", cookie, invalidate);
 
        ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
 
@@ -495,24 +524,82 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
                BUG();
        }
 
-       /* No further netfs-accessing operations on this cookie permitted */
-       set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
-       if (retire)
-               set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+       wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
+                        fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+       if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
+               goto out_unlock_enable;
+
+       /* If the cookie is being invalidated, wait for that to complete first
+        * so that we can reuse the flag.
+        */
+       __fscache_wait_on_invalidate(cookie);
+
+       /* Dispose of the backing objects */
+       set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);
 
        spin_lock(&cookie->lock);
-       hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+       if (!hlist_empty(&cookie->backing_objects)) {
+               hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
+                       if (invalidate)
+                               set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+                       fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+               }
+       } else {
+               if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+                       awaken = true;
        }
        spin_unlock(&cookie->lock);
+       if (awaken)
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
 
        /* Wait for cessation of activity requiring access to the netfs (when
-        * n_active reaches 0).
+        * n_active reaches 0).  This makes sure outstanding reads and writes
+        * have completed.
         */
        if (!atomic_dec_and_test(&cookie->n_active))
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);
 
+       /* Reset the cookie state if it wasn't relinquished */
+       if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
+               atomic_inc(&cookie->n_active);
+               set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+       }
+
+out_unlock_enable:
+       clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
+       wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+       _leave("");
+}
+EXPORT_SYMBOL(__fscache_disable_cookie);
+
+/*
+ * release a cookie back to the cache
+ * - the object will be marked as recyclable on disk if retire is true
+ * - all dependents of this cookie must have already been unregistered
+ *   (indices/files/pages)
+ */
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+{
+       fscache_stat(&fscache_n_relinquishes);
+       if (retire)
+               fscache_stat(&fscache_n_relinquishes_retire);
+
+       if (!cookie) {
+               fscache_stat(&fscache_n_relinquishes_null);
+               _leave(" [no cookie]");
+               return;
+       }
+
+       _enter("%p{%s,%p,%d},%d",
+              cookie, cookie->def->name, cookie->netfs_data,
+              atomic_read(&cookie->n_active), retire);
+
+       /* No further netfs-accessing operations on this cookie permitted */
+       set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
+
+       __fscache_disable_cookie(cookie, retire);
+
        /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
@@ -568,6 +655,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,", cookie);
@@ -591,7 +679,8 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto inconsistent;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
@@ -600,7 +689,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 
        op->debug_id = atomic_inc_return(&fscache_op_debug_id);
 
-       atomic_inc(&cookie->n_active);
+       __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, op) < 0)
                goto submit_failed;
 
@@ -622,9 +711,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
        return ret;
 
 submit_failed:
-       atomic_dec(&cookie->n_active);
+       wake_cookie = __fscache_unuse_cookie(cookie);
 inconsistent:
        spin_unlock(&cookie->lock);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        kfree(op);
        _leave(" = -ESTALE");
        return -ESTALE;
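
For context, the cookie.c changes above separate cookie lifetime from cookie enablement: acquisition now takes a bool enable, and __fscache_enable_cookie()/__fscache_disable_cookie() toggle caching without relinquishing the cookie. A hedged sketch of the resulting call sequence from a netfs's point of view (illustrative only; parent, netfs_inode_def and inode are placeholders, and a real netfs would normally go through the fscache_*() wrappers rather than these exported __fscache_*() functions):

	/* Acquire the cookie disabled, then enable it once the netfs decides
	 * caching should be active for this object. */
	cookie = __fscache_acquire_cookie(parent, &netfs_inode_def, inode, false);
	if (cookie)
		__fscache_enable_cookie(cookie, NULL, NULL);

	/* ... read/alloc/write page operations as before ... */

	/* Stop caching and kill the backing objects without dropping the
	 * cookie, then release it for good later. */
	__fscache_disable_cookie(cookie, true);
	__fscache_relinquish_cookie(cookie, false);
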
index 10a2ade0bdf8c1ff390be0e82c5dd2e161732054..5a117df2a9ef7e51f89a55d1bc40bdac81b79393 100644 (file)
@@ -59,6 +59,7 @@ struct fscache_cookie fscache_fsdef_index = {
        .lock           = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
        .backing_objects = HLIST_HEAD_INIT,
        .def            = &fscache_fsdef_index_def,
+       .flags          = 1 << FSCACHE_COOKIE_ENABLED,
 };
 EXPORT_SYMBOL(fscache_fsdef_index);
 
index b1bb6117473a25a292ccf326dcd607fcf4a4f354..989f394015472bafe7758c19e006a16c57a59e4c 100644 (file)
@@ -45,6 +45,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
        netfs->primary_index->def               = &fscache_fsdef_netfs_def;
        netfs->primary_index->parent            = &fscache_fsdef_index;
        netfs->primary_index->netfs_data        = netfs;
+       netfs->primary_index->flags             = 1 << FSCACHE_COOKIE_ENABLED;
 
        atomic_inc(&netfs->primary_index->parent->usage);
        atomic_inc(&netfs->primary_index->parent->n_children);
index 86d75a60b20c85543bd445869d9a57f37d42e920..dcb8216177747d033731bfd6b4a42ff903af44c5 100644 (file)
@@ -495,6 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
                 * returning ENODATA.
                 */
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+               clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
                _debug("wake up lookup %p", &cookie->flags);
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
@@ -527,6 +528,7 @@ void fscache_obtained_object(struct fscache_object *object)
 
                /* We do (presumably) have data */
                clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+               clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
                /* Allow write requests to begin stacking up and read requests
                 * to begin shovelling data.
@@ -679,7 +681,8 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
         */
        spin_lock(&cookie->lock);
        hlist_del_init(&object->cookie_link);
-       if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+       if (hlist_empty(&cookie->backing_objects) &&
+           test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                awaken = true;
        spin_unlock(&cookie->lock);
 
@@ -927,7 +930,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
         */
        if (!fscache_use_cookie(object)) {
                ASSERT(object->cookie->stores.rnode == NULL);
-               set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+               set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                _leave(" [no cookie]");
                return transit_to(KILL_OBJECT);
        }
index 73899c1c34494555d73dd5714ecb21eb74c0296d..7f5c658af755f9b43296c1dacfab5def788c4a1c 100644 (file)
@@ -163,12 +163,10 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
 
        fscache_stat(&fscache_n_attr_changed_calls);
 
-       if (fscache_object_is_active(object) &&
-           fscache_use_cookie(object)) {
+       if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
-               fscache_unuse_cookie(object);
                if (ret < 0)
                        fscache_abort_object(object);
        }
@@ -184,6 +182,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
+       bool wake_cookie;
 
        _enter("%p", cookie);
 
@@ -199,15 +198,19 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
        }
 
        fscache_operation_init(op, fscache_attr_changed_op, NULL);
-       op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
+       op->flags = FSCACHE_OP_ASYNC |
+               (1 << FSCACHE_OP_EXCLUSIVE) |
+               (1 << FSCACHE_OP_UNUSE_COOKIE);
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
@@ -217,8 +220,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
        return 0;
 
 nobufs:
+       wake_cookie = __fscache_unuse_cookie(cookie);
        spin_unlock(&cookie->lock);
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
@@ -263,7 +269,6 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
        }
 
        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
-       atomic_inc(&cookie->n_active);
        op->op.flags    = FSCACHE_OP_MYTHREAD |
                (1UL << FSCACHE_OP_WAITING) |
                (1UL << FSCACHE_OP_UNUSE_COOKIE);
@@ -384,6 +389,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 {
        struct fscache_retrieval *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,%p,,,", cookie, page);
@@ -405,7 +411,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                return -ERESTARTSYS;
 
        op = fscache_alloc_retrieval(cookie, page->mapping,
-                                    end_io_func,context);
+                                    end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
@@ -414,13 +420,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
        ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
 
+       __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
 
@@ -475,9 +483,11 @@ error:
 
 nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
+       wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       atomic_dec(&cookie->n_active);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        kfree(op);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
@@ -514,6 +524,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 {
        struct fscache_retrieval *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,,%d,,,", cookie, *nr_pages);
@@ -542,11 +553,13 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
 
@@ -601,10 +614,12 @@ error:
 
 nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
+       wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       atomic_dec(&cookie->n_active);
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
@@ -626,6 +641,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 {
        struct fscache_retrieval *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,%p,,,", cookie, page);
@@ -653,13 +669,15 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
-               goto nobufs_unlock;
+               goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);
 
        fscache_stat(&fscache_n_alloc_ops);
@@ -689,10 +707,13 @@ error:
        _leave(" = %d", ret);
        return ret;
 
+nobufs_unlock_dec:
+       wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       atomic_dec(&cookie->n_active);
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
 nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
@@ -889,6 +910,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 {
        struct fscache_storage *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,%x,", cookie, (u32) page->flags);
@@ -920,7 +942,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        ret = -ENOBUFS;
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
@@ -957,7 +980,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;
 
-       atomic_inc(&cookie->n_active);
+       __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;
 
@@ -984,10 +1007,10 @@ already_pending:
        return 0;
 
 submit_failed:
-       atomic_dec(&cookie->n_active);
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
+       wake_cookie = __fscache_unuse_cookie(cookie);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;
@@ -999,6 +1022,8 @@ nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
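
For context, the page.c hunks replace open-coded atomic_inc()/atomic_dec() on cookie->n_active with use/unuse helpers, and the error paths now wake any waiter once the count drops to zero. A minimal sketch of the assumed helper shapes, inferred from these call sites and from the wait_on_atomic_t() wait in the disable path above (illustrative only, not part of this diff):

static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
{
	atomic_inc(&cookie->n_active);
}

/* Returns true if the caller should wake waiters for n_active == 0. */
static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
{
	return atomic_dec_and_test(&cookie->n_active);
}

static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
{
	wake_up_atomic_t(&cookie->n_active);
}
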
index adbfd66b380f6bbc3d275234851746ba0c33c6d4..242fe3eb1ae8206b3e66ad00a891887851a7121f 100644 (file)
@@ -94,8 +94,11 @@ static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
        loff_t pos = 0;
        struct iovec iov = { .iov_base = buf, .iov_len = count };
        struct fuse_io_priv io = { .async = 0, .file = file };
+       struct iov_iter ii;
 
-       return fuse_direct_io(&io, &iov, 1, count, &pos, 0);
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
+       return fuse_direct_io(&io, &ii, count, &pos, 0);
 }
 
 static ssize_t cuse_write(struct file *file, const char __user *buf,
@@ -104,12 +107,15 @@ static ssize_t cuse_write(struct file *file, const char __user *buf,
        loff_t pos = 0;
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
        struct fuse_io_priv io = { .async = 0, .file = file };
+       struct iov_iter ii;
+
+       iov_iter_init(&ii, &iov, 1, count, 0);
 
        /*
         * No locking or generic_write_checks(), the server is
         * responsible for locking and sanity checks.
         */
-       return fuse_direct_io(&io, &iov, 1, count, &pos, 1);
+       return fuse_direct_io(&io, &ii, count, &pos, 1);
 }
 
 static int cuse_open(struct inode *inode, struct file *file)
@@ -589,11 +595,14 @@ static struct attribute *cuse_class_dev_attrs[] = {
 ATTRIBUTE_GROUPS(cuse_class_dev);
 
 static struct miscdevice cuse_miscdev = {
-       .minor          = MISC_DYNAMIC_MINOR,
+       .minor          = CUSE_MINOR,
        .name           = "cuse",
        .fops           = &cuse_channel_fops,
 };
 
+MODULE_ALIAS_MISCDEV(CUSE_MINOR);
+MODULE_ALIAS("devname:cuse");
+
 static int __init cuse_init(void)
 {
        int i, rc;
index 62b43b577bfce40b116a7a8eb77a91cea1c8b7b4..0747f6eed59836954a23743cddde06fef4d8c05c 100644 (file)
@@ -182,6 +182,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
        struct inode *inode;
        struct dentry *parent;
        struct fuse_conn *fc;
+       struct fuse_inode *fi;
        int ret;
 
        inode = ACCESS_ONCE(entry->d_inode);
@@ -228,7 +229,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
                if (!err && !outarg.nodeid)
                        err = -ENOENT;
                if (!err) {
-                       struct fuse_inode *fi = get_fuse_inode(inode);
+                       fi = get_fuse_inode(inode);
                        if (outarg.nodeid != get_node_id(inode)) {
                                fuse_queue_forget(fc, forget, outarg.nodeid, 1);
                                goto invalid;
@@ -246,8 +247,11 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
                                       attr_version);
                fuse_change_entry_timeout(entry, &outarg);
        } else if (inode) {
-               fc = get_fuse_conn(inode);
-               if (fc->readdirplus_auto) {
+               fi = get_fuse_inode(inode);
+               if (flags & LOOKUP_RCU) {
+                       if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
+                               return -ECHILD;
+               } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
                        parent = dget_parent(entry);
                        fuse_advise_use_readdirplus(parent->d_inode);
                        dput(parent);
@@ -259,7 +263,8 @@ out:
 
 invalid:
        ret = 0;
-       if (check_submounts_and_drop(entry) != 0)
+
+       if (!(flags & LOOKUP_RCU) && check_submounts_and_drop(entry) != 0)
                ret = 1;
        goto out;
 }
@@ -337,24 +342,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
        return err;
 }
 
-static struct dentry *fuse_materialise_dentry(struct dentry *dentry,
-                                             struct inode *inode)
-{
-       struct dentry *newent;
-
-       if (inode && S_ISDIR(inode->i_mode)) {
-               struct fuse_conn *fc = get_fuse_conn(inode);
-
-               mutex_lock(&fc->inst_mutex);
-               newent = d_materialise_unique(dentry, inode);
-               mutex_unlock(&fc->inst_mutex);
-       } else {
-               newent = d_materialise_unique(dentry, inode);
-       }
-
-       return newent;
-}
-
 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
                                  unsigned int flags)
 {
@@ -377,7 +364,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        if (inode && get_node_id(inode) == FUSE_ROOT_ID)
                goto out_iput;
 
-       newent = fuse_materialise_dentry(entry, inode);
+       newent = d_materialise_unique(entry, inode);
        err = PTR_ERR(newent);
        if (IS_ERR(newent))
                goto out_err;
@@ -596,21 +583,11 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
        }
        kfree(forget);
 
-       if (S_ISDIR(inode->i_mode)) {
-               struct dentry *alias;
-               mutex_lock(&fc->inst_mutex);
-               alias = d_find_alias(inode);
-               if (alias) {
-                       /* New directory must have moved since mkdir */
-                       mutex_unlock(&fc->inst_mutex);
-                       dput(alias);
-                       iput(inode);
-                       return -EBUSY;
-               }
-               d_instantiate(entry, inode);
-               mutex_unlock(&fc->inst_mutex);
-       } else
-               d_instantiate(entry, inode);
+       err = d_instantiate_no_diralias(entry, inode);
+       if (err) {
+               iput(inode);
+               return err;
+       }
 
        fuse_change_entry_timeout(entry, &outarg);
        fuse_invalidate_attr(dir);
@@ -1063,6 +1040,8 @@ static int fuse_access(struct inode *inode, int mask)
        struct fuse_access_in inarg;
        int err;
 
+       BUG_ON(mask & MAY_NOT_BLOCK);
+
        if (fc->no_access)
                return 0;
 
@@ -1150,9 +1129,6 @@ static int fuse_permission(struct inode *inode, int mask)
                   noticed immediately, only after the attribute
                   timeout has expired */
        } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
-               if (mask & MAY_NOT_BLOCK)
-                       return -ECHILD;
-
                err = fuse_access(inode, mask);
        } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
                if (!(inode->i_mode & S_IXUGO)) {
@@ -1280,7 +1256,7 @@ static int fuse_direntplus_link(struct file *file,
        if (!inode)
                goto out;
 
-       alias = fuse_materialise_dentry(dentry, inode);
+       alias = d_materialise_unique(dentry, inode);
        err = PTR_ERR(alias);
        if (IS_ERR(alias))
                goto out;
@@ -1291,6 +1267,8 @@ static int fuse_direntplus_link(struct file *file,
        }
 
 found:
+       if (fc->readdirplus_auto)
+               set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
        fuse_change_entry_timeout(dentry, o);
 
        err = 0;
index d409deafc67b2f6c94fb3fa53b96a21c60585d56..26c33f36179af2061fd57641da00fdf0f60a0a92 100644 (file)
@@ -334,7 +334,8 @@ static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
 
                BUG_ON(req->inode != inode);
                curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
-               if (curr_index == index) {
+               if (curr_index <= index &&
+                   index < curr_index + req->num_pages) {
                        found = true;
                        break;
                }
@@ -1178,9 +1179,10 @@ static inline void fuse_page_descs_length_init(struct fuse_req *req,
                        req->page_descs[i].offset;
 }
 
-static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
+static inline unsigned long fuse_get_user_addr(struct iov_iter *ii)
 {
-       return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+       struct iovec *iov = iov_iter_iovec(ii);
+       return (unsigned long)iov->iov_base + ii->iov_offset;
 }
 
 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
@@ -1269,9 +1271,8 @@ static inline int fuse_iter_npages(const struct iov_iter *ii_p)
        return min(npages, FUSE_MAX_PAGES_PER_REQ);
 }
 
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
-                      unsigned long nr_segs, size_t count, loff_t *ppos,
-                      int write)
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+                      size_t count, loff_t *ppos, int write)
 {
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
@@ -1280,14 +1281,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;
-       struct iov_iter ii;
-
-       iov_iter_init(&ii, iov, nr_segs, count, 0);
 
        if (io->async)
-               req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+               req = fuse_get_req_for_background(fc, fuse_iter_npages(ii));
        else
-               req = fuse_get_req(fc, fuse_iter_npages(&ii));
+               req = fuse_get_req(fc, fuse_iter_npages(ii));
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1295,7 +1293,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                size_t nres;
                fl_owner_t owner = current->files;
                size_t nbytes = min(count, nmax);
-               int err = fuse_get_user_pages(req, &ii, &nbytes, write);
+               int err = fuse_get_user_pages(req, ii, &nbytes, write);
                if (err) {
                        res = err;
                        break;
@@ -1325,9 +1323,9 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                        fuse_put_request(fc, req);
                        if (io->async)
                                req = fuse_get_req_for_background(fc,
-                                       fuse_iter_npages(&ii));
+                                       fuse_iter_npages(ii));
                        else
-                               req = fuse_get_req(fc, fuse_iter_npages(&ii));
+                               req = fuse_get_req(fc, fuse_iter_npages(ii));
                        if (IS_ERR(req))
                                break;
                }
@@ -1341,10 +1339,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 }
 EXPORT_SYMBOL_GPL(fuse_direct_io);
 
-static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
-                                 const struct iovec *iov,
-                                 unsigned long nr_segs, loff_t *ppos,
-                                 size_t count)
+static ssize_t __fuse_direct_read(struct fuse_io_priv *io, struct iov_iter *ii,
+                                 loff_t *ppos, size_t count)
 {
        ssize_t res;
        struct file *file = io->file;
@@ -1353,7 +1349,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
        if (is_bad_inode(inode))
                return -EIO;
 
-       res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
+       res = fuse_direct_io(io, ii, count, ppos, 0);
 
        fuse_invalidate_attr(inode);
 
@@ -1365,21 +1361,24 @@ static ssize_t fuse_direct_read(struct file *file, char __user *buf,
 {
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct iovec iov = { .iov_base = buf, .iov_len = count };
-       return __fuse_direct_read(&io, &iov, 1, ppos, count);
+       struct iov_iter ii;
+
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
+       return __fuse_direct_read(&io, &ii, ppos, count);
 }
 
-static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
-                                  const struct iovec *iov,
-                                  unsigned long nr_segs, loff_t *ppos)
+static ssize_t __fuse_direct_write(struct fuse_io_priv *io, struct iov_iter *ii,
+                                  loff_t *ppos)
 {
        struct file *file = io->file;
        struct inode *inode = file_inode(file);
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(ii);
        ssize_t res;
 
        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
-               res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
+               res = fuse_direct_io(io, ii, count, ppos, 1);
 
        fuse_invalidate_attr(inode);
 
@@ -1390,6 +1389,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+       struct iov_iter ii;
        struct inode *inode = file_inode(file);
        ssize_t res;
        struct fuse_io_priv io = { .async = 0, .file = file };
@@ -1397,9 +1397,11 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
        if (is_bad_inode(inode))
                return -EIO;
 
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
-       res = __fuse_direct_write(&io, &iov, 1, ppos);
+       res = __fuse_direct_write(&io, &ii, ppos);
        if (res > 0)
                fuse_write_update_size(inode, *ppos);
        mutex_unlock(&inode->i_mutex);
@@ -1409,8 +1411,13 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
-       __free_page(req->pages[0]);
-       fuse_file_put(req->ff, false);
+       int i;
+
+       for (i = 0; i < req->num_pages; i++)
+               __free_page(req->pages[i]);
+
+       if (req->ff)
+               fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1418,30 +1425,34 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+       int i;
 
        list_del(&req->writepages_entry);
-       dec_bdi_stat(bdi, BDI_WRITEBACK);
-       dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
-       bdi_writeout_inc(bdi);
+       for (i = 0; i < req->num_pages; i++) {
+               dec_bdi_stat(bdi, BDI_WRITEBACK);
+               dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+               bdi_writeout_inc(bdi);
+       }
        wake_up(&fi->page_waitq);
 }
 
 /* Called under fc->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
+                               loff_t size)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
-       loff_t size = i_size_read(req->inode);
        struct fuse_write_in *inarg = &req->misc.write.in;
+       __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
 
        if (!fc->connected)
                goto out_free;
 
-       if (inarg->offset + PAGE_CACHE_SIZE <= size) {
-               inarg->size = PAGE_CACHE_SIZE;
+       if (inarg->offset + data_size <= size) {
+               inarg->size = data_size;
        } else if (inarg->offset < size) {
-               inarg->size = size & (PAGE_CACHE_SIZE - 1);
+               inarg->size = size - inarg->offset;
        } else {
                /* Got truncated off completely */
                goto out_free;
@@ -1472,12 +1483,13 @@ __acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
+       size_t crop = i_size_read(inode);
        struct fuse_req *req;
 
        while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
                req = list_entry(fi->queued_writes.next, struct fuse_req, list);
                list_del_init(&req->list);
-               fuse_send_writepage(fc, req);
+               fuse_send_writepage(fc, req, crop);
        }
 }
 
@@ -1488,12 +1500,62 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
 
        mapping_set_error(inode->i_mapping, req->out.h.error);
        spin_lock(&fc->lock);
+       while (req->misc.write.next) {
+               struct fuse_conn *fc = get_fuse_conn(inode);
+               struct fuse_write_in *inarg = &req->misc.write.in;
+               struct fuse_req *next = req->misc.write.next;
+               req->misc.write.next = next->misc.write.next;
+               next->misc.write.next = NULL;
+               next->ff = fuse_file_get(req->ff);
+               list_add(&next->writepages_entry, &fi->writepages);
+
+               /*
+                * Skip fuse_flush_writepages() to make it easy to crop requests
+                * based on primary request size.
+                *
+                * 1st case (trivial): there are no concurrent activities using
+                * fuse_set/release_nowrite.  Then we're on safe side because
+                * fuse_flush_writepages() would call fuse_send_writepage()
+                * anyway.
+                *
+                * 2nd case: someone called fuse_set_nowrite and it is waiting
+                * now for completion of all in-flight requests.  This happens
+                * rarely and no more than once per page, so this should be
+                * okay.
+                *
+                * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
+                * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
+                * that fuse_set_nowrite returned implies that all in-flight
+                * requests were completed along with all of their secondary
+                * requests.  Further primary requests are blocked by negative
+                * writectr.  Hence there cannot be any in-flight requests and
+                * no invocations of fuse_writepage_end() while we're in
+                * fuse_set_nowrite..fuse_release_nowrite section.
+                */
+               fuse_send_writepage(fc, next, inarg->offset + inarg->size);
+       }
        fi->writectr--;
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
 }
 
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+                                            struct fuse_inode *fi)
+{
+       struct fuse_file *ff = NULL;
+
+       spin_lock(&fc->lock);
+       if (!WARN_ON(list_empty(&fi->write_files))) {
+               ff = list_entry(fi->write_files.next, struct fuse_file,
+                               write_entry);
+               fuse_file_get(ff);
+       }
+       spin_unlock(&fc->lock);
+
+       return ff;
+}
+
 static int fuse_writepage_locked(struct page *page)
 {
        struct address_space *mapping = page->mapping;
@@ -1501,8 +1563,8 @@ static int fuse_writepage_locked(struct page *page)
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
-       struct fuse_file *ff;
        struct page *tmp_page;
+       int error = -ENOMEM;
 
        set_page_writeback(page);
 
@@ -1515,16 +1577,16 @@ static int fuse_writepage_locked(struct page *page)
        if (!tmp_page)
                goto err_free;
 
-       spin_lock(&fc->lock);
-       BUG_ON(list_empty(&fi->write_files));
-       ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
-       req->ff = fuse_file_get(ff);
-       spin_unlock(&fc->lock);
+       error = -EIO;
+       req->ff = fuse_write_file_get(fc, fi);
+       if (!req->ff)
+               goto err_free;
 
-       fuse_write_fill(req, ff, page_offset(page), 0);
+       fuse_write_fill(req, req->ff, page_offset(page), 0);
 
        copy_highpage(tmp_page, page);
        req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+       req->misc.write.next = NULL;
        req->in.argpages = 1;
        req->num_pages = 1;
        req->pages[0] = tmp_page;
@@ -1550,19 +1612,263 @@ err_free:
        fuse_request_free(req);
 err:
        end_page_writeback(page);
-       return -ENOMEM;
+       return error;
 }
 
 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
 {
        int err;
 
+       if (fuse_page_is_writeback(page->mapping->host, page->index)) {
+               /*
+                * ->writepages() should be called for sync() and friends.  We
+                * should only get here on direct reclaim and then we are
+                * allowed to skip a page which is already in flight
+                */
+               WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+               redirty_page_for_writepage(wbc, page);
+               return 0;
+       }
+
        err = fuse_writepage_locked(page);
        unlock_page(page);
 
        return err;
 }
 
+struct fuse_fill_wb_data {
+       struct fuse_req *req;
+       struct fuse_file *ff;
+       struct inode *inode;
+       struct page **orig_pages;
+};
+
+static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+{
+       struct fuse_req *req = data->req;
+       struct inode *inode = data->inode;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       int num_pages = req->num_pages;
+       int i;
+
+       req->ff = fuse_file_get(data->ff);
+       spin_lock(&fc->lock);
+       list_add_tail(&req->list, &fi->queued_writes);
+       fuse_flush_writepages(inode);
+       spin_unlock(&fc->lock);
+
+       for (i = 0; i < num_pages; i++)
+               end_page_writeback(data->orig_pages[i]);
+}
+
+static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+                                    struct page *page)
+{
+       struct fuse_conn *fc = get_fuse_conn(new_req->inode);
+       struct fuse_inode *fi = get_fuse_inode(new_req->inode);
+       struct fuse_req *tmp;
+       struct fuse_req *old_req;
+       bool found = false;
+       pgoff_t curr_index;
+
+       BUG_ON(new_req->num_pages != 0);
+
+       spin_lock(&fc->lock);
+       list_del(&new_req->writepages_entry);
+       list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
+               BUG_ON(old_req->inode != new_req->inode);
+               curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               if (curr_index <= page->index &&
+                   page->index < curr_index + old_req->num_pages) {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found) {
+               list_add(&new_req->writepages_entry, &fi->writepages);
+               goto out_unlock;
+       }
+
+       new_req->num_pages = 1;
+       for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
+               BUG_ON(tmp->inode != new_req->inode);
+               curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               if (tmp->num_pages == 1 &&
+                   curr_index == page->index) {
+                       old_req = tmp;
+               }
+       }
+
+       if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
+                                       old_req->state == FUSE_REQ_PENDING)) {
+               struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+
+               copy_highpage(old_req->pages[0], page);
+               spin_unlock(&fc->lock);
+
+               dec_bdi_stat(bdi, BDI_WRITEBACK);
+               dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+               bdi_writeout_inc(bdi);
+               fuse_writepage_free(fc, new_req);
+               fuse_request_free(new_req);
+               goto out;
+       } else {
+               new_req->misc.write.next = old_req->misc.write.next;
+               old_req->misc.write.next = new_req;
+       }
+out_unlock:
+       spin_unlock(&fc->lock);
+out:
+       return found;
+}
+
+static int fuse_writepages_fill(struct page *page,
+               struct writeback_control *wbc, void *_data)
+{
+       struct fuse_fill_wb_data *data = _data;
+       struct fuse_req *req = data->req;
+       struct inode *inode = data->inode;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct page *tmp_page;
+       bool is_writeback;
+       int err;
+
+       if (!data->ff) {
+               err = -EIO;
+               data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+               if (!data->ff)
+                       goto out_unlock;
+       }
+
+       /*
+        * Being under writeback is unlikely but possible.  For example direct
+        * read to an mmaped fuse file will set the page dirty twice; once when
+        * the pages are faulted with get_user_pages(), and then after the read
+        * completed.
+        */
+       is_writeback = fuse_page_is_writeback(inode, page->index);
+
+       if (req && req->num_pages &&
+           (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+            (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+            data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+               fuse_writepages_send(data);
+               data->req = NULL;
+       }
+       err = -ENOMEM;
+       tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       if (!tmp_page)
+               goto out_unlock;
+
+       /*
+        * The page must not be redirtied until the writeout is completed
+        * (i.e. userspace has sent a reply to the write request).  Otherwise
+        * there could be more than one temporary page instance for each real
+        * page.
+        *
+        * This is ensured by holding the page lock in page_mkwrite() while
+        * checking fuse_page_is_writeback().  We already hold the page lock
+        * since clear_page_dirty_for_io() and keep it held until we add the
+        * request to the fi->writepages list and increment req->num_pages.
+        * After this fuse_page_is_writeback() will indicate that the page is
+        * under writeback, so we can release the page lock.
+        */
+       if (data->req == NULL) {
+               struct fuse_inode *fi = get_fuse_inode(inode);
+
+               err = -ENOMEM;
+               req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+               if (!req) {
+                       __free_page(tmp_page);
+                       goto out_unlock;
+               }
+
+               fuse_write_fill(req, data->ff, page_offset(page), 0);
+               req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+               req->misc.write.next = NULL;
+               req->in.argpages = 1;
+               req->background = 1;
+               req->num_pages = 0;
+               req->end = fuse_writepage_end;
+               req->inode = inode;
+
+               spin_lock(&fc->lock);
+               list_add(&req->writepages_entry, &fi->writepages);
+               spin_unlock(&fc->lock);
+
+               data->req = req;
+       }
+       set_page_writeback(page);
+
+       copy_highpage(tmp_page, page);
+       req->pages[req->num_pages] = tmp_page;
+       req->page_descs[req->num_pages].offset = 0;
+       req->page_descs[req->num_pages].length = PAGE_SIZE;
+
+       inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+       inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+
+       err = 0;
+       if (is_writeback && fuse_writepage_in_flight(req, page)) {
+               end_page_writeback(page);
+               data->req = NULL;
+               goto out_unlock;
+       }
+       data->orig_pages[req->num_pages] = page;
+
+       /*
+        * Protected by fc->lock against concurrent access by
+        * fuse_page_is_writeback().
+        */
+       spin_lock(&fc->lock);
+       req->num_pages++;
+       spin_unlock(&fc->lock);
+
+out_unlock:
+       unlock_page(page);
+
+       return err;
+}
+
+static int fuse_writepages(struct address_space *mapping,
+                          struct writeback_control *wbc)
+{
+       struct inode *inode = mapping->host;
+       struct fuse_fill_wb_data data;
+       int err;
+
+       err = -EIO;
+       if (is_bad_inode(inode))
+               goto out;
+
+       data.inode = inode;
+       data.req = NULL;
+       data.ff = NULL;
+
+       err = -ENOMEM;
+       data.orig_pages = kzalloc(sizeof(struct page *) *
+                                 FUSE_MAX_PAGES_PER_REQ,
+                                 GFP_NOFS);
+       if (!data.orig_pages)
+               goto out;
+
+       err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
+       if (data.req) {
+               /* Ignore errors if we can write at least one page */
+               BUG_ON(!data.req->num_pages);
+               fuse_writepages_send(&data);
+               err = 0;
+       }
+       if (data.ff)
+               fuse_file_put(data.ff, false);
+
+       kfree(data.orig_pages);
+out:
+       return err;
+}
+
 static int fuse_launder_page(struct page *page)
 {
        int err = 0;
@@ -1602,14 +1908,17 @@ static void fuse_vma_close(struct vm_area_struct *vma)
 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       /*
-        * Don't use page->mapping as it may become NULL from a
-        * concurrent truncate.
-        */
-       struct inode *inode = vma->vm_file->f_mapping->host;
+       struct inode *inode = file_inode(vma->vm_file);
+
+       file_update_time(vma->vm_file);
+       lock_page(page);
+       if (page->mapping != inode->i_mapping) {
+               unlock_page(page);
+               return VM_FAULT_NOPAGE;
+       }
 
        fuse_wait_on_page_writeback(inode, page->index);
-       return 0;
+       return VM_FAULT_LOCKED;
 }
 
 static const struct vm_operations_struct fuse_file_vm_ops = {
@@ -1868,30 +2177,17 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
        while (iov_iter_count(&ii)) {
                struct page *page = pages[page_idx++];
                size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
-               void *kaddr;
-
-               kaddr = kmap(page);
-
-               while (todo) {
-                       char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
-                       size_t iov_len = ii.iov->iov_len - ii.iov_offset;
-                       size_t copy = min(todo, iov_len);
-                       size_t left;
-
-                       if (!to_user)
-                               left = copy_from_user(kaddr, uaddr, copy);
-                       else
-                               left = copy_to_user(uaddr, kaddr, copy);
+               size_t left;
 
-                       if (unlikely(left))
-                               return -EFAULT;
+               if (!to_user)
+                       left = iov_iter_copy_from_user(page, &ii, 0, todo);
+               else
+                       left = iov_iter_copy_to_user(page, &ii, 0, todo);
 
-                       iov_iter_advance(&ii, copy);
-                       todo -= copy;
-                       kaddr += copy;
-               }
+               if (unlikely(left))
+                       return -EFAULT;
 
-               kunmap(page);
+               iov_iter_advance(&ii, todo);
        }
 
        return 0;
@@ -2385,8 +2681,8 @@ static inline loff_t fuse_round_up(loff_t off)
 }
 
 static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *ii,
+                       loff_t offset)
 {
        ssize_t ret = 0;
        struct file *file = iocb->ki_filp;
@@ -2395,7 +2691,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t pos = 0;
        struct inode *inode;
        loff_t i_size;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(ii);
        struct fuse_io_priv *io;
 
        pos = offset;
@@ -2436,9 +2732,9 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                io->async = false;
 
        if (rw == WRITE)
-               ret = __fuse_direct_write(io, iov, nr_segs, &pos);
+               ret = __fuse_direct_write(io, ii, &pos);
        else
-               ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+               ret = __fuse_direct_read(io, ii, &pos, count);
 
        if (io->async) {
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
@@ -2467,6 +2763,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 {
        struct fuse_file *ff = file->private_data;
        struct inode *inode = file->f_inode;
+       struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_conn *fc = ff->fc;
        struct fuse_req *req;
        struct fuse_fallocate_in inarg = {
@@ -2484,10 +2781,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 
        if (lock_inode) {
                mutex_lock(&inode->i_mutex);
-               if (mode & FALLOC_FL_PUNCH_HOLE)
-                       fuse_set_nowrite(inode);
+               if (mode & FALLOC_FL_PUNCH_HOLE) {
+                       loff_t endbyte = offset + length - 1;
+                       err = filemap_write_and_wait_range(inode->i_mapping,
+                                                          offset, endbyte);
+                       if (err)
+                               goto out;
+
+                       fuse_sync_writes(inode);
+               }
        }
 
+       if (!(mode & FALLOC_FL_KEEP_SIZE))
+               set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
@@ -2520,11 +2827,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        fuse_invalidate_attr(inode);
 
 out:
-       if (lock_inode) {
-               if (mode & FALLOC_FL_PUNCH_HOLE)
-                       fuse_release_nowrite(inode);
+       if (!(mode & FALLOC_FL_KEEP_SIZE))
+               clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+
+       if (lock_inode)
                mutex_unlock(&inode->i_mutex);
-       }
 
        return err;
 }
@@ -2570,6 +2877,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
 static const struct address_space_operations fuse_file_aops  = {
        .readpage       = fuse_readpage,
        .writepage      = fuse_writepage,
+       .writepages     = fuse_writepages,
        .launder_page   = fuse_launder_page,
        .readpages      = fuse_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
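
The hunks above add a fuse_writepages() address-space operation that batches dirty pages into a single request via write_cache_pages() and a per-page fill callback. Below is a minimal, standalone userspace sketch of just that batching idea; every type and name in it (wb_request, fill_data, MAX_PAGES_PER_REQ, ...) is a hypothetical stand-in, not the kernel or FUSE API.

/*
 * Minimal userspace sketch of the batching idea behind the new
 * fuse_writepages()/fuse_writepages_fill() path: a per-page "fill"
 * callback appends pages to the current request and flushes it once
 * the request is full.  Hypothetical types, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PAGES_PER_REQ 4           /* stand-in for FUSE_MAX_PAGES_PER_REQ */

struct wb_request {
	int pages[MAX_PAGES_PER_REQ]; /* page indexes batched into one request */
	int num_pages;
};

struct fill_data {
	struct wb_request *req;       /* request currently being filled, if any */
};

static void send_request(struct wb_request *req)
{
	printf("sending %d page(s):", req->num_pages);
	for (int i = 0; i < req->num_pages; i++)
		printf(" %d", req->pages[i]);
	printf("\n");
	free(req);
}

/* Called once per dirty page, in the spirit of fuse_writepages_fill(). */
static int fill_one_page(struct fill_data *data, int page_index)
{
	if (data->req && data->req->num_pages == MAX_PAGES_PER_REQ) {
		send_request(data->req);  /* request full: flush and start over */
		data->req = NULL;
	}
	if (!data->req) {
		data->req = calloc(1, sizeof(*data->req));
		if (!data->req)
			return -1;
	}
	data->req->pages[data->req->num_pages++] = page_index;
	return 0;
}

int main(void)
{
	struct fill_data data = { .req = NULL };

	/* Pretend the writeback walk found ten dirty pages. */
	for (int page = 0; page < 10; page++)
		if (fill_one_page(&data, page))
			return 1;

	if (data.req)                     /* flush the final, partial request */
		send_request(data.req);
	return 0;
}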
index 5ced199b50bbb19a9dcd8fb42313de4afba26d6c..04c6084b0cf98496822a24b293b7ff815de00531 100644 (file)
@@ -115,6 +115,8 @@ struct fuse_inode {
 enum {
        /** Advise readdirplus  */
        FUSE_I_ADVISE_RDPLUS,
+       /** Initialized with readdirplus */
+       FUSE_I_INIT_RDPLUS,
        /** An operation changing file size is in progress  */
        FUSE_I_SIZE_UNSTABLE,
 };
@@ -319,6 +321,7 @@ struct fuse_req {
                struct {
                        struct fuse_write_in in;
                        struct fuse_write_out out;
+                       struct fuse_req *next;
                } write;
                struct fuse_notify_retrieve_in retrieve_in;
                struct fuse_lk_in lk_in;
@@ -372,9 +375,6 @@ struct fuse_conn {
        /** Lock protecting accessess to  members of this structure */
        spinlock_t lock;
 
-       /** Mutex protecting against directory alias creation */
-       struct mutex inst_mutex;
-
        /** Refcount */
        atomic_t count;
 
@@ -856,9 +856,8 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
 
 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                 bool isdir);
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
-                      unsigned long nr_segs, size_t count, loff_t *ppos,
-                      int write);
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+                      size_t count, loff_t *ppos, int write);
 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                   unsigned int flags);
 long fuse_ioctl_common(struct file *file, unsigned int cmd,
index a8ce6dab60a0b0e3279b6996c8462610c0cbef14..1c15613c64f8d0e768596a2e35ee81f3d486d7cc 100644 (file)
@@ -565,7 +565,6 @@ void fuse_conn_init(struct fuse_conn *fc)
 {
        memset(fc, 0, sizeof(*fc));
        spin_lock_init(&fc->lock);
-       mutex_init(&fc->inst_mutex);
        init_rwsem(&fc->killsb);
        atomic_set(&fc->count, 1);
        init_waitqueue_head(&fc->waitq);
@@ -596,7 +595,6 @@ void fuse_conn_put(struct fuse_conn *fc)
        if (atomic_dec_and_test(&fc->count)) {
                if (fc->destroy_req)
                        fuse_request_free(fc->destroy_req);
-               mutex_destroy(&fc->inst_mutex);
                fc->release(fc);
        }
 }
index 1f7d8057ea68d1c7214d3db0a6446aa248888eea..01a2aa5f25a149c312f87d4ad6525ac0c1d808a6 100644 (file)
@@ -611,12 +611,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
 
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .aflags = 0, };
                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_unlock;
 
                requested = data_blocks + ind_blocks;
-               error = gfs2_inplace_reserve(ip, requested, 0);
+               ap.target = requested;
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_qunlock;
        }
@@ -979,8 +981,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1004,8 +1005,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */
 
-       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-                                 offset, nr_segs, gfs2_get_block_direct,
+       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+                                 offset, gfs2_get_block_direct,
                                  NULL, NULL, 0);
 out:
        gfs2_glock_dq(&gh);
index 62a65fc448dcedf5009f85a233cb43ad7cd4b442..fe0500c0af7aab88ca4276234df6894cca7290b3 100644 (file)
@@ -1216,6 +1216,7 @@ static int do_grow(struct inode *inode, u64 size)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_alloc_parms ap = { .target = 1, };
        struct buffer_head *dibh;
        int error;
        int unstuff = 0;
@@ -1226,7 +1227,7 @@ static int do_grow(struct inode *inode, u64 size)
                if (error)
                        return error;
 
-               error = gfs2_inplace_reserve(ip, 1, 0);
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto do_grow_qunlock;
                unstuff = 1;
@@ -1279,6 +1280,7 @@ do_grow_qunlock:
 
 int gfs2_setattr_size(struct inode *inode, u64 newsize)
 {
+       struct gfs2_inode *ip = GFS2_I(inode);
        int ret;
        u64 oldsize;
 
@@ -1294,7 +1296,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
        inode_dio_wait(inode);
 
-       ret = gfs2_rs_alloc(GFS2_I(inode));
+       ret = gfs2_rs_alloc(ip);
        if (ret)
                goto out;
 
@@ -1304,6 +1306,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
                goto out;
        }
 
+       gfs2_rs_deltree(ip->i_res);
        ret = do_shrink(inode, oldsize, newsize);
 out:
        put_write_access(inode);
index 0621b46d474d0e6d82157e6701b3e49449839908..0838913ca5687dc0e2abaaeb6ae4159a8458cce7 100644 (file)
@@ -383,6 +383,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = file_inode(vma->vm_file);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned long last_index;
        u64 pos = page->index << PAGE_CACHE_SHIFT;
        unsigned int data_blocks, ind_blocks, rblocks;
@@ -430,7 +431,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                goto out_unlock;
        gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
-       ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+       ap.target = data_blocks + ind_blocks;
+       ret = gfs2_inplace_reserve(ip, &ap);
        if (ret)
                goto out_quota_unlock;
 
@@ -620,7 +622,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
        if (!(file->f_mode & FMODE_WRITE))
                return 0;
 
-       gfs2_rs_delete(ip);
+       gfs2_rs_delete(ip, &inode->i_writecount);
        return 0;
 }
 
@@ -681,10 +683,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 }
 
 /**
- * gfs2_file_aio_write - Perform a write to a file
+ * gfs2_file_write_iter - Perform a write to a file
  * @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
+ * @iter: The data to write
  * @pos: The file position
  *
  * We have to do a lock/unlock here to refresh the inode size for
@@ -694,11 +695,11 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
  *
  */
 
-static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                  unsigned long nr_segs, loff_t pos)
+static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                   loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       size_t writesize = iov_length(iov, nr_segs);
+       size_t writesize = iov_iter_count(iter);
        struct gfs2_inode *ip = GFS2_I(file_inode(file));
        int ret;
 
@@ -717,7 +718,7 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                gfs2_glock_dq_uninit(&gh);
        }
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       return generic_file_write_iter(iocb, iter, pos);
 }
 
 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -800,6 +801,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes;
        int error;
@@ -850,7 +852,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 retry:
                gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 
-               error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+               ap.target = data_blocks + ind_blocks;
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error) {
                        if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
                                bytes >>= 1;
@@ -1049,9 +1052,9 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = gfs2_file_aio_write,
+       .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
@@ -1081,9 +1084,9 @@ const struct file_operations gfs2_dir_fops = {
 const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = gfs2_file_aio_write,
+       .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
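
The gfs2/file.c hunks above replace the iovec-array read/write paths with iov_iter-based ones, so byte accounting moves from iov_length(iov, nr_segs) to iov_iter_count(iter). The sketch below is a small standalone illustration of why the iterator form is convenient: the remaining byte count travels with the iterator and shrinks as data is consumed. The mini_iter type and helpers are hypothetical miniatures, not the kernel's struct iov_iter.

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>   /* struct iovec */

/* Hypothetical miniature iterator, loosely modelled on struct iov_iter. */
struct mini_iter {
	const struct iovec *iov; /* current segment */
	size_t nr_segs;          /* segments remaining */
	size_t iov_offset;       /* offset into the current segment */
	size_t count;            /* total bytes remaining */
};

static size_t mini_iter_count(const struct mini_iter *i)
{
	return i->count;         /* what the old code recomputed with iov_length() */
}

static void mini_iter_advance(struct mini_iter *i, size_t bytes)
{
	i->count -= bytes;
	while (bytes) {
		size_t seg = i->iov->iov_len - i->iov_offset;

		if (bytes < seg) {
			i->iov_offset += bytes;
			break;
		}
		bytes -= seg;    /* move on to the next segment */
		i->iov++;
		i->nr_segs--;
		i->iov_offset = 0;
	}
}

int main(void)
{
	char a[10], b[20];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct mini_iter it = { iov, 2, 0, sizeof(a) + sizeof(b) };

	printf("before: %zu bytes left\n", mini_iter_count(&it));
	mini_iter_advance(&it, 15);   /* pretend 15 bytes were copied */
	printf("after:  %zu bytes left\n", mini_iter_count(&it));
	return 0;
}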
index c2f41b4d00b9872ccfb4f5f23f988d46f154df32..e66a8009aff16d66b1179bba0ffc3a266ff923f6 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/percpu.h>
 #include <linux/list_sort.h>
+#include <linux/lockref.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -129,10 +130,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
  *
  */
 
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
-       GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
-       atomic_inc(&gl->gl_ref);
+       GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+       lockref_get(&gl->gl_lockref);
 }
 
 /**
@@ -186,20 +187,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
        spin_unlock(&lru_lock);
 }
 
-/**
- * gfs2_glock_put_nolock() - Decrement reference count on glock
- * @gl: The glock to put
- *
- * This function should only be used if the caller has its own reference
- * to the glock, in addition to the one it is dropping.
- */
-
-void gfs2_glock_put_nolock(struct gfs2_glock *gl)
-{
-       if (atomic_dec_and_test(&gl->gl_ref))
-               GLOCK_BUG_ON(gl, 1);
-}
-
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -211,17 +198,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);
 
-       if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
-               __gfs2_glock_remove_from_lru(gl);
-               spin_unlock(&lru_lock);
-               spin_lock_bucket(gl->gl_hash);
-               hlist_bl_del_rcu(&gl->gl_list);
-               spin_unlock_bucket(gl->gl_hash);
-               GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-               GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
-               trace_gfs2_glock_put(gl);
-               sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
-       }
+       if (lockref_put_or_lock(&gl->gl_lockref))
+               return;
+
+       lockref_mark_dead(&gl->gl_lockref);
+
+       spin_lock(&lru_lock);
+       __gfs2_glock_remove_from_lru(gl);
+       spin_unlock(&lru_lock);
+       spin_unlock(&gl->gl_lockref.lock);
+       spin_lock_bucket(gl->gl_hash);
+       hlist_bl_del_rcu(&gl->gl_list);
+       spin_unlock_bucket(gl->gl_hash);
+       GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+       GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+       trace_gfs2_glock_put(gl);
+       sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
 /**
@@ -244,7 +236,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;
-               if (atomic_inc_not_zero(&gl->gl_ref))
+               if (lockref_get_not_dead(&gl->gl_lockref))
                        return gl;
        }
 
@@ -396,10 +388,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
        held2 = (new_state != LM_ST_UNLOCKED);
 
        if (held1 != held2) {
+               GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
                if (held2)
-                       gfs2_glock_hold(gl);
+                       gl->gl_lockref.count++;
                else
-                       gfs2_glock_put_nolock(gl);
+                       gl->gl_lockref.count--;
        }
        if (held1 && held2 && list_empty(&gl->gl_holders))
                clear_bit(GLF_QUEUED, &gl->gl_flags);
@@ -626,9 +619,9 @@ out:
 out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_clear_bit();
-       gfs2_glock_hold(gl);
+       gl->gl_lockref.count++;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-               gfs2_glock_put_nolock(gl);
+               gl->gl_lockref.count--;
        return;
 
 out_unlock:
@@ -754,7 +747,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        gl->gl_sbd = sdp;
        gl->gl_flags = 0;
        gl->gl_name = name;
-       atomic_set(&gl->gl_ref, 1);
+       gl->gl_lockref.count = 1;
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1356,10 +1349,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
                }
        }
 
-       spin_unlock(&gl->gl_spin);
+       gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       smp_wmb();
-       gfs2_glock_hold(gl);
+       spin_unlock(&gl->gl_spin);
+
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
 }
@@ -1404,15 +1397,19 @@ __acquires(&lru_lock)
        while(!list_empty(list)) {
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
+               if (!spin_trylock(&gl->gl_spin)) {
+                       list_add(&gl->gl_lru, &lru_list);
+                       atomic_inc(&lru_count);
+                       continue;
+               }
                clear_bit(GLF_LRU, &gl->gl_flags);
-               gfs2_glock_hold(gl);
                spin_unlock(&lru_lock);
-               spin_lock(&gl->gl_spin);
+               gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                       gfs2_glock_put_nolock(gl);
+                       gl->gl_lockref.count--;
                spin_unlock(&gl->gl_spin);
                spin_lock(&lru_lock);
        }
@@ -1493,7 +1490,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 
        rcu_read_lock();
        hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-               if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
+               if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
                        examiner(gl);
        }
        rcu_read_unlock();
@@ -1746,7 +1743,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
                  state2str(gl->gl_demote_state), dtime,
                  atomic_read(&gl->gl_ail_count),
                  atomic_read(&gl->gl_revokes),
-                 atomic_read(&gl->gl_ref), gl->gl_hold_time);
+                 (int)gl->gl_lockref.count, gl->gl_hold_time);
 
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder(seq, gh);
@@ -1902,7 +1899,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
                        gi->nhash = 0;
                }
        /* Skip entries for other sb and dead entries */
-       } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
+       } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gi->gl->gl_lockref));
 
        return 0;
 }
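
The glock.c hunks above convert the glock reference count from a bare atomic_t plus a separate spinlock to a struct lockref, so the final put only takes the lock when the count is about to reach zero. Below is a minimal userspace sketch of that put-or-lock pattern; a pthread mutex stands in for the spinlock, there is no lockless fast path, and mini_lockref/mini_put_or_lock are hypothetical names, not the kernel lockref API.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

/* Hypothetical stand-in for struct lockref: a count protected by a lock. */
struct mini_lockref {
	pthread_mutex_t lock;
	int count;
};

/*
 * Drop a reference.  Returns true if the object stays alive (caller is
 * done); returns false with the lock still held if this was the last
 * reference, so the caller can tear the object down under the lock.
 */
static bool mini_put_or_lock(struct mini_lockref *ref)
{
	pthread_mutex_lock(&ref->lock);
	if (ref->count > 1) {
		ref->count--;
		pthread_mutex_unlock(&ref->lock);
		return true;
	}
	return false;             /* last reference: lock stays held */
}

int main(void)
{
	struct mini_lockref ref = { PTHREAD_MUTEX_INITIALIZER, 2 };

	if (mini_put_or_lock(&ref))
		printf("still referenced, count=%d\n", ref.count);

	if (!mini_put_or_lock(&ref)) {
		ref.count = 0;    /* mark dead, then free the object... */
		pthread_mutex_unlock(&ref.lock);
		printf("last reference dropped, freeing\n");
	}
	return 0;
}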
index 69f66e3d22bf512787b3b9db403e0c8c91babd59..6647d77366ba097c4c98f482bcef1ab980704baa 100644 (file)
@@ -181,8 +181,6 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                          const struct gfs2_glock_operations *glops,
                          int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 extern void gfs2_glock_put(struct gfs2_glock *gl);
 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                             unsigned flags, struct gfs2_holder *gh);
index e2e0a90396e7823da9aa47c44a727d5e75751cc6..db908f697139cfffbca462d3a7528e13139576b5 100644 (file)
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 
        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
-               gfs2_glock_hold(gl);
+               gl->gl_lockref.count++;
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
-                       gfs2_glock_put_nolock(gl);
+                       gl->gl_lockref.count--;
        }
 }
 
index 26aabd7caba7edfd56c9d611df1732ac6b16a6a1..bb88e417231f88f95c4801d582af0cf254cf8f7e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/rbtree.h>
 #include <linux/ktime.h>
 #include <linux/percpu.h>
+#include <linux/lockref.h>
 
 #define DIO_WAIT       0x00000010
 #define DIO_METADATA   0x00000020
@@ -71,6 +72,7 @@ struct gfs2_bitmap {
        u32 bi_offset;
        u32 bi_start;
        u32 bi_len;
+       u32 bi_blocks;
 };
 
 struct gfs2_rgrpd {
@@ -101,19 +103,25 @@ struct gfs2_rgrpd {
 
 struct gfs2_rbm {
        struct gfs2_rgrpd *rgd;
-       struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */
        u32 offset;             /* The offset is bitmap relative */
+       int bii;                /* Bitmap index */
 };
 
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+       return rbm->rgd->rd_bits + rbm->bii;
+}
+
 static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
 {
-       return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+       return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+               rbm->offset;
 }
 
 static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
                               const struct gfs2_rbm *rbm2)
 {
-       return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) && 
+       return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
               (rbm1->offset == rbm2->offset);
 }
 
@@ -278,6 +286,20 @@ struct gfs2_blkreserv {
        unsigned int rs_qa_qd_num;
 };
 
+/*
+ * Allocation parameters
+ * @target: The number of blocks we'd ideally like to allocate
+ * @aflags: The flags (e.g. Orlov flag)
+ *
+ * The intent is to gradually expand this structure over time in
+ * order to give more information, e.g. alignment, min extent size
+ * to the allocation code.
+ */
+struct gfs2_alloc_parms {
+       u32 target;
+       u32 aflags;
+};
+
 enum {
        GLF_LOCK                        = 1,
        GLF_DEMOTE                      = 3,
@@ -300,9 +322,9 @@ struct gfs2_glock {
        struct gfs2_sbd *gl_sbd;
        unsigned long gl_flags;         /* GLF_... */
        struct lm_lockname gl_name;
-       atomic_t gl_ref;
 
-       spinlock_t gl_spin;
+       struct lockref gl_lockref;
+#define gl_spin gl_lockref.lock
 
        /* State fields protected by gl_spin */
        unsigned int gl_state:2,        /* Current state */
@@ -516,7 +538,6 @@ struct gfs2_tune {
 
        unsigned int gt_logd_secs;
 
-       unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
        unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
        unsigned int gt_quota_scale_num; /* Numerator */
        unsigned int gt_quota_scale_den; /* Denominator */
@@ -694,6 +715,7 @@ struct gfs2_sbd {
        struct list_head sd_quota_list;
        atomic_t sd_quota_count;
        struct mutex sd_quota_mutex;
+       struct mutex sd_quota_sync_mutex;
        wait_queue_head_t sd_quota_wait;
        struct list_head sd_trunc_list;
        spinlock_t sd_trunc_lock;
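
The incore.h hunks above drop the cached bitmap pointer from struct gfs2_rbm in favour of a bitmap index (bii) plus the new bi_blocks count, with rbm_bi() and gfs2_rbm_to_block() recomputing the bitmap pointer and the absolute block number on demand. The standalone sketch below reproduces only that address arithmetic; the mini_* structs are hypothetical miniatures, and NBBY_BLOCKS merely mirrors the idea that each bitmap byte describes four blocks (GFS2_NBBY).

#include <stdio.h>
#include <stdint.h>

#define NBBY_BLOCKS 4   /* blocks described per bitmap byte (like GFS2_NBBY) */

/* Hypothetical miniatures of gfs2_bitmap / gfs2_rgrpd / gfs2_rbm. */
struct mini_bitmap {
	uint32_t bi_start;   /* byte offset of this bitmap within the rgrp */
	uint32_t bi_blocks;  /* number of blocks this bitmap covers */
};

struct mini_rgrp {
	uint64_t rd_data0;          /* first data block of the rgrp */
	struct mini_bitmap bits[2];
};

struct mini_rbm {
	struct mini_rgrp *rgd;
	uint32_t offset;     /* block offset within the current bitmap */
	int bii;             /* index of the current bitmap */
};

static struct mini_bitmap *rbm_bi(const struct mini_rbm *rbm)
{
	return &rbm->rgd->bits[rbm->bii];
}

static uint64_t rbm_to_block(const struct mini_rbm *rbm)
{
	return rbm->rgd->rd_data0 +
	       (uint64_t)rbm_bi(rbm)->bi_start * NBBY_BLOCKS + rbm->offset;
}

int main(void)
{
	struct mini_rgrp rgd = {
		.rd_data0 = 1000,
		.bits = { { 0, 400 }, { 100, 400 } }, /* two 100-byte bitmaps */
	};
	struct mini_rbm rbm = { .rgd = &rgd, .offset = 7, .bii = 1 };

	/* 1000 + 100 * 4 + 7 = 1407 */
	printf("absolute block = %llu\n",
	       (unsigned long long)rbm_to_block(&rbm));
	return 0;
}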
index ced3257f06e84bd24b6d96063a4f88f5e2b81525..1615df16cf4eb9ed5c5c4f20ee46c56bc00d3143 100644 (file)
@@ -379,6 +379,7 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
 static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+       struct gfs2_alloc_parms ap = { .target = RES_DINODE, .aflags = flags, };
        int error;
        int dblocks = 1;
 
@@ -386,7 +387,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
        if (error)
                goto out;
 
-       error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+       error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_quota;
 
@@ -472,6 +473,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                       struct gfs2_inode *ip, int arq)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+       struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
        int error;
 
        if (arq) {
@@ -479,7 +481,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                if (error)
                        goto fail_quota_locks;
 
-               error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+               error = gfs2_inplace_reserve(dip, &ap);
                if (error)
                        goto fail_quota_locks;
 
@@ -584,17 +586,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (!IS_ERR(inode)) {
                d = d_splice_alias(inode, dentry);
                error = 0;
-               if (file && !IS_ERR(d)) {
-                       if (d == NULL)
-                               d = dentry;
-                       if (S_ISREG(inode->i_mode))
-                               error = finish_open(file, d, gfs2_open_common, opened);
-                       else
+               if (file) {
+                       if (S_ISREG(inode->i_mode)) {
+                               WARN_ON(d != NULL);
+                               error = finish_open(file, dentry, gfs2_open_common, opened);
+                       } else {
                                error = finish_no_open(file, d);
+                       }
+               } else {
+                       dput(d);
                }
                gfs2_glock_dq_uninit(ghs);
-               if (IS_ERR(d))
-                       return PTR_ERR(d);
                return error;
        } else if (error != -ENOENT) {
                goto fail_gunlock;
@@ -713,7 +715,7 @@ fail_gunlock2:
 fail_free_inode:
        if (ip->i_gl)
                gfs2_glock_put(ip->i_gl);
-       gfs2_rs_delete(ip);
+       gfs2_rs_delete(ip, NULL);
        free_inode_nonrcu(inode);
        inode = NULL;
 fail_gunlock:
@@ -781,8 +783,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
                error = finish_open(file, dentry, gfs2_open_common, opened);
 
        gfs2_glock_dq_uninit(&gh);
-       if (error)
+       if (error) {
+               dput(d);
                return ERR_PTR(error);
+       }
        return d;
 }
 
@@ -874,11 +878,12 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
        error = 0;
 
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
                error = gfs2_quota_lock_check(dip);
                if (error)
                        goto out_gunlock;
 
-               error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+               error = gfs2_inplace_reserve(dip, &ap);
                if (error)
                        goto out_gunlock_q;
 
@@ -1163,14 +1168,16 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
        d = __gfs2_lookup(dir, dentry, file, opened);
        if (IS_ERR(d))
                return PTR_ERR(d);
-       if (d == NULL)
-               d = dentry;
-       if (d->d_inode) {
+       if (d != NULL)
+               dentry = d;
+       if (dentry->d_inode) {
                if (!(*opened & FILE_OPENED))
-                       return finish_no_open(file, d);
+                       return finish_no_open(file, dentry);
+               dput(d);
                return 0;
        }
 
+       BUG_ON(d != NULL);
        if (!(flags & O_CREAT))
                return -ENOENT;
 
@@ -1385,11 +1392,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                goto out_gunlock;
 
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
                error = gfs2_quota_lock_check(ndip);
                if (error)
                        goto out_gunlock;
 
-               error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
+               error = gfs2_inplace_reserve(ndip, &ap);
                if (error)
                        goto out_gunlock_q;
 
@@ -1506,13 +1514,6 @@ out:
        return NULL;
 }
 
-static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
-{
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               kfree(s);
-}
-
 /**
  * gfs2_permission -
  * @inode: The inode
@@ -1864,7 +1865,7 @@ const struct inode_operations gfs2_dir_iops = {
 const struct inode_operations gfs2_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = gfs2_follow_link,
-       .put_link = gfs2_put_link,
+       .put_link = kfree_put_link,
        .permission = gfs2_permission,
        .setattr = gfs2_setattr,
        .getattr = gfs2_getattr,
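
Several hunks above (aops.c, bmap.c, file.c, inode.c) switch gfs2_inplace_reserve() from separate target/aflags arguments to a struct gfs2_alloc_parms, so call sites set only the fields they need via designated initializers and new parameters can later be added without touching every caller. A minimal sketch of that calling convention follows; alloc_parms and reserve_blocks() are hypothetical stand-ins, not the gfs2 functions themselves.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for struct gfs2_alloc_parms. */
struct alloc_parms {
	uint32_t target;  /* number of blocks the caller would like */
	uint32_t aflags;  /* allocation flags, e.g. an Orlov-style hint */
};

/* Stand-in for gfs2_inplace_reserve(): only reads the parameter struct. */
static int reserve_blocks(const struct alloc_parms *ap)
{
	printf("reserving %u block(s), flags 0x%x\n", ap->target, ap->aflags);
	return 0;
}

int main(void)
{
	/* Callers name only what they need; unnamed fields default to zero. */
	struct alloc_parms ap = { .target = 8 };

	return reserve_blocks(&ap);
}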
index 19ff5e8c285c4c0764d402a719146f1416d9f35a..82303b4749582cd3c00d402a9f42b9972b1de677 100644 (file)
@@ -51,7 +51,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
 {
        spin_lock_init(&gt->gt_spin);
 
-       gt->gt_quota_simul_sync = 64;
        gt->gt_quota_warn_period = 10;
        gt->gt_quota_scale_num = 1;
        gt->gt_quota_scale_den = 1;
@@ -94,6 +93,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
        INIT_LIST_HEAD(&sdp->sd_quota_list);
        mutex_init(&sdp->sd_quota_mutex);
+       mutex_init(&sdp->sd_quota_sync_mutex);
        init_waitqueue_head(&sdp->sd_quota_wait);
        INIT_LIST_HEAD(&sdp->sd_trunc_list);
        spin_lock_init(&sdp->sd_trunc_lock);
index db441359ee8cd2f31fa4a980afe8c54a3f715447..4a9726aa191f59d1a01a7cff7241ad4a2285d687 100644 (file)
@@ -289,6 +289,26 @@ static void slot_hold(struct gfs2_quota_data *qd)
        spin_unlock(&qd_lru_lock);
 }
 
+static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+                            unsigned int bit, int new_value)
+{
+       unsigned int c, o, b = bit;
+       int old_value;
+
+       c = b / (8 * PAGE_SIZE);
+       b %= 8 * PAGE_SIZE;
+       o = b / 8;
+       b %= 8;
+
+       old_value = (bitmap[c][o] & (1 << b));
+       gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+       if (new_value)
+               bitmap[c][o] |= 1 << b;
+       else
+               bitmap[c][o] &= ~(1 << b);
+}
+
 static void slot_put(struct gfs2_quota_data *qd)
 {
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
@@ -363,6 +383,25 @@ static void bh_put(struct gfs2_quota_data *qd)
        mutex_unlock(&sdp->sd_quota_mutex);
 }
 
+static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+                        u64 *sync_gen)
+{
+       if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+           !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+           (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+               return 0;
+
+       list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+
+       set_bit(QDF_LOCKED, &qd->qd_flags);
+       gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+       atomic_inc(&qd->qd_count);
+       qd->qd_change_sync = qd->qd_change;
+       gfs2_assert_warn(sdp, qd->qd_slot_count);
+       qd->qd_slot_count++;
+       return 1;
+}
+
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
        struct gfs2_quota_data *qd = NULL;
@@ -377,22 +416,9 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
        spin_lock(&qd_lru_lock);
 
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-               if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-                   !test_bit(QDF_CHANGE, &qd->qd_flags) ||
-                   qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
-                       continue;
-
-               list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-               set_bit(QDF_LOCKED, &qd->qd_flags);
-               gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-               atomic_inc(&qd->qd_count);
-               qd->qd_change_sync = qd->qd_change;
-               gfs2_assert_warn(sdp, qd->qd_slot_count);
-               qd->qd_slot_count++;
-               found = 1;
-
-               break;
+               found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
+               if (found)
+                       break;
        }
 
        if (!found)
@@ -416,43 +442,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
        return 0;
 }
 
-static int qd_trylock(struct gfs2_quota_data *qd)
-{
-       struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-       if (sdp->sd_vfs->s_flags & MS_RDONLY)
-               return 0;
-
-       spin_lock(&qd_lru_lock);
-
-       if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-           !test_bit(QDF_CHANGE, &qd->qd_flags)) {
-               spin_unlock(&qd_lru_lock);
-               return 0;
-       }
-
-       list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-       set_bit(QDF_LOCKED, &qd->qd_flags);
-       gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-       atomic_inc(&qd->qd_count);
-       qd->qd_change_sync = qd->qd_change;
-       gfs2_assert_warn(sdp, qd->qd_slot_count);
-       qd->qd_slot_count++;
-
-       spin_unlock(&qd_lru_lock);
-
-       gfs2_assert_warn(sdp, qd->qd_change_sync);
-       if (bh_get(qd)) {
-               clear_bit(QDF_LOCKED, &qd->qd_flags);
-               slot_put(qd);
-               qd_put(qd);
-               return 0;
-       }
-
-       return 1;
-}
-
 static void qd_unlock(struct gfs2_quota_data *qd)
 {
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
@@ -763,6 +752,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 {
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+       struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
@@ -815,7 +805,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
        reserved = 1 + (nalloc * (data_blocks + ind_blocks));
-       error = gfs2_inplace_reserve(ip, reserved, 0);
+       ap.target = reserved;
+       error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_alloc;
 
@@ -1001,9 +992,11 @@ static int need_sync(struct gfs2_quota_data *qd)
 
 void gfs2_quota_unlock(struct gfs2_inode *ip)
 {
+       struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;
+       int found;
 
        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;
@@ -1016,9 +1009,25 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
                sync = need_sync(qd);
 
                gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
+               if (!sync)
+                       continue;
+
+               spin_lock(&qd_lru_lock);
+               found = qd_check_sync(sdp, qd, NULL);
+               spin_unlock(&qd_lru_lock);
 
-               if (sync && qd_trylock(qd))
-                       qda[count++] = qd;
+               if (!found)
+                       continue;
+
+               gfs2_assert_warn(sdp, qd->qd_change_sync);
+               if (bh_get(qd)) {
+                       clear_bit(QDF_LOCKED, &qd->qd_flags);
+                       slot_put(qd);
+                       qd_put(qd);
+                       continue;
+               }
+
+               qda[count++] = qd;
        }
 
        if (count) {
@@ -1118,17 +1127,18 @@ int gfs2_quota_sync(struct super_block *sb, int type)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
-       unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
+       unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;
 
-       sdp->sd_quota_sync_gen++;
-
        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;
 
+       mutex_lock(&sdp->sd_quota_sync_mutex);
+       sdp->sd_quota_sync_gen++;
+
        do {
                num_qd = 0;
 
@@ -1153,6 +1163,7 @@ int gfs2_quota_sync(struct super_block *sb, int type)
                }
        } while (!error && num_qd == max_qd);
 
+       mutex_unlock(&sdp->sd_quota_sync_mutex);
        kfree(qda);
 
        return error;
@@ -1573,10 +1584,12 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
        if (gfs2_is_stuffed(ip))
                alloc_required = 1;
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .aflags = 0, };
                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                                       &data_blocks, &ind_blocks);
                blocks = 1 + data_blocks + ind_blocks;
-               error = gfs2_inplace_reserve(ip, blocks, 0);
+               ap.target = blocks;
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_i;
                blocks += gfs2_rg_blocks(ip, blocks);
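
The quota.c hunks above serialize whole-filesystem quota syncs with the new sd_quota_sync_mutex and bump sd_quota_sync_gen once per pass, while the new qd_check_sync() helper skips entries already written at the current generation. The sketch below shows only that generation-counter-under-a-mutex idea in standalone form; quota_state, quota_entry and the helpers are hypothetical names, not the gfs2 implementation.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

/* Hypothetical miniature of the per-filesystem quota sync state. */
struct quota_state {
	pthread_mutex_t sync_mutex;  /* serializes whole sync passes */
	uint64_t sync_gen;           /* bumped once per sync pass */
};

struct quota_entry {
	uint64_t last_synced_gen;    /* generation this entry was synced at */
};

/* Skip entries already written out at (or after) the current generation. */
static int needs_sync(const struct quota_entry *qd, uint64_t gen)
{
	return qd->last_synced_gen < gen;
}

static void sync_pass(struct quota_state *st, struct quota_entry *qd, int n)
{
	pthread_mutex_lock(&st->sync_mutex);
	st->sync_gen++;
	for (int i = 0; i < n; i++) {
		if (needs_sync(&qd[i], st->sync_gen)) {
			printf("syncing entry %d at gen %llu\n", i,
			       (unsigned long long)st->sync_gen);
			qd[i].last_synced_gen = st->sync_gen;
		}
	}
	pthread_mutex_unlock(&st->sync_mutex);
}

int main(void)
{
	struct quota_state st = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct quota_entry qd[2] = { { 0 }, { 0 } };

	sync_pass(&st, qd, 2);  /* pass 1: both entries written at gen 1 */
	sync_pass(&st, qd, 2);  /* pass 2: a new generation, written again */
	return 0;
}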
index 69317435faa723c9288d390407d9343fb6bf1096..4d83abdd5635273b3e0af9589eec83226be249ff 100644 (file)
@@ -81,11 +81,12 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                               unsigned char new_state)
 {
        unsigned char *byte1, *byte2, *end, cur_state;
-       unsigned int buflen = rbm->bi->bi_len;
+       struct gfs2_bitmap *bi = rbm_bi(rbm);
+       unsigned int buflen = bi->bi_len;
        const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
 
-       byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
-       end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
+       byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+       end = bi->bi_bh->b_data + bi->bi_offset + buflen;
 
        BUG_ON(byte1 >= end);
 
@@ -95,18 +96,17 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
                       "new_state=%d\n", rbm->offset, cur_state, new_state);
                printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
-                      (unsigned long long)rbm->rgd->rd_addr,
-                      rbm->bi->bi_start);
+                      (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
                printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
-                      rbm->bi->bi_offset, rbm->bi->bi_len);
+                      bi->bi_offset, bi->bi_len);
                dump_stack();
                gfs2_consist_rgrpd(rbm->rgd);
                return;
        }
        *byte1 ^= (cur_state ^ new_state) << bit;
 
-       if (do_clone && rbm->bi->bi_clone) {
-               byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+       if (do_clone && bi->bi_clone) {
+               byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
                cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
                *byte2 ^= (cur_state ^ new_state) << bit;
        }
@@ -121,7 +121,8 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
 
 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
 {
-       const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
+       struct gfs2_bitmap *bi = rbm_bi(rbm);
+       const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
        const u8 *byte;
        unsigned int bit;
 
@@ -252,28 +253,52 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 {
        u64 rblock = block - rbm->rgd->rd_data0;
-       u32 x;
 
        if (WARN_ON_ONCE(rblock > UINT_MAX))
                return -EINVAL;
        if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
                return -E2BIG;
 
-       rbm->bi = rbm->rgd->rd_bits;
+       rbm->bii = 0;
        rbm->offset = (u32)(rblock);
        /* Check if the block is within the first block */
-       if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+       if (rbm->offset < rbm_bi(rbm)->bi_blocks)
                return 0;
 
        /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
        rbm->offset += (sizeof(struct gfs2_rgrp) -
                        sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
-       x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-       rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-       rbm->bi += x;
+       rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+       rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
        return 0;
 }
 
+/**
+ * gfs2_rbm_incr - increment an rbm structure
+ * @rbm: The rbm with rgd already set correctly
+ *
+ * This function takes an existing rbm structure and increments it to the next
+ * viable block offset.
+ *
+ * Returns: If incrementing the offset would cause the rbm to go past the
+ *          end of the rgrp, true is returned, otherwise false.
+ *
+ */
+
+static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+{
+       if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
+               rbm->offset++;
+               return false;
+       }
+       if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
+               return true;
+
+       rbm->offset = 0;
+       rbm->bii++;
+       return false;
+}
+
 /**
  * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
  * @rbm: Position to search (value/result)
@@ -285,7 +310,6 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 
 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 {
-       u64 block;
        u32 n;
        u8 res;
 
@@ -296,8 +320,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
                (*len)--;
                if (*len == 0)
                        return true;
-               block = gfs2_rbm_to_block(rbm);
-               if (gfs2_rbm_from_block(rbm, block + 1))
+               if (gfs2_rbm_incr(rbm))
                        return true;
        }
 
@@ -328,6 +351,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
        u32 chunk_size;
        u8 *ptr, *start, *end;
        u64 block;
+       struct gfs2_bitmap *bi;
 
        if (n_unaligned &&
            gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -336,11 +360,12 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
        n_unaligned = len & 3;
        /* Start is now byte aligned */
        while (len > 3) {
-               start = rbm.bi->bi_bh->b_data;
-               if (rbm.bi->bi_clone)
-                       start = rbm.bi->bi_clone;
-               end = start + rbm.bi->bi_bh->b_size;
-               start += rbm.bi->bi_offset;
+               bi = rbm_bi(&rbm);
+               start = bi->bi_bh->b_data;
+               if (bi->bi_clone)
+                       start = bi->bi_clone;
+               end = start + bi->bi_bh->b_size;
+               start += bi->bi_offset;
                BUG_ON(rbm.offset & 3);
                start += (rbm.offset / GFS2_NBBY);
                bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -605,11 +630,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
        RB_CLEAR_NODE(&rs->rs_node);
 
        if (rs->rs_free) {
+               struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
                /* return reserved blocks to the rgrp */
                BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
                rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
                rs->rs_free = 0;
-               clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+               clear_bit(GBF_FULL, &bi->bi_flags);
                smp_mb__after_clear_bit();
        }
 }
@@ -634,14 +661,13 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
+ * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip)
+void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
 {
-       struct inode *inode = &ip->i_inode;
-
        down_write(&ip->i_rw_mutex);
-       if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
+       if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
                gfs2_rs_deltree(ip->i_res);
                BUG_ON(ip->i_res->rs_free);
                kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -743,18 +769,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                /* header block */
                } else if (x == 0) {
                        bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                /* last block */
                } else if (x + 1 == length) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                /* other blocks */
                } else {
                        bytes = sdp->sd_sb.sb_bsize -
@@ -762,6 +791,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                }
 
                bytes_left -= bytes;
@@ -1392,12 +1422,12 @@ static void rs_insert(struct gfs2_inode *ip)
  * rg_mblk_search - find a group of multiple free blocks to form a reservation
  * @rgd: the resource group descriptor
  * @ip: pointer to the inode for which we're reserving blocks
- * @requested: number of blocks required for this allocation
+ * @ap: the allocation parameters
  *
  */
 
 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
-                          unsigned requested)
+                          const struct gfs2_alloc_parms *ap)
 {
        struct gfs2_rbm rbm = { .rgd = rgd, };
        u64 goal;
@@ -1410,7 +1440,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
        if (S_ISDIR(inode->i_mode))
                extlen = 1;
        else {
-               extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+               extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
                extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
        }
        if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
@@ -1554,14 +1584,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                         const struct gfs2_inode *ip, bool nowrap)
 {
        struct buffer_head *bh;
-       struct gfs2_bitmap *initial_bi;
+       int initial_bii;
        u32 initial_offset;
        u32 offset;
        u8 *buffer;
-       int index;
        int n = 0;
        int iters = rbm->rgd->rd_length;
        int ret;
+       struct gfs2_bitmap *bi;
 
        /* If we are not starting at the beginning of a bitmap, then we
         * need to add one to the bitmap count to ensure that we search
@@ -1571,52 +1601,53 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                iters++;
 
        while(1) {
-               if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+               bi = rbm_bi(rbm);
+               if (test_bit(GBF_FULL, &bi->bi_flags) &&
                    (state == GFS2_BLKST_FREE))
                        goto next_bitmap;
 
-               bh = rbm->bi->bi_bh;
-               buffer = bh->b_data + rbm->bi->bi_offset;
+               bh = bi->bi_bh;
+               buffer = bh->b_data + bi->bi_offset;
                WARN_ON(!buffer_uptodate(bh));
-               if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
-                       buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+               if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+                       buffer = bi->bi_clone + bi->bi_offset;
                initial_offset = rbm->offset;
-               offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+               offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
                if (offset == BFITNOENT)
                        goto bitmap_full;
                rbm->offset = offset;
                if (ip == NULL)
                        return 0;
 
-               initial_bi = rbm->bi;
+               initial_bii = rbm->bii;
                ret = gfs2_reservation_check_and_update(rbm, ip, minext);
                if (ret == 0)
                        return 0;
                if (ret > 0) {
-                       n += (rbm->bi - initial_bi);
+                       n += (rbm->bii - initial_bii);
                        goto next_iter;
                }
                if (ret == -E2BIG) {
-                       index = 0;
+                       rbm->bii = 0;
                        rbm->offset = 0;
-                       n += (rbm->bi - initial_bi);
+                       n += (rbm->bii - initial_bii);
                        goto res_covered_end_of_rgrp;
                }
                return ret;
 
 bitmap_full:   /* Mark bitmap as full and fall through */
-               if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
-                       set_bit(GBF_FULL, &rbm->bi->bi_flags);
+               if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+                       struct gfs2_bitmap *bi = rbm_bi(rbm);
+                       set_bit(GBF_FULL, &bi->bi_flags);
+               }
 
 next_bitmap:   /* Find next bitmap in the rgrp */
                rbm->offset = 0;
-               index = rbm->bi - rbm->rgd->rd_bits;
-               index++;
-               if (index == rbm->rgd->rd_length)
-                       index = 0;
+               rbm->bii++;
+               if (rbm->bii == rbm->rgd->rd_length)
+                       rbm->bii = 0;
 res_covered_end_of_rgrp:
-               rbm->bi = &rbm->rgd->rd_bits[index];
-               if ((index == 0) && nowrap)
+               if ((rbm->bii == 0) && nowrap)
                        break;
                n++;
 next_iter:
@@ -1645,7 +1676,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
        struct gfs2_inode *ip;
        int error;
        int found = 0;
-       struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+       struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
 
        while (1) {
                down_write(&sdp->sd_log_flush_lock);
@@ -1800,12 +1831,12 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *b
 /**
  * gfs2_inplace_reserve - Reserve space in the filesystem
  * @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
+ * @ap: the allocation parameters
  *
  * Returns: errno
  */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *begin = NULL;
@@ -1817,17 +1848,16 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
        if (sdp->sd_args.ar_rgrplvb)
                flags |= GL_SKIP;
-       if (gfs2_assert_warn(sdp, requested))
+       if (gfs2_assert_warn(sdp, ap->target))
                return -EINVAL;
        if (gfs2_rs_active(rs)) {
                begin = rs->rs_rbm.rgd;
-               flags = 0; /* Yoda: Do or do not. There is no try */
        } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
                rs->rs_rbm.rgd = begin = ip->i_rgd;
        } else {
                rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
        }
-       if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+       if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
                skip = gfs2_orlov_skip(ip);
        if (rs->rs_rbm.rgd == NULL)
                return -EBADSLT;
@@ -1869,14 +1899,14 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
                /* Get a reservation if we don't already have one */
                if (!gfs2_rs_active(rs))
-                       rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
+                       rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
 
                /* Skip rgrps when we can't get a reservation on first pass */
                if (!gfs2_rs_active(rs) && (loops < 1))
                        goto check_rgrp;
 
                /* If rgrp has enough free space, use it */
-               if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
+               if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
                        ip->i_rgd = rs->rs_rbm.rgd;
                        return 0;
                }
@@ -1973,14 +2003,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
 
        *n = 1;
        block = gfs2_rbm_to_block(rbm);
-       gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+       gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
        gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
        block++;
        while (*n < elen) {
                ret = gfs2_rbm_from_block(&pos, block);
                if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
                        break;
-               gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+               gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
                gfs2_setbit(&pos, true, GFS2_BLKST_USED);
                (*n)++;
                block++;
@@ -2001,6 +2031,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
                                     u32 blen, unsigned char new_state)
 {
        struct gfs2_rbm rbm;
+       struct gfs2_bitmap *bi;
 
        rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
        if (!rbm.rgd) {
@@ -2011,15 +2042,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 
        while (blen--) {
                gfs2_rbm_from_block(&rbm, bstart);
+               bi = rbm_bi(&rbm);
                bstart++;
-               if (!rbm.bi->bi_clone) {
-                       rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
-                                                  GFP_NOFS | __GFP_NOFAIL);
-                       memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
-                              rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
-                              rbm.bi->bi_len);
+               if (!bi->bi_clone) {
+                       bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+                                              GFP_NOFS | __GFP_NOFAIL);
+                       memcpy(bi->bi_clone + bi->bi_offset,
+                              bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
                }
-               gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+               gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
                gfs2_setbit(&rbm, false, new_state);
        }
 
@@ -2102,6 +2133,35 @@ out:
        spin_unlock(&rgd->rd_rsspin);
 }
 
+/**
+ * gfs2_set_alloc_start - Set starting point for block allocation
+ * @rbm: The rbm which will be set to the required location
+ * @ip: The gfs2 inode
+ * @dinode: Flag to say if allocation includes a new inode
+ *
+ * This sets the starting point from the reservation if one is active
+ * otherwise it falls back to guessing a start point based on the
+ * inode's goal block or the last allocation point in the rgrp.
+ */
+
+static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+                                const struct gfs2_inode *ip, bool dinode)
+{
+       u64 goal;
+
+       if (gfs2_rs_active(ip->i_res)) {
+               *rbm = ip->i_res->rs_rbm;
+               return;
+       }
+
+       if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+               goal = ip->i_goal;
+       else
+               goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
+
+       gfs2_rbm_from_block(rbm, goal);
+}
+
 /**
  * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
  * @ip: the inode to allocate the block for
@@ -2120,22 +2180,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
        struct buffer_head *dibh;
        struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
        unsigned int ndata;
-       u64 goal;
        u64 block; /* block, within the file system scope */
        int error;
 
-       if (gfs2_rs_active(ip->i_res))
-               goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
-       else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
-               goal = ip->i_goal;
-       else
-               goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
-
-       gfs2_rbm_from_block(&rbm, goal);
+       gfs2_set_alloc_start(&rbm, ip, dinode);
        error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
 
        if (error == -ENOSPC) {
-               gfs2_rbm_from_block(&rbm, goal);
+               gfs2_set_alloc_start(&rbm, ip, dinode);
                error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
        }
 
index 5b3f4a896e6ca305c7708478d73286b23ea3e3e5..3a10d2ffbbe7b34e93fd37a8dbc68028b8bfa041 100644 (file)
@@ -40,7 +40,7 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 
 #define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
 extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
@@ -48,7 +48,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
 
 extern int gfs2_rs_alloc(struct gfs2_inode *ip);
 extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
+extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
 extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
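
/*
 * Editor's illustration (not part of this commit): with the new prototype
 * above, callers bundle the block count into a struct gfs2_alloc_parms
 * rather than passing separate "requested"/"aflags" arguments.  The field
 * names .target and .aflags are taken from the rgrp.c hunks earlier in this
 * commit; example_reserve_blocks() is a hypothetical caller.
 */
static int example_reserve_blocks(struct gfs2_inode *ip, u32 n)
{
	struct gfs2_alloc_parms ap = { .target = n, .aflags = 0 };

	return gfs2_inplace_reserve(ip, &ap);
}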
index e5639dec66c49dc7361ff0ed79828301031b675d..35da5b19c0deb62b59e4124ab4566f4dc9279ef9 100644 (file)
@@ -1526,7 +1526,7 @@ out_unlock:
 out:
        /* Case 3 starts here */
        truncate_inode_pages(&inode->i_data, 0);
-       gfs2_rs_delete(ip);
+       gfs2_rs_delete(ip, NULL);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
index aa5c48044966697c2365712ab53a20d7bc5d8958..d09f6edda0ff8d55f31ef04dba7f4720852f7a8d 100644 (file)
@@ -587,7 +587,6 @@ TUNE_ATTR(max_readahead, 0);
 TUNE_ATTR(complain_secs, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
-TUNE_ATTR(quota_simul_sync, 1);
 TUNE_ATTR(statfs_quantum, 1);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
@@ -597,7 +596,6 @@ static struct attribute *tune_attrs[] = {
        &tune_attr_max_readahead.attr,
        &tune_attr_complain_secs.attr,
        &tune_attr_statfs_slow.attr,
-       &tune_attr_quota_simul_sync.attr,
        &tune_attr_statfs_quantum.attr,
        &tune_attr_quota_scale.attr,
        &tune_attr_new_files_jdata.attr,
index 6402fb69d71bd838d943c4d60d1a8879d15f6217..f7109f689e6132d08b7fcfc6a0c69db63dc0c24f 100644 (file)
@@ -268,23 +268,3 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
        return rv;
 }
 
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
-                     unsigned int bit, int new_value)
-{
-       unsigned int c, o, b = bit;
-       int old_value;
-
-       c = b / (8 * PAGE_SIZE);
-       b %= 8 * PAGE_SIZE;
-       o = b / 8;
-       b %= 8;
-
-       old_value = (bitmap[c][o] & (1 << b));
-       gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
-       if (new_value)
-               bitmap[c][o] |= 1 << b;
-       else
-               bitmap[c][o] &= ~(1 << b);
-}
-
index 80535739ac7b2c5c1f98ac46e9c2cb4faf5d0eb5..b7ffb09b99ea2d231011b92c21fa64a3e0bcfa6d 100644 (file)
@@ -164,8 +164,6 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 #define gfs2_tune_get(sdp, field) \
 gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
 
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
-                     unsigned int bit, int new_value);
 int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
 
 #endif /* __UTIL_DOT_H__ */
index ecd37f30ab91986923c5fa8c0cdf02f1bd3d330b..8c6a6f6bdba978f9c3b37703a79d4c409516557b 100644 (file)
@@ -723,6 +723,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
 {
+       struct gfs2_alloc_parms ap = { .target = blks };
        struct buffer_head *dibh;
        int error;
 
@@ -734,7 +735,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
        if (error)
                return error;
 
-       error = gfs2_inplace_reserve(ip, blks, 0);
+       error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_gunlock_q;
 
index 380ab31b5e0f4870ee966cbcfd5af1e805035f58..3fe7b8e53290f94649865421aa9f458451599f07 100644 (file)
@@ -125,15 +125,14 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                hfs_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
@@ -141,7 +140,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        hfs_write_failed(mapping, end);
@@ -675,9 +674,9 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 static const struct file_operations hfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfs_file_fsync,
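
/*
 * Editor's note (illustration, not part of this commit): the same mechanical
 * conversion recurs in the filesystems below -- .aio_read/.aio_write are
 * replaced by .read_iter/.write_iter backed by the generic helpers.  A
 * converted table (example_fops is a placeholder name) looks like:
 */
static const struct file_operations example_fops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.read_iter	= generic_file_read_iter,
	.write		= do_sync_write,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
};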
index 37213d075f3c5c9f29029b280b093781ddb526ca..96d7a2ccded28f82ccdadde9bbe0820b4a86c7cb 100644 (file)
@@ -123,14 +123,14 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                 hfsplus_get_block);
 
        /*
@@ -139,7 +139,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        hfsplus_write_failed(mapping, end);
@@ -399,9 +399,9 @@ static const struct inode_operations hfsplus_file_inode_operations = {
 static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfsplus_file_fsync,
index 25437280a2071b8970efe6e394edb97a4433acd8..111a9916bcf518339335369592dcefe1280a29f4 100644 (file)
@@ -388,8 +388,8 @@ static const struct file_operations hostfs_file_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .splice_read    = generic_file_splice_read,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .write          = do_sync_write,
        .mmap           = generic_file_mmap,
        .open           = hostfs_file_open,
index 67c1a61e09558e0bb632638b0f65d8316ab9b5f2..1ff95c19a4695e5777e2a5749c61b8bec9e889f4 100644 (file)
@@ -198,9 +198,9 @@ const struct file_operations hpfs_file_ops =
 {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = hpfs_file_release,
        .fsync          = hpfs_file_fsync,
index 513e0d859a6c18d7b274a9ebe16afcc9e7f8eca4..6964003cfef80eda8d8b927cac3dffb812729567 100644 (file)
@@ -140,6 +140,10 @@ extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
  */
 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
 extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
+extern ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+                          unsigned long nr_segs, loff_t pos);
+extern ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+                           unsigned long nr_segs, loff_t pos);
 
 /*
  * splice.c
diff --git a/fs/iov-iter.c b/fs/iov-iter.c
new file mode 100644 (file)
index 0000000..ec461c8
--- /dev/null
@@ -0,0 +1,411 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+
+static size_t __iovec_copy_to_user(char *vaddr, const struct iovec *iov,
+                                  size_t base, size_t bytes, int atomic)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               if (atomic)
+                       left = __copy_to_user_inatomic(buf, vaddr, copy);
+               else
+                       left = __copy_to_user(buf, vaddr, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+static size_t ii_iovec_copy_to_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_to_user(kaddr + offset, iov,
+                                             i->iov_offset, bytes, 1);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_to_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes,
+               int check_access)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       if (check_access) {
+               might_sleep();
+               if (generic_segment_checks(iov, &i->nr_segs, &bytes,
+                                          VERIFY_WRITE))
+                       return 0;
+       }
+
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               /*
+                * Faults on the destination of a read are common, so do it
+                * before taking the kmap.
+                */
+               if (!fault_in_pages_writeable(buf, bytes)) {
+                       kaddr = kmap_atomic(page);
+                       left = __copy_to_user_inatomic(buf, kaddr + offset,
+                                                    bytes);
+                       kunmap_atomic(kaddr);
+                       if (left == 0)
+                               goto success;
+               }
+               kaddr = kmap(page);
+               left = copy_to_user(buf, kaddr + offset, bytes);
+               kunmap(page);
+success:
+               copied = bytes - left;
+       } else {
+               kaddr = kmap(page);
+               copied = __iovec_copy_to_user(kaddr + offset, iov,
+                                             i->iov_offset, bytes, 0);
+               kunmap(page);
+       }
+       return copied;
+}
+
+#ifdef CONFIG_BLOCK
+/*
+ * As an easily verifiable first pass, we implement all the methods that
+ * copy data to and from bvec pages with one function.  We implement it
+ * all with kmap_atomic().
+ */
+static size_t bvec_copy_tofrom_page(struct iov_iter *iter, struct page *page,
+                                   unsigned long page_offset, size_t bytes,
+                                   int topage)
+{
+       struct bio_vec *bvec = (struct bio_vec *)iter->data;
+       size_t bvec_offset = iter->iov_offset;
+       size_t remaining = bytes;
+       void *bvec_map;
+       void *page_map;
+       size_t copy;
+
+       page_map = kmap_atomic(page);
+
+       BUG_ON(bytes > iter->count);
+       while (remaining) {
+               BUG_ON(bvec->bv_len == 0);
+               BUG_ON(bvec_offset >= bvec->bv_len);
+               copy = min(remaining, bvec->bv_len - bvec_offset);
+               bvec_map = kmap_atomic(bvec->bv_page);
+               if (topage)
+                       memcpy(page_map + page_offset,
+                              bvec_map + bvec->bv_offset + bvec_offset,
+                              copy);
+               else
+                       memcpy(bvec_map + bvec->bv_offset + bvec_offset,
+                              page_map + page_offset,
+                              copy);
+               kunmap_atomic(bvec_map);
+               remaining -= copy;
+               bvec_offset += copy;
+               page_offset += copy;
+               if (bvec_offset == bvec->bv_len) {
+                       bvec_offset = 0;
+                       bvec++;
+               }
+       }
+
+       kunmap_atomic(page_map);
+
+       return bytes;
+}
+
+static size_t ii_bvec_copy_to_user_atomic(struct page *page, struct iov_iter *i,
+                                         unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_to_user(struct page *page, struct iov_iter *i,
+                                  unsigned long offset, size_t bytes,
+                                  int check_access)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_from_user_atomic(struct page *page,
+                                           struct iov_iter *i,
+                                           unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+static size_t ii_bvec_copy_from_user(struct page *page, struct iov_iter *i,
+                                    unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+
+/*
+ * bio_vecs have a stricter structure than iovecs that might have
+ * come from userspace.  There are no zero length bio_vec elements.
+ */
+static void ii_bvec_advance(struct iov_iter *i, size_t bytes)
+{
+       struct bio_vec *bvec = (struct bio_vec *)i->data;
+       size_t offset = i->iov_offset;
+       size_t delta;
+
+       BUG_ON(i->count < bytes);
+       while (bytes) {
+               BUG_ON(bvec->bv_len == 0);
+               BUG_ON(bvec->bv_len <= offset);
+               delta = min(bytes, bvec->bv_len - offset);
+               offset += delta;
+               i->count -= delta;
+               bytes -= delta;
+               if (offset == bvec->bv_len) {
+                       bvec++;
+                       offset = 0;
+               }
+       }
+
+       i->data = (unsigned long)bvec;
+       i->iov_offset = offset;
+}
+
+/*
+ * pages pointed to by bio_vecs are always pinned.
+ */
+static int ii_bvec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       return 0;
+}
+
+static size_t ii_bvec_single_seg_count(const struct iov_iter *i)
+{
+       const struct bio_vec *bvec = (struct bio_vec *)i->data;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, bvec->bv_len - i->iov_offset);
+}
+
+static int ii_bvec_shorten(struct iov_iter *i, size_t count)
+{
+       return -EINVAL;
+}
+
+struct iov_iter_ops ii_bvec_ops = {
+       .ii_copy_to_user_atomic = ii_bvec_copy_to_user_atomic,
+       .ii_copy_to_user = ii_bvec_copy_to_user,
+       .ii_copy_from_user_atomic = ii_bvec_copy_from_user_atomic,
+       .ii_copy_from_user = ii_bvec_copy_from_user,
+       .ii_advance = ii_bvec_advance,
+       .ii_fault_in_readable = ii_bvec_fault_in_readable,
+       .ii_single_seg_count = ii_bvec_single_seg_count,
+       .ii_shorten = ii_bvec_shorten,
+};
+EXPORT_SYMBOL(ii_bvec_ops);
+#endif /* CONFIG_BLOCK */
+
+static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
+                                    size_t base, size_t bytes, int atomic)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               if (atomic)
+                       left = __copy_from_user_inatomic(vaddr, buf, copy);
+               else
+                       left = __copy_from_user(vaddr, buf, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number
+ * of bytes which were copied.
+ */
+static size_t ii_iovec_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user(kaddr + offset, iov,
+                                               i->iov_offset, bytes, 1);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_from_user(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user(kaddr + offset, iov,
+                                               i->iov_offset, bytes, 0);
+       }
+       kunmap(page);
+       return copied;
+}
+
+static void ii_iovec_advance(struct iov_iter *i, size_t bytes)
+{
+       BUG_ON(i->count < bytes);
+
+       if (likely(i->nr_segs == 1)) {
+               i->iov_offset += bytes;
+               i->count -= bytes;
+       } else {
+               struct iovec *iov = (struct iovec *)i->data;
+               size_t base = i->iov_offset;
+               unsigned long nr_segs = i->nr_segs;
+
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments (without overrunning the iovec).
+                */
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
+                       int copy;
+
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               nr_segs--;
+                               base = 0;
+                       }
+               }
+               i->data = (unsigned long)iov;
+               i->iov_offset = base;
+               i->nr_segs = nr_segs;
+       }
+}
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char __user *buf = iov->iov_base + i->iov_offset;
+       bytes = min(bytes, iov->iov_len - i->iov_offset);
+       return fault_in_pages_readable(buf, bytes);
+}
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+static size_t ii_iovec_single_seg_count(const struct iov_iter *i)
+{
+       const struct iovec *iov = (struct iovec *)i->data;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, iov->iov_len - i->iov_offset);
+}
+
+static int ii_iovec_shorten(struct iov_iter *i, size_t count)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       i->nr_segs = iov_shorten(iov, i->nr_segs, count);
+       i->count = min(i->count, count);
+       return 0;
+}
+
+struct iov_iter_ops ii_iovec_ops = {
+       .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
+       .ii_copy_to_user = ii_iovec_copy_to_user,
+       .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
+       .ii_copy_from_user = ii_iovec_copy_from_user,
+       .ii_advance = ii_iovec_advance,
+       .ii_fault_in_readable = ii_iovec_fault_in_readable,
+       .ii_single_seg_count = ii_iovec_single_seg_count,
+       .ii_shorten = ii_iovec_shorten,
+};
+EXPORT_SYMBOL(ii_iovec_ops);
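
/*
 * Editor's sketch (assumptions flagged, not part of this commit): the
 * exported ii_iovec_ops and ii_bvec_ops tables let generic helpers dispatch
 * on whichever kind of iterator they are handed.  The "ops" member used
 * below is an assumption for illustration only; the real field and wrapper
 * names live in the uio.h side of this series, which is not shown here.
 */
static size_t example_copy_from_user_atomic(struct page *page,
					    struct iov_iter *i,
					    unsigned long offset, size_t bytes)
{
	/* assumed: i->ops points at ii_iovec_ops or ii_bvec_ops */
	return i->ops->ii_copy_from_user_atomic(page, i, offset, bytes);
}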
index 1506673c087e11ae820245baadc8ae74cb9f01b2..1d7ab8b7d41e4e19c2053ee5be00375a5d9ca3c4 100644 (file)
@@ -51,10 +51,10 @@ const struct file_operations jffs2_file_operations =
 {
        .llseek =       generic_file_llseek,
        .open =         generic_file_open,
-       .read =         do_sync_read,
-       .aio_read =     generic_file_aio_read,
-       .write =        do_sync_write,
-       .aio_write =    generic_file_aio_write,
+       .read =         do_sync_read,
+       .read_iter =    generic_file_read_iter,
+       .write =        do_sync_write,
+       .write_iter =   generic_file_write_iter,
        .unlocked_ioctl=jffs2_ioctl,
        .mmap =         generic_file_readonly_mmap,
        .fsync =        jffs2_fsync,
index dd7442c5835864b0e04bdff5befbd0f020232bfb..040b6c7725ad878d5347274ae1ca4cb76003a83c 100644 (file)
@@ -151,8 +151,8 @@ const struct file_operations jfs_file_operations = {
        .llseek         = generic_file_llseek,
        .write          = do_sync_write,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
index f4aab719add57bf354f90536486d88182be0b1b3..51652aaa3dc8c6eb22a7c576b7248b8b66b8c1a5 100644 (file)
@@ -331,15 +331,14 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-       const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+                            struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                jfs_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
@@ -347,7 +346,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        jfs_write_failed(mapping, end);
index c1a3e603279c9cbe4fb141fc2b9bdcfa1d76a033..7f464c513ba0a85a2fd4fe7923a9799bb319e92b 100644 (file)
@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 
        if (insert_inode_locked(inode) < 0) {
                rc = -EINVAL;
-               goto fail_unlock;
+               goto fail_put;
        }
 
        inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
-fail_unlock:
        clear_nlink(inode);
        unlock_new_inode(inode);
 fail_put:
index 3a3a9b53bf5a974d3c206f31ae402aaf752dd39c..8c50184931547791f0caa3e22d24cb138395409e 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/vfs.h>
 #include <linux/quotaops.h>
 #include <linux/mutex.h>
+#include <linux/namei.h>
 #include <linux/exportfs.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h> /* sync_mapping_buffers */
@@ -31,6 +32,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
        stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
        return 0;
 }
+EXPORT_SYMBOL(simple_getattr);
 
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -39,6 +41,7 @@ int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_namelen = NAME_MAX;
        return 0;
 }
+EXPORT_SYMBOL(simple_statfs);
 
 /*
  * Retaining negative dentries for an in-memory filesystem just wastes
@@ -66,6 +69,7 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned
        d_add(dentry, NULL);
        return NULL;
 }
+EXPORT_SYMBOL(simple_lookup);
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
@@ -75,12 +79,14 @@ int dcache_dir_open(struct inode *inode, struct file *file)
 
        return file->private_data ? 0 : -ENOMEM;
 }
+EXPORT_SYMBOL(dcache_dir_open);
 
 int dcache_dir_close(struct inode *inode, struct file *file)
 {
        dput(file->private_data);
        return 0;
 }
+EXPORT_SYMBOL(dcache_dir_close);
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 {
@@ -123,6 +129,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
        mutex_unlock(&dentry->d_inode->i_mutex);
        return offset;
 }
+EXPORT_SYMBOL(dcache_dir_lseek);
 
 /* Relationship between i_mode and the DT_xxx types */
 static inline unsigned char dt_type(struct inode *inode)
@@ -172,11 +179,13 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
        spin_unlock(&dentry->d_lock);
        return 0;
 }
+EXPORT_SYMBOL(dcache_readdir);
 
 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
 {
        return -EISDIR;
 }
+EXPORT_SYMBOL(generic_read_dir);
 
 const struct file_operations simple_dir_operations = {
        .open           = dcache_dir_open,
@@ -186,10 +195,12 @@ const struct file_operations simple_dir_operations = {
        .iterate        = dcache_readdir,
        .fsync          = noop_fsync,
 };
+EXPORT_SYMBOL(simple_dir_operations);
 
 const struct inode_operations simple_dir_inode_operations = {
        .lookup         = simple_lookup,
 };
+EXPORT_SYMBOL(simple_dir_inode_operations);
 
 static const struct super_operations simple_super_operations = {
        .statfs         = simple_statfs,
@@ -244,6 +255,7 @@ Enomem:
        deactivate_locked_super(s);
        return ERR_PTR(-ENOMEM);
 }
+EXPORT_SYMBOL(mount_pseudo);
 
 int simple_open(struct inode *inode, struct file *file)
 {
@@ -251,6 +263,7 @@ int simple_open(struct inode *inode, struct file *file)
                file->private_data = inode->i_private;
        return 0;
 }
+EXPORT_SYMBOL(simple_open);
 
 int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
@@ -263,6 +276,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
        d_instantiate(dentry, inode);
        return 0;
 }
+EXPORT_SYMBOL(simple_link);
 
 int simple_empty(struct dentry *dentry)
 {
@@ -283,6 +297,7 @@ out:
        spin_unlock(&dentry->d_lock);
        return ret;
 }
+EXPORT_SYMBOL(simple_empty);
 
 int simple_unlink(struct inode *dir, struct dentry *dentry)
 {
@@ -293,6 +308,7 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
        dput(dentry);
        return 0;
 }
+EXPORT_SYMBOL(simple_unlink);
 
 int simple_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -304,6 +320,7 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
        drop_nlink(dir);
        return 0;
 }
+EXPORT_SYMBOL(simple_rmdir);
 
 int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
@@ -330,6 +347,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        return 0;
 }
+EXPORT_SYMBOL(simple_rename);
 
 /**
  * simple_setattr - setattr for simple filesystem
@@ -370,6 +388,7 @@ int simple_readpage(struct file *file, struct page *page)
        unlock_page(page);
        return 0;
 }
+EXPORT_SYMBOL(simple_readpage);
 
 int simple_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
@@ -393,6 +412,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
        }
        return 0;
 }
+EXPORT_SYMBOL(simple_write_begin);
 
 /**
  * simple_write_end - .write_end helper for non-block-device FSes
@@ -444,6 +464,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
        return copied;
 }
+EXPORT_SYMBOL(simple_write_end);
 
 /*
  * the inodes created here are not hashed. If you use iunique to generate
@@ -512,6 +533,7 @@ out:
        dput(root);
        return -ENOMEM;
 }
+EXPORT_SYMBOL(simple_fill_super);
 
 static DEFINE_SPINLOCK(pin_fs_lock);
 
@@ -534,6 +556,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
        mntput(mnt);
        return 0;
 }
+EXPORT_SYMBOL(simple_pin_fs);
 
 void simple_release_fs(struct vfsmount **mount, int *count)
 {
@@ -545,6 +568,7 @@ void simple_release_fs(struct vfsmount **mount, int *count)
        spin_unlock(&pin_fs_lock);
        mntput(mnt);
 }
+EXPORT_SYMBOL(simple_release_fs);
 
 /**
  * simple_read_from_buffer - copy data from the buffer to user space
@@ -579,6 +603,7 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
        *ppos = pos + count;
        return count;
 }
+EXPORT_SYMBOL(simple_read_from_buffer);
 
 /**
  * simple_write_to_buffer - copy data from user space to the buffer
@@ -613,6 +638,7 @@ ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
        *ppos = pos + count;
        return count;
 }
+EXPORT_SYMBOL(simple_write_to_buffer);
 
 /**
  * memory_read_from_buffer - copy data from the buffer
@@ -644,6 +670,7 @@ ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
 
        return count;
 }
+EXPORT_SYMBOL(memory_read_from_buffer);
 
 /*
  * Transaction based IO.
@@ -665,6 +692,7 @@ void simple_transaction_set(struct file *file, size_t n)
        smp_mb();
        ar->size = n;
 }
+EXPORT_SYMBOL(simple_transaction_set);
 
 char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
 {
@@ -696,6 +724,7 @@ char *simple_transaction_get(struct file *file, const char __user *buf, size_t s
 
        return ar->data;
 }
+EXPORT_SYMBOL(simple_transaction_get);
 
 ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
 {
@@ -705,12 +734,14 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size
                return 0;
        return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
 }
+EXPORT_SYMBOL(simple_transaction_read);
 
 int simple_transaction_release(struct inode *inode, struct file *file)
 {
        free_page((unsigned long)file->private_data);
        return 0;
 }
+EXPORT_SYMBOL(simple_transaction_release);
 
 /* Simple attribute files */
 
@@ -746,12 +777,14 @@ int simple_attr_open(struct inode *inode, struct file *file,
 
        return nonseekable_open(inode, file);
 }
+EXPORT_SYMBOL_GPL(simple_attr_open);
 
 int simple_attr_release(struct inode *inode, struct file *file)
 {
        kfree(file->private_data);
        return 0;
 }
+EXPORT_SYMBOL_GPL(simple_attr_release);        /* GPL-only?  This?  Really? */
 
 /* read from the buffer that is filled with the get function */
 ssize_t simple_attr_read(struct file *file, char __user *buf,
@@ -787,6 +820,7 @@ out:
        mutex_unlock(&attr->mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(simple_attr_read);
 
 /* interpret the buffer as a number to call the set function with */
 ssize_t simple_attr_write(struct file *file, const char __user *buf,
@@ -819,6 +853,7 @@ out:
        mutex_unlock(&attr->mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(simple_attr_write);
 
 /**
  * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
@@ -957,39 +992,13 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        return 0;
 }
-
-EXPORT_SYMBOL(dcache_dir_close);
-EXPORT_SYMBOL(dcache_dir_lseek);
-EXPORT_SYMBOL(dcache_dir_open);
-EXPORT_SYMBOL(dcache_readdir);
-EXPORT_SYMBOL(generic_read_dir);
-EXPORT_SYMBOL(mount_pseudo);
-EXPORT_SYMBOL(simple_write_begin);
-EXPORT_SYMBOL(simple_write_end);
-EXPORT_SYMBOL(simple_dir_inode_operations);
-EXPORT_SYMBOL(simple_dir_operations);
-EXPORT_SYMBOL(simple_empty);
-EXPORT_SYMBOL(simple_fill_super);
-EXPORT_SYMBOL(simple_getattr);
-EXPORT_SYMBOL(simple_open);
-EXPORT_SYMBOL(simple_link);
-EXPORT_SYMBOL(simple_lookup);
-EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_SYMBOL(simple_readpage);
-EXPORT_SYMBOL(simple_release_fs);
-EXPORT_SYMBOL(simple_rename);
-EXPORT_SYMBOL(simple_rmdir);
-EXPORT_SYMBOL(simple_statfs);
 EXPORT_SYMBOL(noop_fsync);
-EXPORT_SYMBOL(simple_unlink);
-EXPORT_SYMBOL(simple_read_from_buffer);
-EXPORT_SYMBOL(simple_write_to_buffer);
-EXPORT_SYMBOL(memory_read_from_buffer);
-EXPORT_SYMBOL(simple_transaction_set);
-EXPORT_SYMBOL(simple_transaction_get);
-EXPORT_SYMBOL(simple_transaction_read);
-EXPORT_SYMBOL(simple_transaction_release);
-EXPORT_SYMBOL_GPL(simple_attr_open);
-EXPORT_SYMBOL_GPL(simple_attr_release);
-EXPORT_SYMBOL_GPL(simple_attr_read);
-EXPORT_SYMBOL_GPL(simple_attr_write);
+
+void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
+                               void *cookie)
+{
+       char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
+}
+EXPORT_SYMBOL(kfree_put_link);
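
/*
 * Editor's illustration (hypothetical names, not part of this commit): a
 * filesystem whose ->follow_link() hands back a kmalloc()ed string via
 * nd_set_link() can point ->put_link at the shared kfree_put_link() helper
 * instead of open-coding the kfree():
 */
static const struct inode_operations example_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= example_follow_link,	/* nd_set_link(nd, kmalloc'd buffer) */
	.put_link	= kfree_put_link,
};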
index 9c501449450dc9be6891e5d9c1a035ca31b5687b..427bb73e298f197d4cfc73b17baeffdd1c848c5d 100644 (file)
@@ -245,8 +245,8 @@ static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
                goto out;
        if (memchr_inv(buf, 0xff, super->s_writesize))
                err = -EIO;
-       kfree(buf);
 out:
+       kfree(buf);
        return err;
 }
 
index 57914fc32b62538f43909d35ffc031742b98a881..57f994e887b5344daa7a47f279c233d41188fba1 100644 (file)
@@ -264,8 +264,8 @@ const struct inode_operations logfs_reg_iops = {
 };
 
 const struct file_operations logfs_reg_fops = {
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = logfs_fsync,
        .unlocked_ioctl = logfs_ioctl,
        .llseek         = generic_file_llseek,
index 54360293bcb5cd0680c3042e6f0b9b87e342c649..b256c0690e5b66f5b16a2b81be033cc3b345dec6 100644 (file)
@@ -287,14 +287,14 @@ static int logfs_make_writeable(struct super_block *sb)
        if (err)
                return err;
 
+       /* Do one GC pass before any data gets dirtied */
+       logfs_gc_pass(sb);
+
        /* Check areas for trailing unaccounted data */
        err = logfs_check_areas(sb);
        if (err)
                return err;
 
-       /* Do one GC pass before any data gets dirtied */
-       logfs_gc_pass(sb);
-
        /* after all initializations are done, replay the journal
         * for rw-mounts, if necessary */
        err = logfs_replay_journal(sb);
index 6624684dd5decbd55845113000ad1b783f029ebb..f2a0cfcef11dc6e82044d2bf06013104063ee4a3 100644 (file)
@@ -18,7 +18,7 @@ config MINIX_FS
 
 config MINIX_FS_NATIVE_ENDIAN
        def_bool MINIX_FS
-       depends on H8300 || M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
+       depends on M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
 
 config MINIX_FS_BIG_ENDIAN_16BIT_INDEXED
        def_bool MINIX_FS
index adc6f5494231bc947f45d8a3c526db0b36f39bf3..346d8f37d342df53f5a7d9736431283e33eea431 100644 (file)
@@ -15,9 +15,9 @@
 const struct file_operations minix_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
index 645268f23eb64cb8c2931ce33391beb2d0080c36..caa28051e197e898e3c2bc52afce37bcbb284853 100644 (file)
@@ -2294,10 +2294,11 @@ out:
  * path_mountpoint - look up a path to be umounted
  * @dfd:       directory file descriptor to start walk from
  * @name:      full pathname to walk
+ * @path:      pointer to container for result
  * @flags:     lookup flags
  *
  * Look up the given name, but don't attempt to revalidate the last component.
- * Returns 0 and "path" will be valid on success; Retuns error otherwise.
+ * Returns 0 and "path" will be valid on success; Returns error otherwise.
  */
 static int
 path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
index da5c494834306178dc9efd3b3826d1b7f0e0d17b..3ee6e59ead55a9ca2398a30b86f347315532a8b5 100644 (file)
@@ -39,7 +39,7 @@ static int mnt_group_start = 1;
 static struct list_head *mount_hashtable __read_mostly;
 static struct list_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
-static struct rw_semaphore namespace_sem;
+static DECLARE_RWSEM(namespace_sem);
 
 /* /sys/fs */
 struct kobject *fs_kobj;
@@ -1849,14 +1849,10 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                br_write_lock(&vfsmount_lock);
                mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
-               br_write_unlock(&vfsmount_lock);
-       }
-       up_write(&sb->s_umount);
-       if (!err) {
-               br_write_lock(&vfsmount_lock);
                touch_mnt_namespace(mnt->mnt_ns);
                br_write_unlock(&vfsmount_lock);
        }
+       up_write(&sb->s_umount);
        return err;
 }
 
@@ -2444,9 +2440,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                return ERR_CAST(new);
        }
        new_ns->root = new;
-       br_write_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new->mnt_list);
-       br_write_unlock(&vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2767,8 +2761,6 @@ void __init mnt_init(void)
        unsigned u;
        int err;
 
-       init_rwsem(&namespace_sem);
-
        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
@@ -2802,11 +2794,7 @@ void put_mnt_ns(struct mnt_namespace *ns)
 {
        if (!atomic_dec_and_test(&ns->count))
                return;
-       namespace_lock();
-       br_write_lock(&vfsmount_lock);
-       umount_tree(ns->root, 0);
-       br_write_unlock(&vfsmount_lock);
-       namespace_unlock();
+       drop_collected_mounts(&ns->root->mnt);
        free_mnt_ns(ns);
 }
 
@@ -2875,7 +2863,7 @@ bool fs_fully_visible(struct file_system_type *type)
        if (unlikely(!ns))
                return false;
 
-       namespace_lock();
+       down_read(&namespace_sem);
        list_for_each_entry(mnt, &ns->list, mnt_list) {
                struct mount *child;
                if (mnt->mnt.mnt_sb->s_type != type)
@@ -2896,7 +2884,7 @@ bool fs_fully_visible(struct file_system_type *type)
        next:   ;
        }
 found:
-       namespace_unlock();
+       up_read(&namespace_sem);
        return visible;
 }
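
/*
 * Editor's illustration (not part of this commit): the hunks above replace
 * run-time initialization of namespace_sem with static initialization, which
 * is why the init_rwsem() call in mnt_init() can be dropped.  Minimal pattern:
 */
static DECLARE_RWSEM(example_sem);	/* statically initialized rw_semaphore */
/* ...instead of: static struct rw_semaphore example_sem; plus init_rwsem(&example_sem) at boot */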
 
index 3be047474bfc355731c08e2a6c27b40e73ce9e8b..c320ac52353e458723f46613893bcc0423ccc463 100644 (file)
@@ -339,9 +339,8 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
        if (val)
                goto finished;
 
-       DDPRINTK("ncp_lookup_validate: %s/%s not valid, age=%ld, server lookup\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               NCP_GET_AGE(dentry));
+       DDPRINTK("ncp_lookup_validate: %pd2 not valid, age=%ld, server lookup\n",
+               dentry, NCP_GET_AGE(dentry));
 
        len = sizeof(__name);
        if (ncp_is_server_root(dir)) {
@@ -359,8 +358,8 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
                        res = ncp_obtain_info(server, dir, __name, &(finfo.i));
        }
        finfo.volume = finfo.i.volNumber;
-       DDPRINTK("ncp_lookup_validate: looked for %s/%s, res=%d\n",
-               dentry->d_parent->d_name.name, __name, res);
+       DDPRINTK("ncp_lookup_validate: looked for %pd/%s, res=%d\n",
+               dentry->d_parent, __name, res);
        /*
         * If we didn't find it, or if it has a different dirEntNum to
         * what we remember, it's not valid any more.
@@ -454,8 +453,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
        ctl.page  = NULL;
        ctl.cache = NULL;
 
-       DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
+       DDPRINTK("ncp_readdir: reading %pD2, pos=%d\n", file,
                (int) ctx->pos);
 
        result = -EIO;
@@ -740,12 +738,10 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx,
        int more;
        size_t bufsize;
 
-       DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
+       DPRINTK("ncp_do_readdir: %pD2, fpos=%ld\n", file,
                (unsigned long) ctx->pos);
-       PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
-               dentry->d_name.name, NCP_FINFO(dir)->volNumber,
-               NCP_FINFO(dir)->dirEntNum);
+       PPRINTK("ncp_do_readdir: init %pD, volnum=%d, dirent=%u\n",
+               file, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum);
 
        err = ncp_initialize_search(server, dir, &seq);
        if (err) {
@@ -850,8 +846,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsig
        if (!ncp_conn_valid(server))
                goto finished;
 
-       PPRINTK("ncp_lookup: server lookup for %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       PPRINTK("ncp_lookup: server lookup for %pd2\n", dentry);
 
        len = sizeof(__name);
        if (ncp_is_server_root(dir)) {
@@ -867,8 +862,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsig
                if (!res)
                        res = ncp_obtain_info(server, dir, __name, &(finfo.i));
        }
-       PPRINTK("ncp_lookup: looked for %s/%s, res=%d\n",
-               dentry->d_parent->d_name.name, __name, res);
+       PPRINTK("ncp_lookup: looked for %pd2, res=%d\n", dentry, res);
        /*
         * If we didn't find an entry, make a negative dentry.
         */
@@ -915,8 +909,7 @@ out:
        return error;
 
 out_close:
-       PPRINTK("ncp_instantiate: %s/%s failed, closing file\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       PPRINTK("ncp_instantiate: %pd2 failed, closing file\n", dentry);
        ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
        goto out;
 }
@@ -930,8 +923,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
        int opmode;
        __u8 __name[NCP_MAXPATHLEN + 1];
        
-       PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+       PPRINTK("ncp_create_new: creating %pd2, mode=%hx\n", dentry, mode);
 
        ncp_age_dentry(server, dentry);
        len = sizeof(__name);
@@ -960,8 +952,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
                                error = -ENAMETOOLONG;
                        else if (result < 0)
                                error = result;
-                       DPRINTK("ncp_create: %s/%s failed\n",
-                               dentry->d_parent->d_name.name, dentry->d_name.name);
+                       DPRINTK("ncp_create: %pd2 failed\n", dentry);
                        goto out;
                }
                opmode = O_WRONLY;
@@ -994,8 +985,7 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        int error, len;
        __u8 __name[NCP_MAXPATHLEN + 1];
 
-       DPRINTK("ncp_mkdir: making %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_mkdir: making %pd2\n", dentry);
 
        ncp_age_dentry(server, dentry);
        len = sizeof(__name);
@@ -1032,8 +1022,7 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
        int error, result, len;
        __u8 __name[NCP_MAXPATHLEN + 1];
 
-       DPRINTK("ncp_rmdir: removing %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_rmdir: removing %pd2\n", dentry);
 
        len = sizeof(__name);
        error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
@@ -1078,8 +1067,7 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
        int error;
 
        server = NCP_SERVER(dir);
-       DPRINTK("ncp_unlink: unlinking %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_unlink: unlinking %pd2\n", dentry);
        
        /*
         * Check whether to close the file ...
@@ -1099,8 +1087,7 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
 #endif
        switch (error) {
                case 0x00:
-                       DPRINTK("ncp: removed %s/%s\n",
-                               dentry->d_parent->d_name.name, dentry->d_name.name);
+                       DPRINTK("ncp: removed %pd2\n", dentry);
                        break;
                case 0x85:
                case 0x8A:
@@ -1133,9 +1120,7 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
        int old_len, new_len;
        __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
 
-       DPRINTK("ncp_rename: %s/%s to %s/%s\n",
-               old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-               new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
+       DPRINTK("ncp_rename: %pd2 to %pd2\n", old_dentry, new_dentry);
 
        ncp_age_dentry(server, old_dentry);
        ncp_age_dentry(server, new_dentry);
@@ -1165,8 +1150,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
 #endif
        switch (error) {
                case 0x00:
-                               DPRINTK("ncp renamed %s -> %s.\n",
-                                old_dentry->d_name.name,new_dentry->d_name.name);
+                               DPRINTK("ncp renamed %pd -> %pd.\n",
+                                old_dentry, new_dentry);
                        break;
                case 0x9E:
                        error = -ENAMETOOLONG;
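
/*
 * Editor's illustration (not part of this commit): the ncpfs and nfs hunks
 * rely on the %pd/%pD vsprintf extensions -- "%pd2" prints the last two
 * components of a dentry's path and "%pD2" does the same for a struct file --
 * replacing the old two-argument d_parent->d_name.name / d_name.name pattern:
 */
printk(KERN_DEBUG "removing %pd2\n", dentry);
/* was: printk(KERN_DEBUG "removing %s/%s\n",
 *	 dentry->d_parent->d_name.name, dentry->d_name.name);
 */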
index 122e260247f53c663550073fda567a4342b0ba63..8f5074e1ecb9eaf2b39b0064edc17f5528516ca2 100644 (file)
@@ -107,8 +107,7 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        void* freepage;
        size_t freelen;
 
-       DPRINTK("ncp_file_read: enter %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_read: enter %pd2\n", dentry);
 
        pos = *ppos;
 
@@ -166,8 +165,7 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
        file_accessed(file);
 
-       DPRINTK("ncp_file_read: exit %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_read: exit %pd2\n", dentry);
 outrel:
        ncp_inode_close(inode);         
        return already_read ? already_read : error;
@@ -184,8 +182,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
        int errno;
        void* bouncebuffer;
 
-       DPRINTK("ncp_file_write: enter %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_write: enter %pd2\n", dentry);
        if ((ssize_t) count < 0)
                return -EINVAL;
        pos = *ppos;
@@ -264,8 +261,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
                        i_size_write(inode, pos);
                mutex_unlock(&inode->i_mutex);
        }
-       DPRINTK("ncp_file_write: exit %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_write: exit %pd2\n", dentry);
 outrel:
        ncp_inode_close(inode);         
        return already_written ? already_written : errno;
index 2dceee4db07652fd449ef713037a8a268d864f00..af0325864df667e91ef7e03faae6323b7f99532b 100644 (file)
@@ -590,6 +590,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
 
        if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
                args.flags |= RPC_CLNT_CREATE_DISCRTRY;
+       if (test_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags))
+               args.flags |= RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT;
        if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
                args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
        if (test_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags))
index 854a8f05a61007bf14b2dd75e7f080fbb248cce4..76548d81f926c8843a291350a080f09938c0d0fd 100644 (file)
@@ -98,9 +98,7 @@ nfs_opendir(struct inode *inode, struct file *filp)
        struct nfs_open_dir_context *ctx;
        struct rpc_cred *cred;
 
-       dfprintk(FILE, "NFS: open dir(%s/%s)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name);
+       dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 
@@ -297,11 +295,10 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
                                if (ctx->duped > 0
                                    && ctx->dup_cookie == *desc->dir_cookie) {
                                        if (printk_ratelimit()) {
-                                               pr_notice("NFS: directory %s/%s contains a readdir loop."
+                                               pr_notice("NFS: directory %pD2 contains a readdir loop."
                                                                "Please contact your server vendor.  "
                                                                "The file: %s has duplicate cookie %llu\n",
-                                                               desc->file->f_dentry->d_parent->d_name.name,
-                                                               desc->file->f_dentry->d_name.name,
+                                                               desc->file,
                                                                array->array[i].string.name,
                                                                *desc->dir_cookie);
                                        }
@@ -822,9 +819,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
        struct nfs_open_dir_context *dir_ctx = file->private_data;
        int res = 0;
 
-       dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       (long long)ctx->pos);
+       dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+                       file, (long long)ctx->pos);
        nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
 
        /*
@@ -880,22 +876,17 @@ out:
        nfs_unblock_sillyrename(dentry);
        if (res > 0)
                res = 0;
-       dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       res);
+       dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
        return res;
 }
 
 static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct nfs_open_dir_context *dir_ctx = filp->private_data;
 
-       dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name,
-                       offset, whence);
+       dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
+                       filp, offset, whence);
 
        mutex_lock(&inode->i_mutex);
        switch (whence) {
@@ -925,15 +916,12 @@ out:
 static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
                         int datasync)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
-       dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       datasync);
+       dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
 
        mutex_lock(&inode->i_mutex);
-       nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC);
+       nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        mutex_unlock(&inode->i_mutex);
        return 0;
 }
@@ -1073,9 +1061,8 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
        }
 
        if (is_bad_inode(inode)) {
-               dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
-                               __func__, dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+               dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+                               __func__, dentry);
                goto out_bad;
        }
 
@@ -1125,9 +1112,8 @@ out_set_verifier:
        nfs_advise_use_readdirplus(dir);
  out_valid_noent:
        dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
-                       __func__, dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+                       __func__, dentry);
        return 1;
 out_zap_parent:
        nfs_zap_caches(dir);
@@ -1147,18 +1133,16 @@ out_zap_parent:
                goto out_valid;
 
        dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
-                       __func__, dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+                       __func__, dentry);
        return 0;
 out_error:
        nfs_free_fattr(fattr);
        nfs_free_fhandle(fhandle);
        nfs4_label_free(label);
        dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
-                       __func__, dentry->d_parent->d_name.name,
-                       dentry->d_name.name, error);
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+                       __func__, dentry, error);
        return error;
 }
 
@@ -1182,16 +1166,14 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
         * eventually need to do something more here.
         */
        if (!inode) {
-               dfprintk(LOOKUPCACHE, "%s: %s/%s has negative inode\n",
-                               __func__, dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+               dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
+                               __func__, dentry);
                return 1;
        }
 
        if (is_bad_inode(inode)) {
-               dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
-                               __func__, dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+               dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+                               __func__, dentry);
                return 0;
        }
 
@@ -1206,9 +1188,8 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
  */
 static int nfs_dentry_delete(const struct dentry *dentry)
 {
-       dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               dentry->d_flags);
+       dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
+               dentry, dentry->d_flags);
 
        /* Unhash any dentry with a stale inode */
        if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
@@ -1286,8 +1267,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
        struct nfs4_label *label = NULL;
        int error;
 
-       dfprintk(VFS, "NFS: lookup(%s/%s)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
        nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
 
        res = ERR_PTR(-ENAMETOOLONG);
@@ -1381,7 +1361,7 @@ static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, i
 
 static int do_open(struct inode *inode, struct file *filp)
 {
-       nfs_fscache_set_inode_cookie(inode, filp);
+       nfs_fscache_open_file(inode, filp);
        return 0;
 }
 
@@ -1418,8 +1398,8 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        /* Expect a negative dentry */
        BUG_ON(dentry->d_inode);
 
-       dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: atomic_open(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        err = nfs_check_flags(open_flags);
        if (err)
@@ -1458,7 +1438,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
        trace_nfs_atomic_open_enter(dir, ctx, open_flags);
        nfs_block_sillyrename(dentry->d_parent);
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr);
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
        nfs_unblock_sillyrename(dentry->d_parent);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
@@ -1608,8 +1588,8 @@ int nfs_create(struct inode *dir, struct dentry *dentry,
        int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
        int error;
 
-       dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: create(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        attr.ia_mode = mode;
        attr.ia_valid = ATTR_MODE;
@@ -1635,8 +1615,8 @@ nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
        struct iattr attr;
        int status;
 
-       dfprintk(VFS, "NFS: mknod(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: mknod(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -1664,8 +1644,8 @@ int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct iattr attr;
        int error;
 
-       dfprintk(VFS, "NFS: mkdir(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: mkdir(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        attr.ia_valid = ATTR_MODE;
        attr.ia_mode = mode | S_IFDIR;
@@ -1692,8 +1672,8 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
        int error;
 
-       dfprintk(VFS, "NFS: rmdir(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: rmdir(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        trace_nfs_rmdir_enter(dir, dentry);
        if (dentry->d_inode) {
@@ -1728,8 +1708,7 @@ static int nfs_safe_remove(struct dentry *dentry)
        struct inode *inode = dentry->d_inode;
        int error = -EBUSY;
                
-       dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);
 
        /* If the dentry was sillyrenamed, we simply call d_delete() */
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
@@ -1762,8 +1741,8 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
        int error;
        int need_rehash = 0;
 
-       dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
-               dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: unlink(%s/%ld, %pd)\n", dir->i_sb->s_id,
+               dir->i_ino, dentry);
 
        trace_nfs_unlink_enter(dir, dentry);
        spin_lock(&dentry->d_lock);
@@ -1813,8 +1792,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
        unsigned int pathlen = strlen(symname);
        int error;
 
-       dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
-               dir->i_ino, dentry->d_name.name, symname);
+       dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s)\n", dir->i_sb->s_id,
+               dir->i_ino, dentry, symname);
 
        if (pathlen > PAGE_SIZE)
                return -ENAMETOOLONG;
@@ -1836,9 +1815,9 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
        error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
        trace_nfs_symlink_exit(dir, dentry, error);
        if (error != 0) {
-               dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s) error %d\n",
+               dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s) error %d\n",
                        dir->i_sb->s_id, dir->i_ino,
-                       dentry->d_name.name, symname, error);
+                       dentry, symname, error);
                d_drop(dentry);
                __free_page(page);
                return error;
@@ -1865,9 +1844,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
        struct inode *inode = old_dentry->d_inode;
        int error;
 
-       dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
-               old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
+               old_dentry, dentry);
 
        trace_nfs_link_enter(inode, dir, dentry);
        NFS_PROTO(inode)->return_delegation(inode);
@@ -1915,9 +1893,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *dentry = NULL, *rehash = NULL;
        int error = -EBUSY;
 
-       dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
-                old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-                new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
+       dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
+                old_dentry, new_dentry,
                 d_count(new_dentry));
 
        trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
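
The fs/nfs/dir.c hunks extend the same idea to struct file with %pD/%pD2 and drop the intermediate dentry variable in favour of file_inode(). A rough sketch of the before/after pattern (helper names are illustrative):

#include <linux/fs.h>
#include <linux/printk.h>

static void demo_log_file(struct file *filp)
{
	/* %pD2 prints "parent/name" for the file's path dentry */
	pr_debug("fsync dir(%pD2)\n", filp);
}

static struct inode *demo_inode_of(struct file *filp)
{
	/* file_inode() is the canonical accessor; it replaces the
	 * open-coded filp->f_path.dentry->d_inode chain removed above. */
	return file_inode(filp);
}
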
index 91ff089d34126d8ae5d8d4a96bca232fbd921958..87a6475eb170996972969b30779949742b1c4a57 100644 (file)
@@ -90,6 +90,7 @@ struct nfs_direct_req {
        int                     flags;
 #define NFS_ODIRECT_DO_COMMIT          (1)     /* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES     (2)     /* write verification failed */
+#define NFS_ODIRECT_MARK_DIRTY         (4)     /* mark read pages dirty */
        struct nfs_writeverf    verf;           /* unstable write verifier */
 };
 
@@ -112,32 +113,22 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
  * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
  * @nr_segs: size of iovec array
  *
  * The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, for most direct IO, we
- * shunt off direct read and write requests before the VFS gets them,
- * so this method is only ever called for swap.
+ * the NFS client supports direct I/O. However, we shunt off direct
+ * read and write requests before the VFS gets them, so this method
+ * should never be called.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                     loff_t pos)
 {
-#ifndef CONFIG_NFS_SWAP
-       dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
-                       iocb->ki_filp->f_path.dentry->d_name.name,
-                       (long long) pos, nr_segs);
+       dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
+                       iocb->ki_filp, (long long) pos, iter->nr_segs);
 
        return -EINVAL;
-#else
-       VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
-
-       if (rw == READ || rw == KERNEL_READ)
-               return nfs_file_direct_read(iocb, iov, nr_segs, pos,
-                               rw == READ ? true : false);
-       return nfs_file_direct_write(iocb, iov, nr_segs, pos,
-                               rw == WRITE ? true : false);
-#endif /* CONFIG_NFS_SWAP */
 }
 
 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -265,7 +256,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
 
-               if (!PageCompound(page) && bytes < hdr->good_bytes)
+               if ((dreq->flags & NFS_ODIRECT_MARK_DIRTY) &&
+                   !PageCompound(page) && bytes < hdr->good_bytes)
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
@@ -308,7 +300,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
  */
 static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                const struct iovec *iov,
-                                               loff_t pos, bool uio)
+                                               loff_t pos)
 {
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
@@ -336,20 +328,12 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
                                          GFP_KERNEL);
                if (!pagevec)
                        break;
-               if (uio) {
-                       down_read(&current->mm->mmap_sem);
-                       result = get_user_pages(current, current->mm, user_addr,
+               down_read(&current->mm->mmap_sem);
+               result = get_user_pages(current, current->mm, user_addr,
                                        npages, 1, 0, pagevec, NULL);
-                       up_read(&current->mm->mmap_sem);
-                       if (result < 0)
-                               break;
-               } else {
-                       WARN_ON(npages != 1);
-                       result = get_kernel_page(user_addr, 1, pagevec);
-                       if (WARN_ON(result != 1))
-                               break;
-               }
-
+               up_read(&current->mm->mmap_sem);
+               if (result < 0)
+                       break;
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
@@ -397,24 +381,17 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
        return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
-                                             const struct iovec *iov,
-                                             unsigned long nr_segs,
-                                             loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_read_iovec(
+               struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
-       struct nfs_pageio_descriptor desc;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
-                            &nfs_direct_read_completion_ops);
-       get_dreq(dreq);
-       desc.pg_dreq = dreq;
-
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
+               result = nfs_direct_read_schedule_segment(desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -422,6 +399,75 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_read_bvec(
+               struct nfs_pageio_descriptor *desc,
+               struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+       struct nfs_direct_req *dreq = desc->pg_dreq;
+       struct nfs_open_context *ctx = dreq->ctx;
+       struct inode *inode = ctx->dentry->d_inode;
+       ssize_t result = -EINVAL;
+       size_t requested_bytes = 0;
+       unsigned long seg;
+       struct nfs_page *req;
+       unsigned int req_len;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               result = -EIO;
+               req_len = bvec[seg].bv_len;
+               req = nfs_create_request(ctx, inode,
+                                        bvec[seg].bv_page,
+                                        bvec[seg].bv_offset, req_len);
+               if (IS_ERR(req)) {
+                       result = PTR_ERR(req);
+                       break;
+               }
+               req->wb_index = pos >> PAGE_SHIFT;
+               req->wb_offset = pos & ~PAGE_MASK;
+               if (!nfs_pageio_add_request(desc, req)) {
+                       result = desc->pg_error;
+                       nfs_release_request(req);
+                       break;
+               }
+               requested_bytes += req_len;
+               pos += req_len;
+       }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq,
+                                       struct iov_iter *iter, loff_t pos)
+{
+       struct nfs_pageio_descriptor desc;
+       ssize_t result;
+
+       NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
+                            &nfs_direct_read_completion_ops);
+       get_dreq(dreq);
+       desc.pg_dreq = dreq;
+
+       if (iov_iter_has_iovec(iter)) {
+               result = nfs_direct_do_schedule_read_iovec(&desc,
+                               iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+       } else if (iov_iter_has_bvec(iter)) {
+               result = nfs_direct_do_schedule_read_bvec(&desc,
+                               iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+       } else
+               BUG();
 
        nfs_pageio_complete(&desc);
 
@@ -429,9 +475,9 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
-       if (requested_bytes == 0) {
+       if (result < 0) {
                nfs_direct_req_release(dreq);
-               return result < 0 ? result : -EIO;
+               return result;
        }
 
        if (put_dreq(dreq))
@@ -439,8 +485,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
        return 0;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos, bool uio)
+static ssize_t nfs_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t pos)
 {
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -452,7 +498,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        dreq->inode = inode;
-       dreq->bytes_left = iov_length(iov, nr_segs);
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
@@ -463,8 +509,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       NFS_I(inode)->read_io += iov_length(iov, nr_segs);
-       result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+       NFS_I(inode)->read_io += iov_iter_count(iter);
+       result = nfs_direct_read_schedule(dreq, iter, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -629,7 +675,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
  */
 static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                 const struct iovec *iov,
-                                                loff_t pos, bool uio)
+                                                loff_t pos)
 {
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
@@ -657,19 +703,12 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
                if (!pagevec)
                        break;
 
-               if (uio) {
-                       down_read(&current->mm->mmap_sem);
-                       result = get_user_pages(current, current->mm, user_addr,
-                                               npages, 0, 0, pagevec, NULL);
-                       up_read(&current->mm->mmap_sem);
-                       if (result < 0)
-                               break;
-               } else {
-                       WARN_ON(npages != 1);
-                       result = get_kernel_page(user_addr, 0, pagevec);
-                       if (WARN_ON(result != 1))
-                               break;
-               }
+               down_read(&current->mm->mmap_sem);
+               result = get_user_pages(current, current->mm, user_addr,
+                                       npages, 0, 0, pagevec, NULL);
+               up_read(&current->mm->mmap_sem);
+               if (result < 0)
+                       break;
 
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
@@ -798,27 +837,18 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .completion = nfs_direct_write_completion,
 };
 
-static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
-                                              const struct iovec *iov,
-                                              unsigned long nr_segs,
-                                              loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_write_iovec(
+               struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
-       struct nfs_pageio_descriptor desc;
-       struct inode *inode = dreq->inode;
-       ssize_t result = 0;
+       ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
-                             &nfs_direct_write_completion_ops);
-       desc.pg_dreq = dreq;
-       get_dreq(dreq);
-       atomic_inc(&inode->i_dio_count);
-
-       NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
+               result = nfs_direct_write_schedule_segment(desc, vec,
+                                                          pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -826,16 +856,91 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_write_bvec(
+               struct nfs_pageio_descriptor *desc,
+               struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+       struct nfs_direct_req *dreq = desc->pg_dreq;
+       struct nfs_open_context *ctx = dreq->ctx;
+       struct inode *inode = dreq->inode;
+       ssize_t result = 0;
+       size_t requested_bytes = 0;
+       unsigned long seg;
+       struct nfs_page *req;
+       unsigned int req_len;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               req_len = bvec[seg].bv_len;
+
+               req = nfs_create_request(ctx, inode, bvec[seg].bv_page,
+                                        bvec[seg].bv_offset, req_len);
+               if (IS_ERR(req)) {
+                       result = PTR_ERR(req);
+                       break;
+               }
+               nfs_lock_request(req);
+               req->wb_index = pos >> PAGE_SHIFT;
+               req->wb_offset = pos & ~PAGE_MASK;
+               if (!nfs_pageio_add_request(desc, req)) {
+                       result = desc->pg_error;
+                       nfs_unlock_and_release_request(req);
+                       break;
+               }
+               requested_bytes += req_len;
+               pos += req_len;
+       }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq,
+                                        struct iov_iter *iter, loff_t pos)
+{
+       struct nfs_pageio_descriptor desc;
+       struct inode *inode = dreq->inode;
+       ssize_t result = 0;
+
+       NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
+                             &nfs_direct_write_completion_ops);
+       desc.pg_dreq = dreq;
+       get_dreq(dreq);
+       atomic_inc(&inode->i_dio_count);
+
+       NFS_I(dreq->inode)->write_io += iov_iter_count(iter);
+
+       if (iov_iter_has_iovec(iter)) {
+               result = nfs_direct_do_schedule_write_iovec(&desc,
+                               iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+       } else if (iov_iter_has_bvec(iter)) {
+               result = nfs_direct_do_schedule_write_bvec(&desc,
+                               iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+       } else
+               BUG();
+
        nfs_pageio_complete(&desc);
 
        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
-       if (requested_bytes == 0) {
+       if (result < 0) {
                inode_dio_done(inode);
                nfs_direct_req_release(dreq);
-               return result < 0 ? result : -EIO;
+               return result;
        }
 
        if (put_dreq(dreq))
@@ -843,9 +948,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
        return 0;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos,
-                               size_t count, bool uio)
+static ssize_t nfs_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
 {
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -857,7 +961,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        dreq->inode = inode;
-       dreq->bytes_left = count;
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
@@ -868,7 +972,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+       result = nfs_direct_write_schedule(dreq, iter, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -880,12 +984,11 @@ out:
 /**
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers into which to read data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers into which to read data
  * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
- * generic_file_aio_read() in order to avoid gfar's check to see if
+ * generic_file_read_iter() in order to avoid gfar's check to see if
  * the request starts before the end of the file.  For that check
  * to work, we must generate a GETATTR before each direct read, and
  * even then there is a window between the GETATTR and the subsequent
@@ -898,21 +1001,19 @@ out:
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t pos)
 {
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;
 
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
-       dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               count, (long long) pos);
+       dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
+               file, count, (long long) pos);
 
        retval = 0;
        if (!count)
@@ -924,7 +1025,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 
        task_io_account_read(count);
 
-       retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
+       retval = nfs_direct_read(iocb, iter, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;
 
@@ -935,12 +1036,11 @@ out:
 /**
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers from which to write data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers from which to write data
  * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
- * generic_file_aio_write() in order to avoid taking the inode
+ * generic_file_write_iter() in order to avoid taking the inode
  * semaphore and updating the i_size.  The NFS server will set
  * the new i_size and this client must read the updated size
  * back into its cache.  We let the server do generic write
@@ -954,21 +1054,19 @@ out:
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t pos)
 {
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;
 
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 
-       dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               count, (long long) pos);
+       dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
+               file, count, (long long) pos);
 
        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
@@ -987,7 +1085,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
        task_io_account_write(count);
 
-       retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
+       retval = nfs_direct_write(iocb, iter, pos);
        if (retval > 0) {
                struct inode *inode = mapping->host;
 
index 1e6bfdbc1aff4403a194798d3c3928993948710d..e022fe909ded953ae0403e1252d6fb8597d7c4c7 100644 (file)
@@ -65,9 +65,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
 {
        int res;
 
-       dprintk("NFS: open file(%s/%s)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name);
+       dprintk("NFS: open file(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSOPEN);
        res = nfs_check_flags(filp->f_flags);
@@ -81,9 +79,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
 int
 nfs_file_release(struct inode *inode, struct file *filp)
 {
-       dprintk("NFS: release(%s/%s)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name);
+       dprintk("NFS: release(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
        return nfs_release(inode, filp);
@@ -123,10 +119,8 @@ force_reval:
 
 loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
 {
-       dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name,
-                       offset, whence);
+       dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
+                       filp, offset, whence);
 
        /*
         * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
@@ -150,12 +144,9 @@ EXPORT_SYMBOL_GPL(nfs_file_llseek);
 int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
-       struct dentry   *dentry = file->f_path.dentry;
-       struct inode    *inode = dentry->d_inode;
+       struct inode    *inode = file_inode(file);
 
-       dprintk("NFS: flush(%s/%s)\n",
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dprintk("NFS: flush(%pD2)\n", file);
 
        nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
        if ((file->f_mode & FMODE_WRITE) == 0)
@@ -174,42 +165,38 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 EXPORT_SYMBOL_GPL(nfs_file_flush);
 
 ssize_t
-nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+nfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
-       struct dentry * dentry = iocb->ki_filp->f_path.dentry;
-       struct inode * inode = dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t result;
 
        if (iocb->ki_filp->f_flags & O_DIRECT)
-               return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
+               return nfs_file_direct_read(iocb, iter, pos);
 
-       dprintk("NFS: read(%s/%s, %lu@%lu)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) iov_length(iov, nr_segs), (unsigned long) pos);
+       dprintk("NFS: read_iter(%pD2, %lu@%lu)\n",
+               iocb->ki_filp,
+               (unsigned long) iov_iter_count(iter), (unsigned long) pos);
 
        result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
        if (!result) {
-               result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+               result = generic_file_read_iter(iocb, iter, pos);
                if (result > 0)
                        nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
        }
        return result;
 }
-EXPORT_SYMBOL_GPL(nfs_file_read);
+EXPORT_SYMBOL_GPL(nfs_file_read_iter);
 
 ssize_t
 nfs_file_splice_read(struct file *filp, loff_t *ppos,
                     struct pipe_inode_info *pipe, size_t count,
                     unsigned int flags)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        ssize_t res;
 
-       dprintk("NFS: splice_read(%s/%s, %lu@%Lu)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) count, (unsigned long long) *ppos);
+       dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
+               filp, (unsigned long) count, (unsigned long long) *ppos);
 
        res = nfs_revalidate_mapping(inode, filp->f_mapping);
        if (!res) {
@@ -224,12 +211,10 @@ EXPORT_SYMBOL_GPL(nfs_file_splice_read);
 int
 nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-       struct dentry *dentry = file->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int     status;
 
-       dprintk("NFS: mmap(%s/%s)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dprintk("NFS: mmap(%pD2)\n", file);
 
        /* Note: generic_file_mmap() returns ENOSYS on nommu systems
         *       so we call that before revalidating the mapping
@@ -252,21 +237,18 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap);
  * disk, but it retrieves and clears ctx->error after synching, despite
  * the two being set at the same time in nfs_context_set_write_error().
  * This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occurred, and hence cause it to
+ * nfs_file_write_iter() that a write error occurred, and hence cause it to
  * fall back to doing a synchronous write.
  */
 int
 nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct dentry *dentry = file->f_path.dentry;
        struct nfs_open_context *ctx = nfs_file_open_context(file);
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int have_error, do_resend, status;
        int ret = 0;
 
-       dprintk("NFS: fsync file(%s/%s) datasync %d\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       datasync);
+       dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
 
        nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
@@ -371,10 +353,8 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
        struct page *page;
        int once_thru = 0;
 
-       dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               mapping->host->i_ino, len, (long long) pos);
+       dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%ld), %u@%lld)\n",
+               file, mapping->host->i_ino, len, (long long) pos);
 
 start:
        /*
@@ -414,10 +394,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        int status;
 
-       dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               mapping->host->i_ino, len, (long long) pos);
+       dfprintk(PAGECACHE, "NFS: write_end(%pD2(%ld), %u@%lld)\n",
+               file, mapping->host->i_ino, len, (long long) pos);
 
        /*
         * Zero any uninitialised parts of the page, and then mark the page
@@ -601,22 +579,21 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
        struct file *filp = vma->vm_file;
-       struct dentry *dentry = filp->f_path.dentry;
+       struct inode *inode = file_inode(filp);
        unsigned pagelen;
        int ret = VM_FAULT_NOPAGE;
        struct address_space *mapping;
 
-       dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               filp->f_mapping->host->i_ino,
+       dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%ld), offset %lld)\n",
+               filp, filp->f_mapping->host->i_ino,
                (long long)page_offset(page));
 
        /* make sure the cache has finished storing the page */
-       nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
+       nfs_fscache_wait_on_page_write(NFS_I(inode), page);
 
        lock_page(page);
        mapping = page_file_mapping(page);
-       if (mapping != dentry->d_inode->i_mapping)
+       if (mapping != inode->i_mapping)
                goto out_unlock;
 
        wait_on_page_writeback(page);
@@ -656,25 +633,24 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
        return 0;
 }
 
-ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos)
+ssize_t nfs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                           loff_t pos)
 {
-       struct dentry * dentry = iocb->ki_filp->f_path.dentry;
-       struct inode * inode = dentry->d_inode;
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
        unsigned long written = 0;
        ssize_t result;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
 
-       result = nfs_key_timeout_notify(iocb->ki_filp, inode);
+       result = nfs_key_timeout_notify(file, inode);
        if (result)
                return result;
 
-       if (iocb->ki_filp->f_flags & O_DIRECT)
-               return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
+       if (file->f_flags & O_DIRECT)
+               return nfs_file_direct_write(iocb, iter, pos);
 
-       dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) count, (long long) pos);
+       dprintk("NFS: write_iter(%pD2, %lu@%Ld)\n",
+               file, (unsigned long) count, (long long) pos);
 
        result = -EBUSY;
        if (IS_SWAPFILE(inode))
@@ -682,8 +658,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
        /*
         * O_APPEND implies that we must revalidate the file length.
         */
-       if (iocb->ki_filp->f_flags & O_APPEND) {
-               result = nfs_revalidate_file_size(inode, iocb->ki_filp);
+       if (file->f_flags & O_APPEND) {
+               result = nfs_revalidate_file_size(inode, file);
                if (result)
                        goto out;
        }
@@ -692,13 +668,13 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
        if (!count)
                goto out;
 
-       result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       result = generic_file_write_iter(iocb, iter, pos);
        if (result > 0)
                written = result;
 
        /* Return error values for O_DSYNC and IS_SYNC() */
-       if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
-               int err = vfs_fsync(iocb->ki_filp, 0);
+       if (result >= 0 && nfs_need_sync_write(file, inode)) {
+               int err = vfs_fsync(file, 0);
                if (err < 0)
                        result = err;
        }
@@ -711,20 +687,18 @@ out_swapfile:
        printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
        goto out;
 }
-EXPORT_SYMBOL_GPL(nfs_file_write);
+EXPORT_SYMBOL_GPL(nfs_file_write_iter);
 
 ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
                              struct file *filp, loff_t *ppos,
                              size_t count, unsigned int flags)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        unsigned long written = 0;
        ssize_t ret;
 
-       dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) count, (unsigned long long) *ppos);
+       dprintk("NFS splice_write(%pD2, %lu@%llu)\n",
+               filp, (unsigned long) count, (unsigned long long) *ppos);
 
        /*
         * The combination of splice and an O_APPEND destination is disallowed.
@@ -883,10 +857,8 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
        int ret = -ENOLCK;
        int is_local = 0;
 
-       dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name,
-                       fl->fl_type, fl->fl_flags,
+       dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
+                       filp, fl->fl_type, fl->fl_flags,
                        (long long)fl->fl_start, (long long)fl->fl_end);
 
        nfs_inc_stats(inode, NFSIOS_VFSLOCK);
@@ -923,10 +895,8 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
        struct inode *inode = filp->f_mapping->host;
        int is_local = 0;
 
-       dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name,
-                       fl->fl_type, fl->fl_flags);
+       dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
+                       filp, fl->fl_type, fl->fl_flags);
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
@@ -960,9 +930,7 @@ EXPORT_SYMBOL_GPL(nfs_flock);
  */
 int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
 {
-       dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
-                       file->f_path.dentry->d_parent->d_name.name,
-                       file->f_path.dentry->d_name.name, arg);
+       dprintk("NFS: setlease(%pD2, arg=%ld)\n", file, arg);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(nfs_setlease);
@@ -971,8 +939,8 @@ const struct file_operations nfs_file_operations = {
        .llseek         = nfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
+       .read_iter      = nfs_file_read_iter,
+       .write_iter     = nfs_file_write_iter,
        .mmap           = nfs_file_mmap,
        .open           = nfs_file_open,
        .flush          = nfs_file_flush,
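
The fs/nfs/file.c hunks rename the aio entry points to nfs_file_read_iter()/nfs_file_write_iter() and wire them up through .read_iter/.write_iter. A self-contained sketch of that wiring, using the three-argument prototype this series defines (everything named demo_* is a placeholder, not an NFS symbol):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t pos)
{
	/* a real implementation would fill the iterator; here we simply
	 * report that nothing was read */
	return 0;
}

static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *iter,
			       loff_t pos)
{
	/* pretend the whole request was consumed */
	return iov_iter_count(iter);
}

static const struct file_operations demo_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= demo_read_iter,
	.write_iter	= demo_write_iter,
};
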
index 24d1d1c5fcaf9e50b7528d7ef2a9eff6fcf9f018..3ef01f0ba0bcafa2bdfe0c6088e367c12dd76668 100644 (file)
@@ -39,7 +39,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
        /* create a cache index for looking up filehandles */
        clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
                                              &nfs_fscache_server_index_def,
-                                             clp);
+                                             clp, true);
        dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
                 clp, clp->fscache);
 }
@@ -139,7 +139,7 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
        /* create a cache index for looking up filehandles */
        nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
                                               &nfs_fscache_super_index_def,
-                                              nfss);
+                                              nfss, true);
        dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
                 nfss, nfss->fscache);
        return;
@@ -178,163 +178,79 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
 /*
  * Initialise the per-inode cache cookie pointer for an NFS inode.
  */
-void nfs_fscache_init_inode_cookie(struct inode *inode)
+void nfs_fscache_init_inode(struct inode *inode)
 {
-       NFS_I(inode)->fscache = NULL;
-       if (S_ISREG(inode->i_mode))
-               set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
-}
-
-/*
- * Get the per-inode cache cookie for an NFS inode.
- */
-static void nfs_fscache_enable_inode_cookie(struct inode *inode)
-{
-       struct super_block *sb = inode->i_sb;
        struct nfs_inode *nfsi = NFS_I(inode);
 
-       if (nfsi->fscache || !NFS_FSCACHE(inode))
+       nfsi->fscache = NULL;
+       if (!S_ISREG(inode->i_mode))
                return;
-
-       if ((NFS_SB(sb)->options & NFS_OPTION_FSCACHE)) {
-               nfsi->fscache = fscache_acquire_cookie(
-                       NFS_SB(sb)->fscache,
-                       &nfs_fscache_inode_object_def,
-                       nfsi);
-
-               dfprintk(FSCACHE, "NFS: get FH cookie (0x%p/0x%p/0x%p)\n",
-                        sb, nfsi, nfsi->fscache);
-       }
+       nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
+                                              &nfs_fscache_inode_object_def,
+                                              nfsi, false);
 }
 
 /*
  * Release a per-inode cookie.
  */
-void nfs_fscache_release_inode_cookie(struct inode *inode)
+void nfs_fscache_clear_inode(struct inode *inode)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
+       struct fscache_cookie *cookie = nfs_i_fscache(inode);
 
-       dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
-                nfsi, nfsi->fscache);
+       dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
 
-       fscache_relinquish_cookie(nfsi->fscache, 0);
+       fscache_relinquish_cookie(cookie, false);
        nfsi->fscache = NULL;
 }
 
-/*
- * Retire a per-inode cookie, destroying the data attached to it.
- */
-void nfs_fscache_zap_inode_cookie(struct inode *inode)
+static bool nfs_fscache_can_enable(void *data)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
+       struct inode *inode = data;
 
-       dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
-                nfsi, nfsi->fscache);
-
-       fscache_relinquish_cookie(nfsi->fscache, 1);
-       nfsi->fscache = NULL;
+       return !inode_is_open_for_write(inode);
 }
 
 /*
- * Turn off the cache with regard to a per-inode cookie if opened for writing,
- * invalidating all the pages in the page cache relating to the associated
- * inode to clear the per-page caching.
- */
-static void nfs_fscache_disable_inode_cookie(struct inode *inode)
-{
-       clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
-
-       if (NFS_I(inode)->fscache) {
-               dfprintk(FSCACHE,
-                        "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
-
-               /* Need to uncache any pages attached to this inode that
-                * fscache knows about before turning off the cache.
-                */
-               fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
-               nfs_fscache_zap_inode_cookie(inode);
-       }
-}
-
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int nfs_fscache_wait_bit(void *flags)
-{
-       schedule();
-       return 0;
-}
-
-/*
- * Lock against someone else trying to also acquire or relinquish a cookie
- */
-static inline void nfs_fscache_inode_lock(struct inode *inode)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
-               wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
-                           nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-}
-
-/*
- * Unlock cookie management lock
- */
-static inline void nfs_fscache_inode_unlock(struct inode *inode)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       smp_mb__before_clear_bit();
-       clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
-       smp_mb__after_clear_bit();
-       wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
-}
-
-/*
- * Decide if we should enable or disable local caching for this inode.
- * - For now, with NFS, only regular files that are open read-only will be able
- *   to use the cache.
- * - May be invoked multiple times in parallel by parallel nfs_open() functions.
- */
-void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
-{
-       if (NFS_FSCACHE(inode)) {
-               nfs_fscache_inode_lock(inode);
-               if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
-                       nfs_fscache_disable_inode_cookie(inode);
-               else
-                       nfs_fscache_enable_inode_cookie(inode);
-               nfs_fscache_inode_unlock(inode);
-       }
-}
-EXPORT_SYMBOL_GPL(nfs_fscache_set_inode_cookie);
-
-/*
- * Replace a per-inode cookie due to revalidation detecting a file having
- * changed on the server.
+ * Enable or disable caching for a file that is being opened as appropriate.
+ * The cookie is allocated when the inode is initialised, but is not enabled at
+ * that time.  Enablement is deferred to file-open time to avoid stat() and
+ * access() thrashing the cache.
+ *
+ * For now, with NFS, only regular files that are open read-only will be able
+ * to use the cache.
+ *
+ * We enable the cache for an inode if we open it read-only and it isn't
+ * currently open for writing.  We disable the cache if the inode is open
+ * write-only.
+ *
+ * The caller uses the file struct to pin i_writecount on the inode before
+ * calling us when a file is opened for writing, so we can make use of that.
+ *
+ * Note that this may be invoked multiple times in parallel by parallel
+ * nfs_open() functions.
  */
-void nfs_fscache_reset_inode_cookie(struct inode *inode)
+void nfs_fscache_open_file(struct inode *inode, struct file *filp)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
-       struct nfs_server *nfss = NFS_SERVER(inode);
-       NFS_IFDEBUG(struct fscache_cookie *old = nfsi->fscache);
+       struct fscache_cookie *cookie = nfs_i_fscache(inode);
 
-       nfs_fscache_inode_lock(inode);
-       if (nfsi->fscache) {
-               /* retire the current fscache cache and get a new one */
-               fscache_relinquish_cookie(nfsi->fscache, 1);
-
-               nfsi->fscache = fscache_acquire_cookie(
-                       nfss->nfs_client->fscache,
-                       &nfs_fscache_inode_object_def,
-                       nfsi);
+       if (!fscache_cookie_valid(cookie))
+               return;
 
-               dfprintk(FSCACHE,
-                        "NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
-                        nfss, nfsi, old, nfsi->fscache);
+       if (inode_is_open_for_write(inode)) {
+               dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
+               clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
+               fscache_disable_cookie(cookie, true);
+               fscache_uncache_all_inode_pages(cookie, inode);
+       } else {
+               dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
+               fscache_enable_cookie(cookie, nfs_fscache_can_enable, inode);
+               if (fscache_cookie_enabled(cookie))
+                       set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
        }
-       nfs_fscache_inode_unlock(inode);
 }
+EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
 
 /*
  * Release the caching state associated with a page, if the page isn't busy
@@ -344,12 +260,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
 int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 {
        if (PageFsCache(page)) {
-               struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-               struct fscache_cookie *cookie = nfsi->fscache;
+               struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
 
                BUG_ON(!cookie);
                dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
-                        cookie, page, nfsi);
+                        cookie, page, NFS_I(page->mapping->host));
 
                if (!fscache_maybe_release_page(cookie, page, gfp))
                        return 0;
@@ -367,13 +282,12 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
  */
 void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
-       struct fscache_cookie *cookie = nfsi->fscache;
+       struct fscache_cookie *cookie = nfs_i_fscache(inode);
 
        BUG_ON(!cookie);
 
        dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
-                cookie, page, nfsi);
+                cookie, page, NFS_I(inode));
 
        fscache_wait_on_page_write(cookie, page);
 
@@ -417,9 +331,9 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
 
        dfprintk(FSCACHE,
                 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
-                NFS_I(inode)->fscache, page, page->index, page->flags, inode);
+                nfs_i_fscache(inode), page, page->index, page->flags, inode);
 
-       ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
+       ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
                                         page,
                                         nfs_readpage_from_fscache_complete,
                                         ctx,
@@ -459,9 +373,9 @@ int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
        int ret;
 
        dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
-                NFS_I(inode)->fscache, npages, inode);
+                nfs_i_fscache(inode), npages, inode);
 
-       ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
+       ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
                                          mapping, pages, nr_pages,
                                          nfs_readpage_from_fscache_complete,
                                          ctx,
@@ -506,15 +420,15 @@ void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
 
        dfprintk(FSCACHE,
                 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
-                NFS_I(inode)->fscache, page, page->index, page->flags, sync);
+                nfs_i_fscache(inode), page, page->index, page->flags, sync);
 
-       ret = fscache_write_page(NFS_I(inode)->fscache, page, GFP_KERNEL);
+       ret = fscache_write_page(nfs_i_fscache(inode), page, GFP_KERNEL);
        dfprintk(FSCACHE,
                 "NFS:     readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
                 page, page->index, page->flags, ret);
 
        if (ret != 0) {
-               fscache_uncache_page(NFS_I(inode)->fscache, page);
+               fscache_uncache_page(nfs_i_fscache(inode), page);
                nfs_add_fscache_stats(inode,
                                      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
                nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
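
The fs/nfs/fscache.c hunks above route all cookie access through nfs_i_fscache() and defer enablement to open time. Condensed for orientation (a sketch built only from the fscache_enable_cookie()/fscache_disable_cookie() calls shown above, not an additional change in this patch), the open-time decision is:

    /* Editorial sketch; example_open_cache() is a made-up name. */
    static void example_open_cache(struct inode *inode)
    {
            struct fscache_cookie *cookie = nfs_i_fscache(inode);

            if (!fscache_cookie_valid(cookie))
                    return;                 /* caching not configured */

            if (inode_is_open_for_write(inode)) {
                    /* a writer exists: stop caching and drop cached pages */
                    fscache_disable_cookie(cookie, true);
                    fscache_uncache_all_inode_pages(cookie, inode);
            } else {
                    /* read-only open: enable, re-checking for writers via
                     * the can_enable callback (nfs_fscache_can_enable) */
                    fscache_enable_cookie(cookie, nfs_fscache_can_enable, inode);
            }
    }

The can_enable callback re-tests inode_is_open_for_write() inside fscache_enable_cookie(), presumably so the final decision is made under fscache's own enablement serialisation rather than racing with a parallel write open.
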
index 4ecb76652eba6059402c0d9e7320e4cdc1603b82..d7fe3e799f2fc0ee4c83f226f5dee1cc72ca0cc7 100644 (file)
@@ -76,11 +76,9 @@ extern void nfs_fscache_release_client_cookie(struct nfs_client *);
 extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
 extern void nfs_fscache_release_super_cookie(struct super_block *);
 
-extern void nfs_fscache_init_inode_cookie(struct inode *);
-extern void nfs_fscache_release_inode_cookie(struct inode *);
-extern void nfs_fscache_zap_inode_cookie(struct inode *);
-extern void nfs_fscache_set_inode_cookie(struct inode *, struct file *);
-extern void nfs_fscache_reset_inode_cookie(struct inode *);
+extern void nfs_fscache_init_inode(struct inode *);
+extern void nfs_fscache_clear_inode(struct inode *);
+extern void nfs_fscache_open_file(struct inode *, struct file *);
 
 extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
 extern int nfs_fscache_release_page(struct page *, gfp_t);
@@ -187,12 +185,10 @@ static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
 
 static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
 
-static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_release_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_zap_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_set_inode_cookie(struct inode *inode,
-                                               struct file *filp) {}
-static inline void nfs_fscache_reset_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_init_inode(struct inode *inode) {}
+static inline void nfs_fscache_clear_inode(struct inode *inode) {}
+static inline void nfs_fscache_open_file(struct inode *inode,
+                                        struct file *filp) {}
 
 static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 {
index eda8879171c47e3225ee6891a77aac70e1531af2..bb90bff0cb7a296f4b50a40b78974484fb221a9d 100644 (file)
@@ -122,7 +122,7 @@ void nfs_clear_inode(struct inode *inode)
        WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
        nfs_zap_acl_cache(inode);
        nfs_access_zap_cache(inode);
-       nfs_fscache_release_inode_cookie(inode);
+       nfs_fscache_clear_inode(inode);
 }
 EXPORT_SYMBOL_GPL(nfs_clear_inode);
 
@@ -459,7 +459,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                nfsi->attrtimeo_timestamp = now;
                nfsi->access_cache = RB_ROOT;
 
-               nfs_fscache_init_inode_cookie(inode);
+               nfs_fscache_init_inode(inode);
 
                unlock_new_inode(inode);
        } else
@@ -854,7 +854,7 @@ int nfs_open(struct inode *inode, struct file *filp)
                return PTR_ERR(ctx);
        nfs_file_set_open_context(filp, ctx);
        put_nfs_open_context(ctx);
-       nfs_fscache_set_inode_cookie(inode, filp);
+       nfs_fscache_open_file(inode, filp);
        return 0;
 }
 
index 38da8c2b81ac09d526b3b402d1964e0c7c17c2dc..32f7a4f415a1559e5b7b5de5e7627a860dae1938 100644 (file)
@@ -291,11 +291,11 @@ int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *)
 int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int);
 loff_t nfs_file_llseek(struct file *, loff_t, int);
 int nfs_file_flush(struct file *, fl_owner_t);
-ssize_t nfs_file_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_read_iter(struct kiocb *, struct iov_iter *, loff_t);
 ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
                             size_t, unsigned int);
 int nfs_file_mmap(struct file *, struct vm_area_struct *);
-ssize_t nfs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_write_iter(struct kiocb *, struct iov_iter *, loff_t);
 int nfs_file_release(struct inode *, struct file *);
 int nfs_lock(struct file *, int, struct file_lock *);
 int nfs_flock(struct file *, int, struct file_lock *);
index 348b535cd7866d9e18cfa6f9412b650266244f0a..b5a0afc3ee101b988fc18df2b009c20e56812534 100644 (file)
@@ -253,9 +253,8 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
        dprintk("--> nfs_do_submount()\n");
 
-       dprintk("%s: submounting on %s/%s\n", __func__,
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dprintk("%s: submounting on %pd2\n", __func__,
+                       dentry);
        if (page == NULL)
                goto out;
        devname = nfs_devname(dentry, page, PAGE_SIZE);
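
The dprintk/printk conversions in this and the following NFS/NFSD files rely on the %pd family of vsprintf specifiers, which print dentry names (and, with %pD, the name of an open struct file) directly instead of chasing d_name.name by hand; a trailing digit selects how many trailing path components are emitted. Illustrative call sites (hypothetical messages, assuming the specifiers behave as used in the hunks above and below):

    dprintk("NFS call  create %pd\n", dentry);               /* last component only   */
    dprintk("%s: submounting on %pd2\n", __func__, dentry);  /* "parent/name"         */
    printk("nfsd4: purge failed in %pD\n", filp);            /* name of an open file  */
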
index 90cb10d7b6936d1fc478f46572728e65e3874f29..01b6f6a49d162ef0ea8720786e259fd5f66aa9d3 100644 (file)
@@ -321,7 +321,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        umode_t mode = sattr->ia_mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  create %s\n", dentry->d_name.name);
+       dprintk("NFS call  create %pd\n", dentry);
 
        data = nfs3_alloc_createdata();
        if (data == NULL)
@@ -548,7 +548,7 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
        if (len > NFS3_MAXPATHLEN)
                return -ENAMETOOLONG;
 
-       dprintk("NFS call  symlink %s\n", dentry->d_name.name);
+       dprintk("NFS call  symlink %pd\n", dentry);
 
        data = nfs3_alloc_createdata();
        if (data == NULL)
@@ -576,7 +576,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
        umode_t mode = sattr->ia_mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+       dprintk("NFS call  mkdir %pd\n", dentry);
 
        sattr->ia_mode &= ~current_umask();
 
@@ -695,7 +695,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        umode_t mode = sattr->ia_mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  mknod %s %u:%u\n", dentry->d_name.name,
+       dprintk("NFS call  mknod %pd %u:%u\n", dentry,
                        MAJOR(rdev), MINOR(rdev));
 
        sattr->ia_mode &= ~current_umask();
index a860ab566d6e98e5ce2f2f1c42686a15837254bc..511cdce6ecf2f067b4effb1026cf1d236cbb0ae9 100644 (file)
@@ -368,6 +368,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        if (clp->cl_minorversion != 0)
                __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
        __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+       __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
        error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
        if (error == -EINVAL)
                error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
index e5b804dd944c16a8adf4de17ee6588562cec55e8..c34007ae921af48bbf9c463478f5b093b4713b99 100644 (file)
@@ -19,6 +19,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        struct inode *dir;
        unsigned openflags = filp->f_flags;
        struct iattr attr;
+       int opened = 0;
        int err;
 
        /*
@@ -30,9 +31,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
         * -EOPENSTALE.  The VFS will retry the lookup/create/open.
         */
 
-       dprintk("NFS: open file(%s/%s)\n",
-               dentry->d_parent->d_name.name,
-               dentry->d_name.name);
+       dprintk("NFS: open file(%pd2)\n", dentry);
 
        if ((openflags & O_ACCMODE) == 3)
                openflags--;
@@ -55,7 +54,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
                nfs_wb_all(inode);
        }
 
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                switch (err) {
@@ -74,7 +73,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
 
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        nfs_file_set_open_context(filp, ctx);
-       nfs_fscache_set_inode_cookie(inode, filp);
+       nfs_fscache_open_file(inode, filp);
        err = 0;
 
 out_put_ctx:
@@ -121,8 +120,8 @@ const struct file_operations nfs4_file_operations = {
        .llseek         = nfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
+       .read_iter      = nfs_file_read_iter,
+       .write_iter     = nfs_file_write_iter,
        .mmap           = nfs_file_mmap,
        .open           = nfs4_file_open,
        .flush          = nfs_file_flush,
index 95604f64cab86632d7a166ce588ea9ffd5d87e95..c7c295e556ed87501c069053d0c133e44dcadc97 100644 (file)
@@ -185,6 +185,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
        if (status)
                goto out_put;
 
+       smp_wmb();
        ds->ds_clp = clp;
        dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
 out:
@@ -801,34 +802,35 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
        struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
        struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
        struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
-
-       if (filelayout_test_devid_unavailable(devid))
-               return NULL;
+       struct nfs4_pnfs_ds *ret = ds;
 
        if (ds == NULL) {
                printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
                        __func__, ds_idx);
                filelayout_mark_devid_invalid(devid);
-               return NULL;
+               goto out;
        }
+       smp_rmb();
        if (ds->ds_clp)
-               return ds;
+               goto out_test_devid;
 
        if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
                struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
                int err;
 
                err = nfs4_ds_connect(s, ds);
-               if (err) {
+               if (err)
                        nfs4_mark_deviceid_unavailable(devid);
-                       ds = NULL;
-               }
                nfs4_clear_ds_conn_bit(ds);
        } else {
                /* Either ds is connected, or ds is NULL */
                nfs4_wait_ds_connect(ds);
        }
-       return ds;
+out_test_devid:
+       if (filelayout_test_devid_unavailable(devid))
+               ret = NULL;
+out:
+       return ret;
 }
 
 module_param(dataserver_retrans, uint, 0644);
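
The smp_wmb()/smp_rmb() pair added above pairs the data-server initialisation in nfs4_ds_connect() with the readers in nfs4_fl_prepare_ds(): the connect path completes its setup before publishing ds->ds_clp, and the reader orders its check against the dependent state it goes on to use. A generic sketch of the publish/consume idiom these barriers implement (hypothetical names; this is the general pattern, not the filelayout code, and newer code would likely use smp_store_release()/smp_load_acquire()):

    struct conn { int ready_state; };
    struct slot { struct conn *clp; };

    static void publish(struct slot *s, struct conn *c)
    {
            c->ready_state = 1;     /* initialise everything first      */
            smp_wmb();              /* order init before pointer store  */
            s->clp = c;             /* publish the connection           */
    }

    static int consume(struct slot *s)
    {
            struct conn *c;

            smp_rmb();              /* order against earlier reads      */
            c = s->clp;
            if (!c)
                    return 0;
            return c->ready_state;  /* sees the initialisation above    */
    }
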
index 2288cd3c92784305c9669fe5e87682ad3293c843..049b9fb0d2c9c613e16eaa1fce5179acaa7d2ff1 100644 (file)
@@ -283,8 +283,7 @@ static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
        if (locations == NULL || locations->nlocations <= 0)
                goto out;
 
-       dprintk("%s: referral at %s/%s\n", __func__,
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dprintk("%s: referral at %pd2\n", __func__, dentry);
 
        page = (char *) __get_free_page(GFP_USER);
        if (!page)
@@ -348,8 +347,8 @@ static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *
        mnt = ERR_PTR(-ENOENT);
 
        parent = dget_parent(dentry);
-       dprintk("%s: getting locations for %s/%s\n",
-               __func__, parent->d_name.name, dentry->d_name.name);
+       dprintk("%s: getting locations for %pd2\n",
+               __func__, dentry);
 
        err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
        dput(parent);
index 989bb9d3074d0d4c88d63a486f05491423b090d4..30ffc4a3e42b4e3d75c6dc31a87b0820a8b8d350 100644 (file)
@@ -912,6 +912,7 @@ struct nfs4_opendata {
        struct iattr attrs;
        unsigned long timestamp;
        unsigned int rpc_done : 1;
+       unsigned int file_created : 1;
        unsigned int is_recover : 1;
        int rpc_status;
        int cancelled;
@@ -1946,8 +1947,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
 
        nfs_fattr_map_and_free_names(server, &data->f_attr);
 
-       if (o_arg->open_flags & O_CREAT)
+       if (o_arg->open_flags & O_CREAT) {
                update_changeattr(dir, &o_res->cinfo);
+               if (o_arg->open_flags & O_EXCL)
+                       data->file_created = 1;
+               else if (o_res->cinfo.before != o_res->cinfo.after)
+                       data->file_created = 1;
+       }
        if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server->caps &= ~NFS_CAP_POSIX_LOCK;
        if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -2191,7 +2197,8 @@ static int _nfs4_do_open(struct inode *dir,
                        struct nfs_open_context *ctx,
                        int flags,
                        struct iattr *sattr,
-                       struct nfs4_label *label)
+                       struct nfs4_label *label,
+                       int *opened)
 {
        struct nfs4_state_owner  *sp;
        struct nfs4_state     *state = NULL;
@@ -2261,6 +2268,8 @@ static int _nfs4_do_open(struct inode *dir,
                        nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
                }
        }
+       if (opendata->file_created)
+               *opened |= FILE_CREATED;
 
        if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
                *ctx_th = opendata->f_attr.mdsthreshold;
@@ -2289,7 +2298,8 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
                                        struct nfs_open_context *ctx,
                                        int flags,
                                        struct iattr *sattr,
-                                       struct nfs4_label *label)
+                                       struct nfs4_label *label,
+                                       int *opened)
 {
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs4_exception exception = { };
@@ -2297,7 +2307,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
        int status;
 
        do {
-               status = _nfs4_do_open(dir, ctx, flags, sattr, label);
+               status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
                res = ctx->state;
                trace_nfs4_open_file(ctx, flags, status);
                if (status == 0)
@@ -2659,7 +2669,8 @@ out:
 }
 
 static struct inode *
-nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
+nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+               int open_flags, struct iattr *attr, int *opened)
 {
        struct nfs4_state *state;
        struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
@@ -2667,7 +2678,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
        label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
 
        /* Protect against concurrent sillydeletes */
-       state = nfs4_do_open(dir, ctx, open_flags, attr, label);
+       state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
 
        nfs4_label_release_security(label);
 
@@ -3332,6 +3343,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        struct nfs4_label l, *ilabel = NULL;
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
+       int opened = 0;
        int status = 0;
 
        ctx = alloc_nfs_open_context(dentry, FMODE_READ);
@@ -3341,7 +3353,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
 
        sattr->ia_mode &= ~current_umask();
-       state = nfs4_do_open(dir, ctx, flags, sattr, ilabel);
+       state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
        if (IS_ERR(state)) {
                status = PTR_ERR(state);
                goto out;
@@ -3726,9 +3738,8 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
        };
        int                     status;
 
-       dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name,
+       dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
+                       dentry,
                        (unsigned long long)cookie);
        nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
        res.pgbase = args.pgbase;
@@ -5094,6 +5105,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
                        status = 0;
        }
        request->fl_ops->fl_release_private(request);
+       request->fl_ops = NULL;
 out:
        return status;
 }
@@ -7564,8 +7576,10 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
 {
        int err;
        struct page *page;
-       rpc_authflavor_t flavor;
+       rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
        struct nfs4_secinfo_flavors *flavors;
+       struct nfs4_secinfo4 *secinfo;
+       int i;
 
        page = alloc_page(GFP_KERNEL);
        if (!page) {
@@ -7587,9 +7601,31 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
        if (err)
                goto out_freepage;
 
-       flavor = nfs_find_best_sec(flavors);
-       if (err == 0)
-               err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
+       for (i = 0; i < flavors->num_flavors; i++) {
+               secinfo = &flavors->flavors[i];
+
+               switch (secinfo->flavor) {
+               case RPC_AUTH_NULL:
+               case RPC_AUTH_UNIX:
+               case RPC_AUTH_GSS:
+                       flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
+                                       &secinfo->flavor_info);
+                       break;
+               default:
+                       flavor = RPC_AUTH_MAXFLAVOR;
+                       break;
+               }
+
+               if (flavor != RPC_AUTH_MAXFLAVOR) {
+                       err = nfs4_lookup_root_sec(server, fhandle,
+                                                  info, flavor);
+                       if (!err)
+                               break;
+               }
+       }
+
+       if (flavor == RPC_AUTH_MAXFLAVOR)
+               err = -EPERM;
 
 out_freepage:
        put_page(page);
index a8f57c728df561ac58e158c9fb46ca3b7c77004e..fddbba2d9eff028fa5c15e12b799aedf35f40e4d 100644 (file)
@@ -235,7 +235,7 @@ nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        };
        int status = -ENOMEM;
 
-       dprintk("NFS call  create %s\n", dentry->d_name.name);
+       dprintk("NFS call  create %pd\n", dentry);
        data = nfs_alloc_createdata(dir, dentry, sattr);
        if (data == NULL)
                goto out;
@@ -265,7 +265,7 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        umode_t mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  mknod %s\n", dentry->d_name.name);
+       dprintk("NFS call  mknod %pd\n", dentry);
 
        mode = sattr->ia_mode;
        if (S_ISFIFO(mode)) {
@@ -423,7 +423,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
        };
        int status = -ENAMETOOLONG;
 
-       dprintk("NFS call  symlink %s\n", dentry->d_name.name);
+       dprintk("NFS call  symlink %pd\n", dentry);
 
        if (len > NFS2_MAXPATHLEN)
                goto out;
@@ -462,7 +462,7 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
        };
        int status = -ENOMEM;
 
-       dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+       dprintk("NFS call  mkdir %pd\n", dentry);
        data = nfs_alloc_createdata(dir, dentry, sattr);
        if (data == NULL)
                goto out;
index bb939edd4c998cb98b7aeb56ae1aa308e4d9009d..8285de9eaad24eefc3aa7325438e21e98c625d8a 100644 (file)
@@ -495,9 +495,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
        struct rpc_task *task;
        int            error = -EIO;
 
-       dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               d_count(dentry));
+       dfprintk(VFS, "NFS: silly-rename(%pd2, ct=%d)\n",
+               dentry, d_count(dentry));
        nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
 
        /*
@@ -522,8 +521,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
                                SILLYNAME_FILEID_LEN, fileid,
                                SILLYNAME_COUNTER_LEN, sillycounter);
 
-               dfprintk(VFS, "NFS: trying to rename %s to %s\n",
-                               dentry->d_name.name, silly);
+               dfprintk(VFS, "NFS: trying to rename %pd to %s\n",
+                               dentry, silly);
 
                sdentry = lookup_one_len(silly, dentry->d_parent, slen);
                /*
index ac1dc331ba31212108cd5c93352ecdb620122690..c1d548211c31dfab94dec9252cc3d929cfe42daa 100644 (file)
@@ -954,10 +954,8 @@ int nfs_updatepage(struct file *file, struct page *page,
 
        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
 
-       dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name, count,
-               (long long)(page_file_offset(page) + offset));
+       dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
+               file, count, (long long)(page_file_offset(page) + offset));
 
        if (nfs_can_extend_write(file, page, inode)) {
                count = max(count + offset, nfs_page_length(page));
index e0a65a9e37e97ac1a8702a48487349d599be4aab..9c271f42604a3644e9e826af6f252348e1429249 100644 (file)
@@ -385,8 +385,8 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
 
        status = vfs_rmdir(parent->d_inode, child);
        if (status)
-               printk("failed to remove client recovery directory %s\n",
-                               child->d_name.name);
+               printk("failed to remove client recovery directory %pd\n",
+                               child);
        /* Keep trying, success or failure: */
        return 0;
 }
@@ -410,15 +410,15 @@ out:
        nfs4_release_reclaim(nn);
        if (status)
                printk("nfsd4: failed to purge old clients from recovery"
-                       " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+                       " directory %pD\n", nn->rec_file);
 }
 
 static int
 load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
 {
        if (child->d_name.len != HEXDIR_LEN - 1) {
-               printk("nfsd4: illegal name %s in recovery directory\n",
-                               child->d_name.name);
+               printk("nfsd4: illegal name %pd in recovery directory\n",
+                               child);
                /* Keep trying; maybe the others are OK: */
                return 0;
        }
@@ -437,7 +437,7 @@ nfsd4_recdir_load(struct net *net) {
        status = nfsd4_list_rec_dir(load_recdir, nn);
        if (status)
                printk("nfsd4: failed loading clients from recovery"
-                       " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+                       " directory %pD\n", nn->rec_file);
        return status;
 }
 
index 0874998a49cd40081dc34483bb8f400a64ad2528..a601fd49f997bc441397cb432f2fe0d41dacf8ec 100644 (file)
@@ -3843,9 +3843,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-       dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
-                       (int)cstate->current_fh.fh_dentry->d_name.len,
-                       cstate->current_fh.fh_dentry->d_name.name);
+       dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
+                       cstate->current_fh.fh_dentry);
 
        status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
        if (status)
@@ -3922,9 +3921,8 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-       dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 
-                       (int)cstate->current_fh.fh_dentry->d_name.len,
-                       cstate->current_fh.fh_dentry->d_name.name);
+       dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
+                       cstate->current_fh.fh_dentry);
 
        /* We don't yet support WANT bits: */
        if (od->od_deleg_want)
@@ -3980,9 +3978,8 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("NFSD: nfsd4_close on file %.*s\n", 
-                       (int)cstate->current_fh.fh_dentry->d_name.len,
-                       cstate->current_fh.fh_dentry->d_name.name);
+       dprintk("NFSD: nfsd4_close on file %pd\n", 
+                       cstate->current_fh.fh_dentry);
 
        nfs4_lock_state();
        status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
index 814afaa4458a9285fc3168c47d1df6a1f8164c12..3d0e15ae6f726311ef82b337e66bea91a26792e4 100644 (file)
@@ -47,7 +47,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
                tdentry = parent;
        }
        if (tdentry != exp->ex_path.dentry)
-               dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
+               dprintk("nfsd_acceptable failed at %p %pd\n", tdentry, tdentry);
        rv = (tdentry == exp->ex_path.dentry);
        dput(tdentry);
        return rv;
@@ -253,8 +253,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
 
        if (S_ISDIR(dentry->d_inode->i_mode) &&
                        (dentry->d_flags & DCACHE_DISCONNECTED)) {
-               printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
-                               dentry->d_parent->d_name.name, dentry->d_name.name);
+               printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
+                               dentry);
        }
 
        fhp->fh_dentry = dentry;
@@ -361,10 +361,9 @@ skip_pseudoflavor_check:
        error = nfsd_permission(rqstp, exp, dentry, access);
 
        if (error) {
-               dprintk("fh_verify: %s/%s permission failure, "
+               dprintk("fh_verify: %pd2 permission failure, "
                        "acc=%x, error=%d\n",
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name,
+                       dentry,
                        access, ntohl(error));
        }
 out:
@@ -514,14 +513,13 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
         */
 
        struct inode * inode = dentry->d_inode;
-       struct dentry *parent = dentry->d_parent;
        __u32 *datap;
        dev_t ex_dev = exp_sb(exp)->s_dev;
 
-       dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
+       dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, ino=%ld)\n",
                MAJOR(ex_dev), MINOR(ex_dev),
                (long) exp->ex_path.dentry->d_inode->i_ino,
-               parent->d_name.name, dentry->d_name.name,
+               dentry,
                (inode ? inode->i_ino : 0));
 
        /* Choose filehandle version and fsid type based on
@@ -534,13 +532,13 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
                fh_put(ref_fh);
 
        if (fhp->fh_locked || fhp->fh_dentry) {
-               printk(KERN_ERR "fh_compose: fh %s/%s not initialized!\n",
-                      parent->d_name.name, dentry->d_name.name);
+               printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
+                      dentry);
        }
        if (fhp->fh_maxsize < NFS_FHSIZE)
-               printk(KERN_ERR "fh_compose: called with maxsize %d! %s/%s\n",
+               printk(KERN_ERR "fh_compose: called with maxsize %d! %pd2\n",
                       fhp->fh_maxsize,
-                      parent->d_name.name, dentry->d_name.name);
+                      dentry);
 
        fhp->fh_dentry = dget(dentry); /* our internal copy */
        fhp->fh_export = exp;
@@ -613,8 +611,8 @@ out_bad:
        printk(KERN_ERR "fh_update: fh not verified!\n");
        goto out;
 out_negative:
-       printk(KERN_ERR "fh_update: %s/%s still negative!\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       printk(KERN_ERR "fh_update: %pd2 still negative!\n",
+               dentry);
        goto out;
 }
 
index e5e6707ba687715809ef510b4167f011fb40b172..4775bc4896c8b363123f852d1fd517f59fe7fd6e 100644 (file)
@@ -173,8 +173,8 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
        BUG_ON(!dentry);
 
        if (fhp->fh_locked) {
-               printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name);
+               printk(KERN_WARNING "fh_lock: %pd2 already locked!\n",
+                       dentry);
                return;
        }
 
index c827acb0e943bdab08fd077e56228b170a6a9549..13886f7f40d5e8a868182936d3cda837492b7b61 100644 (file)
@@ -1317,9 +1317,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                if (!fhp->fh_locked) {
                        /* not actually possible */
                        printk(KERN_ERR
-                               "nfsd_create: parent %s/%s not locked!\n",
-                               dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+                               "nfsd_create: parent %pd2 not locked!\n",
+                               dentry);
                        err = nfserr_io;
                        goto out;
                }
@@ -1329,8 +1328,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
         */
        err = nfserr_exist;
        if (dchild->d_inode) {
-               dprintk("nfsd_create: dentry %s/%s not negative!\n",
-                       dentry->d_name.name, dchild->d_name.name);
+               dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
+                       dentry, dchild);
                goto out; 
        }
 
index 08fdb77852acd4f2ca692c5c8eb010e55b00aaf5..7aeb8ee013050453d136fc86e80022616118a07f 100644 (file)
@@ -153,8 +153,8 @@ const struct file_operations nilfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = nilfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = nilfs_compat_ioctl,
index 7e350c562e0ea1dd491a8ee6c72371b770666e1f..4a99a24b54a276108d584b4f540936e8e16c4699 100644 (file)
@@ -298,8 +298,8 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-               loff_t offset, unsigned long nr_segs)
+nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+               loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -310,7 +310,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                return 0;
 
        /* Needs synchronization with the cleaner */
-       size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  nilfs_get_block);
 
        /*
@@ -319,7 +319,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        nilfs_write_failed(mapping, end);
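
The direct-I/O and read/write conversions running through this series follow one mechanical pattern: the (const struct iovec *, unsigned long nr_segs) pair becomes a single struct iov_iter *, and byte totals move from iov_length() to iov_iter_count(). A before/after sketch (fs_direct_IO() and fs_get_block() are hypothetical; only the signature change is taken from the hunks above):

    /* old style: raw iovec array plus segment count */
    static ssize_t fs_direct_IO(int rw, struct kiocb *iocb,
                                const struct iovec *iov,
                                loff_t offset, unsigned long nr_segs)
    {
            struct inode *inode = file_inode(iocb->ki_filp)->i_mapping->host;

            if (iov_length(iov, nr_segs) == 0)
                    return 0;
            return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
                                      fs_get_block);
    }

    /* new style: the iov_iter carries both the segments and the total count */
    static ssize_t fs_direct_IO(int rw, struct kiocb *iocb,
                                struct iov_iter *iter, loff_t offset)
    {
            struct inode *inode = file_inode(iocb->ki_filp)->i_mapping->host;

            if (iov_iter_count(iter) == 0)
                    return 0;
            return blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                      fs_get_block);
    }
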
index 0ba679866e504ec126720f3eb7b78e210703f538..da276640f7763d2463c0fa99cb832a86d243f10c 100644 (file)
@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
        clear_buffer_nilfs_volatile(bh);
        clear_buffer_nilfs_checked(bh);
        clear_buffer_nilfs_redirected(bh);
+       clear_buffer_async_write(bh);
        clear_buffer_dirty(bh);
        if (nilfs_page_buffers_clean(page))
                __nilfs_clear_page_dirty(page);
@@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
                                        "discard block %llu, size %zu",
                                        (u64)bh->b_blocknr, bh->b_size);
                        }
+                       clear_buffer_async_write(bh);
                        clear_buffer_dirty(bh);
                        clear_buffer_nilfs_volatile(bh);
                        clear_buffer_nilfs_checked(bh);
index bd88a7461063bba02f31c6f873902c9c8e87e943..9f6b486b6c01a0e6711ca5930e78474d0739e35e 100644 (file)
@@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 
                bh = head = page_buffers(page);
                do {
-                       if (!buffer_dirty(bh))
+                       if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
-                               if (buffer_dirty(bh)) {
+                               if (buffer_dirty(bh) &&
+                                               !buffer_async_write(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
@@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
                                    b_assoc_buffers) {
+                       set_buffer_async_write(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page) {
                                        lock_page(bd_page);
@@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
+                       set_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        lock_page(bd_page);
@@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
        list_for_each_entry(segbuf, logs, sb_list) {
                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
                                    b_assoc_buffers) {
+                       clear_buffer_async_write(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page)
                                        end_page_writeback(bd_page);
@@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
+                       clear_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        end_page_writeback(bd_page);
@@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                                    b_assoc_buffers) {
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
+                       clear_buffer_async_write(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page)
                                        end_page_writeback(bd_page);
@@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                                    b_assoc_buffers) {
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
+                       clear_buffer_async_write(bh);
                        clear_buffer_delay(bh);
                        clear_buffer_nilfs_volatile(bh);
                        clear_buffer_nilfs_redirected(bh);
index f37d3c0e20535eacce542c7184f48d8b509884a2..2921dcf300d36c62fc2403fcfbd066c7c0ed1df6 100644 (file)
@@ -616,9 +616,8 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
 
 static ssize_t ocfs2_direct_IO(int rw,
                               struct kiocb *iocb,
-                              const struct iovec *iov,
-                              loff_t offset,
-                              unsigned long nr_segs)
+                              struct iov_iter *iter,
+                              loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file)->i_mapping->host;
@@ -635,8 +634,7 @@ static ssize_t ocfs2_direct_IO(int rw,
                return 0;
 
        return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-                                   iov, offset, nr_segs,
-                                   ocfs2_direct_IO_get_blocks,
+                                   iter, offset, ocfs2_direct_IO_get_blocks,
                                    ocfs2_dio_end_io, NULL, 0);
 }
 
index f671e49beb348b5c33dfba3e3c4b5f322c73e43b..573f41d1e45935ff67ea600d6c54edb3df0c0829 100644 (file)
@@ -74,7 +74,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
 /*
  * Using a named enum representing lock types in terms of #N bit stored in
  * iocb->private, which is going to be used for communication between
- * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
+ * ocfs2_dio_end_io() and ocfs2_file_write/read_iter().
  */
 enum ocfs2_iocb_lock_bits {
        OCFS2_IOCB_RW_LOCK = 0,
index ef999729e274ead1ed88c699ee86b8011fd5ff77..0d3a97d2d5f659caeb60f20f448ce25371036ff2 100644 (file)
@@ -70,9 +70,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
         */
        if (inode == NULL) {
                unsigned long gen = (unsigned long) dentry->d_fsdata;
-               unsigned long pgen =
-                       OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
-
+               unsigned long pgen;
+               spin_lock(&dentry->d_lock);
+               pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
+               spin_unlock(&dentry->d_lock);
                trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
                                                       dentry->d_name.name,
                                                       pgen, gen);
index d71903c6068b94f37836854762691a7a1c9d9f12..1d85492684ac02932617d107d969c00c9092bb64 100644 (file)
@@ -2217,15 +2217,13 @@ out:
        return ret;
 }
 
-static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs,
-                                   loff_t pos)
+static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
+                                    struct iov_iter *iter,
+                                    loff_t pos)
 {
        int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
        int can_do_direct, has_refcount = 0;
        ssize_t written = 0;
-       size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        loff_t old_size, *ppos = &iocb->ki_pos;
        u32 old_clusters;
@@ -2236,11 +2234,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
        int unaligned_dio = 0;
 
-       trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
+       trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                file->f_path.dentry->d_name.len,
                file->f_path.dentry->d_name.name,
-               (unsigned int)nr_segs);
+               (unsigned long)pos);
 
        if (iocb->ki_nbytes == 0)
                return 0;
@@ -2340,28 +2338,24 @@ relock:
        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
-       ret = generic_segment_checks(iov, &nr_segs, &ocount,
-                                    VERIFY_READ);
-       if (ret)
-               goto out_dio;
 
-       count = ocount;
+       count = iov_iter_count(iter);
        ret = generic_write_checks(file, ppos, &count,
                                   S_ISBLK(inode->i_mode));
        if (ret)
                goto out_dio;
 
        if (direct_io) {
-               written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
-                                                   ppos, count, ocount);
+               written = generic_file_direct_write_iter(iocb, iter, *ppos,
+                                                   ppos, count);
                if (written < 0) {
                        ret = written;
                        goto out_dio;
                }
        } else {
                current->backing_dev_info = file->f_mapping->backing_dev_info;
-               written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
-                                                     ppos, count, 0);
+               written = generic_file_buffered_write_iter(iocb, iter, *ppos,
+                                                          ppos, count, 0);
                current->backing_dev_info = NULL;
        }
 
@@ -2517,7 +2511,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
                        in->f_path.dentry->d_name.name, len);
 
        /*
-        * See the comment in ocfs2_file_aio_read()
+        * See the comment in ocfs2_file_read_iter()
         */
        ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
        if (ret < 0) {
@@ -2532,19 +2526,18 @@ bail:
        return ret;
 }
 
-static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
-                                  const struct iovec *iov,
-                                  unsigned long nr_segs,
-                                  loff_t pos)
+static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
+                                   struct iov_iter *iter,
+                                   loff_t pos)
 {
        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
        struct inode *inode = file_inode(filp);
 
-       trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
+       trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        filp->f_path.dentry->d_name.len,
-                       filp->f_path.dentry->d_name.name, nr_segs);
+                       filp->f_path.dentry->d_name.name, pos);
 
 
        if (!inode) {
@@ -2580,7 +2573,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
-        * generic_file_aio_read() a chance of actually working.
+        * generic_file_read_iter() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
        if (ret < 0) {
@@ -2589,13 +2582,13 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
        }
        ocfs2_inode_unlock(inode, lock_level);
 
-       ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
-       trace_generic_file_aio_read_ret(ret);
+       ret = generic_file_read_iter(iocb, iter, iocb->ki_pos);
+       trace_generic_file_read_iter_ret(ret);
 
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
 
-       /* see ocfs2_file_aio_write */
+       /* see ocfs2_file_write_iter */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
@@ -2683,8 +2676,8 @@ const struct file_operations ocfs2_fops = {
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
-       .aio_read       = ocfs2_file_aio_read,
-       .aio_write      = ocfs2_file_aio_write,
+       .read_iter      = ocfs2_file_read_iter,
+       .write_iter     = ocfs2_file_write_iter,
        .unlocked_ioctl = ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
@@ -2731,8 +2724,8 @@ const struct file_operations ocfs2_fops_no_plocks = {
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
-       .aio_read       = ocfs2_file_aio_read,
-       .aio_write      = ocfs2_file_aio_write,
+       .read_iter      = ocfs2_file_read_iter,
+       .write_iter     = ocfs2_file_write_iter,
        .unlocked_ioctl = ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
index 1b60c62aa9d6fa62940c0a9b8f9889943604170e..67f08ba77260674c690e1b9121262a72a3327f6a 100644 (file)
@@ -1310,13 +1310,13 @@ DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
 
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_write_iter);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
 
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_read_iter);
 
 DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
 
@@ -1474,7 +1474,7 @@ TRACE_EVENT(ocfs2_prepare_inode_for_write,
                  __entry->direct_io, __entry->has_refcount)
 );
 
-DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
+DEFINE_OCFS2_INT_EVENT(generic_file_read_iter_ret);
 
 /* End of trace events for fs/ocfs2/file.c. */
 
index 121da2dc3be841e579dd64fdea6bcfb89f5c121c..d4e81e4a9b0489de2eb66899eb23b9d87ea81ae5 100644 (file)
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 {
        int tmp, hangup_needed = 0;
        struct ocfs2_super *osb = NULL;
-       char nodestr[8];
+       char nodestr[12];
 
        trace_ocfs2_dismount_volume(sb);
 
index 54d57d6ba68dd5b91df6cbc9269e1ccf3c05a950..0fe505b2cded1e899e59a33cb20001127cafb36e 100644 (file)
@@ -339,8 +339,8 @@ const struct file_operations omfs_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .mmap = generic_file_mmap,
        .fsync = generic_file_fsync,
        .splice_read = generic_file_splice_read,
index 9f8ef9b7674db1ca1004b682b40a6e5500dcfc24..8eaa1ba793fc188879d405e768a81aedbaa4bf76 100644 (file)
@@ -288,10 +288,14 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct proc_dir_entry *pde = PDE(file_inode(file));
-       int rv = -EIO;
-       unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+       unsigned long rv = -EIO;
+       unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL;
        if (use_pde(pde)) {
-               get_unmapped_area = pde->proc_fops->get_unmapped_area;
+#ifdef CONFIG_MMU
+               get_unmapped_area = current->mm->get_unmapped_area;
+#endif
+               if (pde->proc_fops->get_unmapped_area)
+                       get_unmapped_area = pde->proc_fops->get_unmapped_area;
                if (get_unmapped_area)
                        rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
                unuse_pde(pde);
index 6b6a993b5c25a0ccb277bcf53b0c19489582f316..ffeb202ec942d3f3f83594d517e329bafa89ce98 100644 (file)
@@ -36,18 +36,10 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
        return NULL;
 }
 
-static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
-                               void *cookie)
-{
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               kfree(s);
-}
-
 static const struct inode_operations proc_self_inode_operations = {
        .readlink       = proc_self_readlink,
        .follow_link    = proc_self_follow_link,
-       .put_link       = proc_self_put_link,
+       .put_link       = kfree_put_link,
 };
 
 static unsigned self_inum;
index 7366e9d63cee7d8658e9fed949a2fe7b22519c6f..390bdab01c3c782cc14b55c6d1ef35ebddcc4197 100644 (file)
@@ -941,6 +941,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
                frame = pte_pfn(pte);
                flags = PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
+               if (pte_soft_dirty(pte))
+                       flags2 |= __PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                swp_entry_t entry;
                if (pte_swp_soft_dirty(pte))
@@ -960,7 +962,7 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 
        if (page && !PageAnon(page))
                flags |= PM_FILE;
-       if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
+       if ((vma->vm_flags & VM_SOFTDIRTY))
                flags2 |= __PM_SOFT_DIRTY;
 
        *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
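
In the pagemap hunk above, the per-PTE soft-dirty test now runs only for present entries (swap entries keep their own pte_swp_soft_dirty() check), while VM_SOFTDIRTY on the whole VMA still sets the flag unconditionally. A small, purely illustrative sketch of that flag computation, using toy stand-in types rather than real PTEs:

/* Sketch of the soft-dirty flag computation after the change above.
 * Types and names are simplified stand-ins, not kernel structures. */
#include <stdio.h>
#include <stdbool.h>

#define PM_SOFT_DIRTY 0x1u

struct toy_pte { bool present; bool soft_dirty; bool swap; bool swp_soft_dirty; };

static unsigned int pagemap_flags2(const struct toy_pte *pte, bool vma_softdirty)
{
        unsigned int flags2 = 0;

        if (pte->present && pte->soft_dirty)        /* pte_soft_dirty(), present entries only */
                flags2 |= PM_SOFT_DIRTY;
        else if (pte->swap && pte->swp_soft_dirty)  /* pte_swp_soft_dirty() for swap entries */
                flags2 |= PM_SOFT_DIRTY;
        if (vma_softdirty)                          /* VM_SOFTDIRTY on the whole VMA */
                flags2 |= PM_SOFT_DIRTY;
        return flags2;
}

int main(void)
{
        struct toy_pte present = { .present = true, .soft_dirty = true };
        struct toy_pte swapped = { .swap = true, .swp_soft_dirty = false };

        printf("%u %u\n", pagemap_flags2(&present, false),  /* 1 */
                          pagemap_flags2(&swapped, false)); /* 0 */
        return 0;
}
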
index 4884ac5ae9bea224517e384588847a44f4f8e462..c4d8572a37dfcf89be3e0f1d981a2186b96a5fc4 100644 (file)
@@ -39,9 +39,9 @@ const struct address_space_operations ramfs_aops = {
 
 const struct file_operations ramfs_file_operations = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = noop_fsync,
        .splice_read    = generic_file_splice_read,
index 8d5b438cc18859868fc0cf2206fa51d147ba9eb4..f2487c3cc3f33bd004075b633a4204d11a839a5b 100644 (file)
@@ -39,9 +39,9 @@ const struct file_operations ramfs_file_operations = {
        .mmap                   = ramfs_nommu_mmap,
        .get_unmapped_area      = ramfs_nommu_get_unmapped_area,
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .write                  = do_sync_write,
-       .aio_write              = generic_file_aio_write,
+       .write_iter             = generic_file_write_iter,
        .fsync                  = noop_fsync,
        .splice_read            = generic_file_splice_read,
        .splice_write           = generic_file_splice_write,
index e3cd280b158c1132a98c1b53ab2ab8bcdb14de02..296b5711a78b616523215d6e0b8e621433a9b32e 100644 (file)
@@ -29,7 +29,7 @@ typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
 const struct file_operations generic_ro_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
 };
@@ -359,6 +359,29 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
        return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
+ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+                   unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = kiocb->ki_filp;
+
+       if (file->f_op->read_iter) {
+               size_t count;
+               struct iov_iter iter;
+               int ret;
+
+               count = 0;
+               ret = generic_segment_checks(iov, &nr_segs, &count,
+                                            VERIFY_WRITE);
+               if (ret)
+                       return ret;
+
+               iov_iter_init(&iter, iov, nr_segs, count, 0);
+               return file->f_op->read_iter(kiocb, &iter, pos);
+       }
+
+       return file->f_op->aio_read(kiocb, iov, nr_segs, pos);
+}
+
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = buf, .iov_len = len };
@@ -369,7 +392,7 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
        kiocb.ki_pos = *ppos;
        kiocb.ki_nbytes = len;
 
-       ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
+       ret = do_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
@@ -384,7 +407,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
        if (!(file->f_mode & FMODE_READ))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+       if (!file_readable(file))
                return -EINVAL;
        if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
                return -EFAULT;
@@ -408,6 +431,29 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
 EXPORT_SYMBOL(vfs_read);
 
+ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+                    unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = kiocb->ki_filp;
+
+       if (file->f_op->write_iter) {
+               size_t count;
+               struct iov_iter iter;
+               int ret;
+
+               count = 0;
+               ret = generic_segment_checks(iov, &nr_segs, &count,
+                                            VERIFY_READ);
+               if (ret)
+                       return ret;
+
+               iov_iter_init(&iter, iov, nr_segs, count, 0);
+               return file->f_op->write_iter(kiocb, &iter, pos);
+       }
+
+       return file->f_op->aio_write(kiocb, iov, nr_segs, pos);
+}
+
 ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
@@ -418,7 +464,7 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
        kiocb.ki_pos = *ppos;
        kiocb.ki_nbytes = len;
 
-       ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
+       ret = do_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
@@ -433,7 +479,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
        const char __user *p;
        ssize_t ret;
 
-       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+       if (!file_writable(file))
                return -EINVAL;
 
        old_fs = get_fs();
@@ -460,7 +506,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+       if (!file_writable(file))
                return -EINVAL;
        if (unlikely(!access_ok(VERIFY_READ, buf, count)))
                return -EFAULT;
@@ -745,10 +791,12 @@ static ssize_t do_readv_writev(int type, struct file *file,
        fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
+               if (file->f_op->aio_read || file->f_op->read_iter)
+                       fnv = do_aio_read;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
+               if (file->f_op->aio_write || file->f_op->write_iter)
+                       fnv = do_aio_write;
                file_start_write(file);
        }
 
@@ -778,7 +826,7 @@ ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
 {
        if (!(file->f_mode & FMODE_READ))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+       if (!file_readable(file))
                return -EINVAL;
 
        return do_readv_writev(READ, file, vec, vlen, pos);
@@ -791,7 +839,7 @@ ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
 {
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+       if (!file_writable(file))
                return -EINVAL;
 
        return do_readv_writev(WRITE, file, vec, vlen, pos);
@@ -927,10 +975,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
        fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
+               if (file->f_op->aio_read || file->f_op->read_iter)
+                       fnv = do_aio_read;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
+               if (file->f_op->aio_write || file->f_op->write_iter)
+                       fnv = do_aio_write;
                file_start_write(file);
        }
 
@@ -965,7 +1015,7 @@ static size_t compat_readv(struct file *file,
                goto out;
 
        ret = -EINVAL;
-       if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+       if (!file_readable(file))
                goto out;
 
        ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
@@ -1032,7 +1082,7 @@ static size_t compat_writev(struct file *file,
                goto out;
 
        ret = -EINVAL;
-       if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+       if (!file_writable(file))
                goto out;
 
        ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
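
The new do_aio_read()/do_aio_write() helpers above bridge the legacy ->aio_read/->aio_write entry points to the ->read_iter/->write_iter interface: when an iter method exists, the iovec is checked, wrapped in an iov_iter, and handed to it; otherwise the old aio method is called unchanged. A minimal userspace sketch of that dispatch idea, using simplified stand-in types (not the kernel's struct file or struct iov_iter):

/* Prefer a read_iter-style method when the "file" provides one, otherwise
 * fall back to the legacy aio_read-style method. */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

struct iter { const struct iovec *iov; unsigned long nr_segs; size_t count; };

struct fops {
        ssize_t (*read_iter)(struct iter *it);                          /* new interface */
        ssize_t (*aio_read)(const struct iovec *iov, unsigned long n);  /* legacy interface */
};

static ssize_t dispatch_read(const struct fops *f,
                             const struct iovec *iov, unsigned long nr_segs)
{
        if (f->read_iter) {
                size_t count = 0;
                unsigned long i;
                struct iter it;

                for (i = 0; i < nr_segs; i++)   /* plays the role of generic_segment_checks() */
                        count += iov[i].iov_len;
                it = (struct iter){ .iov = iov, .nr_segs = nr_segs, .count = count };
                return f->read_iter(&it);
        }
        return f->aio_read(iov, nr_segs);
}

static ssize_t toy_read_iter(struct iter *it)                          { return (ssize_t)it->count; }
static ssize_t toy_aio_read(const struct iovec *iov, unsigned long n)  { (void)iov; return (ssize_t)n; }

int main(void)
{
        char a[8], b[16];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        struct fops new_style = { .read_iter = toy_read_iter };
        struct fops old_style = { .aio_read = toy_aio_read };

        printf("read_iter path: %zd bytes\n", dispatch_read(&new_style, iov, 2)); /* 24 */
        printf("aio_read path:  %zd segs\n",  dispatch_read(&old_style, iov, 2)); /* 2  */
        return 0;
}
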
index dcaafcfc23b007c845f490a4f293cc485c324b51..f98feb229ec4646613463a305f0a3db275ad1e8d 100644 (file)
@@ -245,8 +245,8 @@ const struct file_operations reiserfs_file_operations = {
        .open = reiserfs_file_open,
        .release = reiserfs_file_release,
        .fsync = reiserfs_sync_file,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .llseek = generic_file_llseek,
index ad62bdbb451ee77f1cfcee9be8cae70537168667..6d652af02c5b2c7f8bfc449451c909e0ae1ec6cf 100644 (file)
@@ -3083,14 +3083,13 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 /* We thank Mingming Cao for helping us understand in great detail what
    to do in this section of the code. */
 static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-                                 const struct iovec *iov, loff_t offset,
-                                 unsigned long nr_segs)
+                                 struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  reiserfs_get_blocks_direct_io);
 
        /*
@@ -3099,7 +3098,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
                        truncate_setsize(inode, isize);
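
The direct-I/O conversions in this series replace the (iov, nr_segs) pair with a struct iov_iter, so the total transfer size comes from iov_iter_count() instead of summing the segments with iov_length(). A runnable userspace analogue of that difference, with a toy iterator standing in for struct iov_iter:

/* Old style: walk the iovec array and add up segment lengths.
 * New style: the iterator carries a pre-computed byte count. */
#include <stdio.h>
#include <sys/uio.h>

struct toy_iter { const struct iovec *iov; unsigned long nr_segs; size_t count; };

static size_t iov_length_like(const struct iovec *iov, unsigned long nr_segs)
{
        size_t len = 0;
        while (nr_segs--)
                len += (iov++)->iov_len;
        return len;
}

static size_t iter_count_like(const struct toy_iter *it)
{
        return it->count;       /* already computed when the iterator was set up */
}

int main(void)
{
        char a[512], b[4096];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        struct toy_iter it = { iov, 2, iov_length_like(iov, 2) };

        printf("iov_length-style: %zu\n", iov_length_like(iov, 2)); /* 4608 */
        printf("iov_iter-style:   %zu\n", iter_count_like(&it));    /* 4608 */
        return 0;
}
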
index 73feacc49b2ef3bc2b088f7d74ed05d4195a45ac..fd777032c2ba7551dd10fbf9572289b36d09ad27 100644 (file)
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
        return NULL;
 }
 
-static int newer_jl_done(struct reiserfs_journal_cnode *cn)
-{
-       struct super_block *sb = cn->sb;
-       b_blocknr_t blocknr = cn->blocknr;
-
-       cn = cn->hprev;
-       while (cn) {
-               if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
-                   atomic_read(&cn->jlist->j_commit_left) != 0)
-                                   return 0;
-               cn = cn->hprev;
-       }
-       return 1;
-}
-
 static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
                reiserfs_warning(s, "clm-2048", "called with wcount %d",
                                 atomic_read(&journal->j_wcount));
        }
-       BUG_ON(jl->j_trans_id == 0);
 
        /* if flushall == 0, the lock is already held */
        if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
        return err;
 }
 
-static int test_transaction(struct super_block *s,
-                            struct reiserfs_journal_list *jl)
-{
-       struct reiserfs_journal_cnode *cn;
-
-       if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
-               return 1;
-
-       cn = jl->j_realblock;
-       while (cn) {
-               /* if the blocknr == 0, this has been cleared from the hash,
-                ** skip it
-                */
-               if (cn->blocknr == 0) {
-                       goto next;
-               }
-               if (cn->bh && !newer_jl_done(cn))
-                       return 0;
-             next:
-               cn = cn->next;
-               cond_resched();
-       }
-       return 0;
-}
-
 static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
                        break;
                tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
        }
+       get_journal_list(jl);
+       get_journal_list(flush_jl);
        /* try to find a group of blocks we can flush across all the
         ** transactions, but only bother if we've actually spanned
         ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
                ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
        }
        flush_journal_list(s, flush_jl, 1);
+       put_journal_list(s, flush_jl);
+       put_journal_list(s, jl);
        return 0;
 }
 
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
        return 1;
 }
 
-static void flush_old_journal_lists(struct super_block *s)
-{
-       struct reiserfs_journal *journal = SB_JOURNAL(s);
-       struct reiserfs_journal_list *jl;
-       struct list_head *entry;
-       time_t now = get_seconds();
-
-       while (!list_empty(&journal->j_journal_list)) {
-               entry = journal->j_journal_list.next;
-               jl = JOURNAL_LIST_ENTRY(entry);
-               /* this check should always be run, to send old lists to disk */
-               if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
-                   atomic_read(&jl->j_commit_left) == 0 &&
-                   test_transaction(s, jl)) {
-                       flush_used_journal_lists(s, jl);
-               } else {
-                       break;
-               }
-       }
-}
-
 /*
 ** long and ugly.  If flush, will not return until all commit
 ** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
                        }
                }
        }
-       flush_old_journal_lists(sb);
 
        journal->j_current_jl->j_list_bitmap =
            get_list_bitmap(sb, journal->j_current_jl);
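
The flush_used_journal_lists() hunk above pins both journal lists with get_journal_list() before kupdate_transactions()/flush_journal_list() run and drops the pins afterwards, so work done during the flush cannot free a list this function still references. A generic, illustrative refcount sketch of that pattern (the names and types are stand-ins, not the reiserfs ones):

#include <stdio.h>
#include <stdlib.h>

struct jlist { int refs; };

static void get_list(struct jlist *jl) { jl->refs++; }
static void put_list(struct jlist *jl)
{
        if (--jl->refs == 0) {
                printf("freeing list\n");
                free(jl);
        }
}

static void flush_list(struct jlist *jl)
{
        /* flushing may drop a reference that someone else was holding */
        put_list(jl);
}

int main(void)
{
        struct jlist *jl = calloc(1, sizeof(*jl));
        jl->refs = 1;

        get_list(jl);    /* extra pin taken before the flush */
        flush_list(jl);  /* may drop the other reference */
        /* still safe to touch jl here because of our pin */
        printf("refs after flush: %d\n", jl->refs);
        put_list(jl);    /* drop the pin; the object is freed now */
        return 0;
}
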
index f373bde8f545da481ba0a7caa873271dd599b30d..f8a9e2bf8d8bf3b04b9a68133cbbfb62e4d2f49e 100644 (file)
@@ -73,7 +73,7 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
 const struct file_operations romfs_ro_fops = {
        .llseek                 = generic_file_llseek,
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .splice_read            = generic_file_splice_read,
        .mmap                   = romfs_mmap,
        .get_unmapped_area      = romfs_get_unmapped_area,
index c219e733f55330741962173e994ac8d4e894a70f..083dc0ac91408870254cac60ed4b06580deba610 100644 (file)
@@ -94,7 +94,7 @@ retry:
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-       struct fd f = fdget(fd);
+       struct fd f = fdget_raw(fd);
        int error = -EBADF;
        if (f.file) {
                error = vfs_statfs(&f.file->f_path, st);
index 3a96c9783a8b959015af96e0b0548bd8e51cc606..0225c20f877047abb0a47a856c6099594964b6aa 100644 (file)
@@ -264,6 +264,8 @@ out_free_sb:
  */
 static inline void destroy_super(struct super_block *s)
 {
+       list_lru_destroy(&s->s_dentry_lru);
+       list_lru_destroy(&s->s_inode_lru);
 #ifdef CONFIG_SMP
        free_percpu(s->s_files);
 #endif
@@ -323,8 +325,6 @@ void deactivate_locked_super(struct super_block *s)
 
                /* caches are now gone, we can safely kill the shrinker now */
                unregister_shrinker(&s->s_shrink);
-               list_lru_destroy(&s->s_dentry_lru);
-               list_lru_destroy(&s->s_inode_lru);
 
                put_filesystem(fs);
                put_super(s);
index 9d4dc6831792a23270148c2d26b0423f656b505c..ff4b363ba5c95c78bbe92a4203dbd86e338c28de 100644 (file)
@@ -22,9 +22,9 @@
 const struct file_operations sysv_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
index d0c6a007ce835cf869fac695eb5445b34be6d814..eda10959714f2acad6ce5d9be82fffbe9426513b 100644 (file)
@@ -487,6 +487,7 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_sb = sb;
        sbi->s_block_base = 0;
        sbi->s_type = FSTYPE_V7;
+       mutex_init(&sbi->s_lock);
        sb->s_fs_info = sbi;
        
        sb_set_blocksize(sb, 512);
index 6b4947f75af7a00599c129b2d4e5a97a27222f4a..ea41649e4ca55e299853bb2dff7592c5ad171f2c 100644 (file)
@@ -192,8 +192,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
        struct ubifs_dent_node *dent;
        struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-       dbg_gen("'%.*s' in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+       dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
 
        if (dentry->d_name.len > UBIFS_MAX_NLEN)
                return ERR_PTR(-ENAMETOOLONG);
@@ -225,8 +224,8 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
                 * checking.
                 */
                err = PTR_ERR(inode);
-               ubifs_err("dead directory entry '%.*s', error %d",
-                         dentry->d_name.len, dentry->d_name.name, err);
+               ubifs_err("dead directory entry '%pd', error %d",
+                         dentry, err);
                ubifs_ro_mode(c, err);
                goto out;
        }
@@ -260,8 +259,8 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
         * parent directory inode.
         */
 
-       dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
 
        err = ubifs_budget_space(c, &req);
        if (err)
@@ -509,8 +508,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
         * changing the parent inode.
         */
 
-       dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+       dbg_gen("dent '%pd' to ino %lu (nlink %d) in dir ino %lu",
+               dentry, inode->i_ino,
                inode->i_nlink, dir->i_ino);
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -566,8 +565,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
         * deletions.
         */
 
-       dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+       dbg_gen("dent '%pd' from ino %lu (nlink %d) in dir ino %lu",
+               dentry, inode->i_ino,
                inode->i_nlink, dir->i_ino);
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -656,8 +655,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
         * because we have extra space reserved for deletions.
         */
 
-       dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len,
-               dentry->d_name.name, inode->i_ino, dir->i_ino);
+       dbg_gen("directory '%pd', ino %lu in dir ino %lu", dentry,
+               inode->i_ino, dir->i_ino);
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
        err = check_dir_empty(c, dentry->d_inode);
@@ -716,8 +715,8 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
         * directory inode.
         */
 
-       dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
 
        err = ubifs_budget_space(c, &req);
        if (err)
@@ -778,8 +777,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
         * directory inode.
         */
 
-       dbg_gen("dent '%.*s' in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+       dbg_gen("dent '%pd' in dir ino %lu", dentry, dir->i_ino);
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -853,8 +851,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
         * directory inode.
         */
 
-       dbg_gen("dent '%.*s', target '%s' in dir ino %lu", dentry->d_name.len,
-               dentry->d_name.name, symname, dir->i_ino);
+       dbg_gen("dent '%pd', target '%s' in dir ino %lu", dentry,
+               symname, dir->i_ino);
 
        if (len > UBIFS_MAX_INO_DATA)
                return -ENAMETOOLONG;
@@ -979,10 +977,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * separately.
         */
 
-       dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in dir ino %lu",
-               old_dentry->d_name.len, old_dentry->d_name.name,
-               old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
-               new_dentry->d_name.name, new_dir->i_ino);
+       dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu",
+               old_dentry, old_inode->i_ino, old_dir->i_ino,
+               new_dentry, new_dir->i_ino);
        ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
        ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
        if (unlink)
index 123c79b7261ef8092e57477bd1141d365a0a2a3f..22924e048ac04aab77dba789e096a093f118e50f 100644 (file)
@@ -44,7 +44,7 @@
  * 'ubifs_writepage()' we are only guaranteed that the page is locked.
  *
  * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
- * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
+ * read-ahead path does not lock it ("sys_read -> generic_file_read_iter ->
  * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
  * set as well. However, UBIFS disables readahead.
  */
@@ -1396,8 +1396,8 @@ static int update_mctime(struct ubifs_info *c, struct inode *inode)
        return 0;
 }
 
-static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos)
+static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
 {
        int err;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -1407,7 +1407,7 @@ static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                return err;
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       return generic_file_write_iter(iocb, iter, pos);
 }
 
 static int ubifs_set_page_dirty(struct page *page)
@@ -1583,8 +1583,8 @@ const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = ubifs_aio_write,
+       .read_iter       = generic_file_read_iter,
+       .write_iter      = ubifs_write_iter,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
index 76ca53cd3eeec52c53ab064221a6519d8c70c80c..9718da86ad01a804db1e3c7e2a2ca6923f6299a3 100644 (file)
@@ -668,8 +668,7 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
        ubifs_assert(!wbuf->used);
 
        for (i = 0; ; i++) {
-               int space_before = c->leb_size - wbuf->offs - wbuf->used;
-               int space_after;
+               int space_before, space_after;
 
                cond_resched();
 
index afaad07f3b29f4465ba2b3e0702a947f7b60c071..0e045e75abd8ab7ac1a17cd502c9ff0c22d691e3 100644 (file)
@@ -933,10 +933,8 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        int move = (old_dir != new_dir);
        struct ubifs_inode *uninitialized_var(new_ui);
 
-       dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
-               old_dentry->d_name.len, old_dentry->d_name.name,
-               old_dir->i_ino, new_dentry->d_name.len,
-               new_dentry->d_name.name, new_dir->i_ino);
+       dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
+               old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
        ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
        ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
        ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
index 0f7139bdb2c20370f7f81489b14db155128c2f62..5e0a63b1b0d54a24d407c08cf1f3fef25008e0f9 100644 (file)
@@ -303,8 +303,8 @@ int ubifs_setxattr(struct dentry *dentry, const char *name,
        union ubifs_key key;
        int err, type;
 
-       dbg_gen("xattr '%s', host ino %lu ('%.*s'), size %zd", name,
-               host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+       dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name,
+               host->i_ino, dentry, size);
        ubifs_assert(mutex_is_locked(&host->i_mutex));
 
        if (size > UBIFS_MAX_INO_DATA)
@@ -367,8 +367,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
        union ubifs_key key;
        int err;
 
-       dbg_gen("xattr '%s', ino %lu ('%.*s'), buf size %zd", name,
-               host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+       dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
+               host->i_ino, dentry, size);
 
        err = check_namespace(&nm);
        if (err < 0)
@@ -426,8 +426,8 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
        int err, len, written = 0;
        struct qstr nm = { .name = NULL };
 
-       dbg_gen("ino %lu ('%.*s'), buffer size %zd", host->i_ino,
-               dentry->d_name.len, dentry->d_name.name, size);
+       dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
+               dentry, size);
 
        len = host_ui->xattr_names + host_ui->xattr_cnt;
        if (!buffer)
@@ -529,8 +529,8 @@ int ubifs_removexattr(struct dentry *dentry, const char *name)
        union ubifs_key key;
        int err;
 
-       dbg_gen("xattr '%s', ino %lu ('%.*s')", name,
-               host->i_ino, dentry->d_name.len, dentry->d_name.name);
+       dbg_gen("xattr '%s', ino %lu ('%pd')", name,
+               host->i_ino, dentry);
        ubifs_assert(mutex_is_locked(&host->i_mutex));
 
        err = check_namespace(&nm);
index c02a27a19c6df0984eb3ea13fc5a06c5e251aaa5..9985beecffcabbe5fd97680ef1902e55f3d02ee7 100644 (file)
@@ -119,8 +119,7 @@ static int udf_adinicb_write_end(struct file *file,
 }
 
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
-                                    const struct iovec *iov,
-                                    loff_t offset, unsigned long nr_segs)
+                                    struct iov_iter *iter, loff_t offset)
 {
        /* Fallback to buffered I/O. */
        return 0;
@@ -134,8 +133,8 @@ const struct address_space_operations udf_adinicb_aops = {
        .direct_IO      = udf_adinicb_direct_IO,
 };
 
-static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                 unsigned long nr_segs, loff_t ppos)
+static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                  loff_t ppos)
 {
        ssize_t retval;
        struct file *file = iocb->ki_filp;
@@ -169,7 +168,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        } else
                up_write(&iinfo->i_data_sem);
 
-       retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
+       retval = generic_file_write_iter(iocb, iter, ppos);
        if (retval > 0)
                mark_inode_dirty(inode);
 
@@ -243,12 +242,12 @@ static int udf_release_file(struct inode *inode, struct file *filp)
 
 const struct file_operations udf_file_operations = {
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .unlocked_ioctl         = udf_ioctl,
        .open                   = generic_file_open,
        .mmap                   = generic_file_mmap,
        .write                  = do_sync_write,
-       .aio_write              = udf_file_aio_write,
+       .write_iter             = udf_file_write_iter,
        .release                = udf_release_file,
        .fsync                  = generic_file_fsync,
        .splice_read            = generic_file_splice_read,
index 7e5aae4bf46fd1c1e65da56615238ffe4944fd3b..6eaf5edf1ea1577e88cafc60184963e1b18df5a5 100644 (file)
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);
+       struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
 
-       mutex_lock(&sbi->s_alloc_mutex);
-       if (sbi->s_lvid_bh) {
-               struct logicalVolIntegrityDescImpUse *lvidiu =
-                                                       udf_sb_lvidiu(sbi);
+       if (lvidiu) {
+               mutex_lock(&sbi->s_alloc_mutex);
                if (S_ISDIR(inode->i_mode))
                        le32_add_cpu(&lvidiu->numDirs, -1);
                else
                        le32_add_cpu(&lvidiu->numFiles, -1);
                udf_updated_lvid(sb);
+               mutex_unlock(&sbi->s_alloc_mutex);
        }
-       mutex_unlock(&sbi->s_alloc_mutex);
 
        udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
        struct udf_inode_info *iinfo;
        struct udf_inode_info *dinfo = UDF_I(dir);
+       struct logicalVolIntegrityDescImpUse *lvidiu;
 
        inode = new_inode(sb);
 
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
                return NULL;
        }
 
-       if (sbi->s_lvid_bh) {
-               struct logicalVolIntegrityDescImpUse *lvidiu;
-
+       lvidiu = udf_sb_lvidiu(sb);
+       if (lvidiu) {
                iinfo->i_unique = lvid_get_unique_id(sb);
                mutex_lock(&sbi->s_alloc_mutex);
-               lvidiu = udf_sb_lvidiu(sbi);
                if (S_ISDIR(mode))
                        le32_add_cpu(&lvidiu->numDirs, 1);
                else
index 062b7925bca04c02949919ac37aab790d7b982a2..986e11ad176b2040f8b61c1ba318a69ecf736aa2 100644 (file)
@@ -216,19 +216,17 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
        return ret;
 }
 
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
-                            const struct iovec *iov,
-                            loff_t offset, unsigned long nr_segs)
+static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                 udf_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
        if (unlikely(ret < 0 && (rw & WRITE)))
-               udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               udf_write_failed(mapping, offset + iov_iter_count(iter));
        return ret;
 }
 
index 839a2bad7f45b693db4ed478598b997c42077712..91219385691d8f80d1db9aed3973183bb931a48d 100644 (file)
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
 static int udf_statfs(struct dentry *, struct kstatfs *);
 static int udf_show_options(struct seq_file *, struct dentry *);
 
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
 {
-       struct logicalVolIntegrityDesc *lvid =
-               (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
-       __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
-       __u32 offset = number_of_partitions * 2 *
-                               sizeof(uint32_t)/sizeof(uint8_t);
+       struct logicalVolIntegrityDesc *lvid;
+       unsigned int partnum;
+       unsigned int offset;
+
+       if (!UDF_SB(sb)->s_lvid_bh)
+               return NULL;
+       lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
+       partnum = le32_to_cpu(lvid->numOfPartitions);
+       if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
+            offsetof(struct logicalVolIntegrityDesc, impUse)) /
+            (2 * sizeof(uint32_t)) < partnum) {
+               udf_err(sb, "Logical volume integrity descriptor corrupted "
+                       "(numOfPartitions = %u)!\n", partnum);
+               return NULL;
+       }
+       /* The offset is to skip freeSpaceTable and sizeTable arrays */
+       offset = partnum * 2 * sizeof(uint32_t);
        return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
 }
 
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        struct udf_options uopt;
        struct udf_sb_info *sbi = UDF_SB(sb);
        int error = 0;
+       struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
 
-       if (sbi->s_lvid_bh) {
-               int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+       if (lvidiu) {
+               int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
                if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
                        return -EACCES;
        }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
 
        if (!bh)
                return;
-
-       mutex_lock(&sbi->s_alloc_mutex);
        lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
-       lvidiu = udf_sb_lvidiu(sbi);
+       lvidiu = udf_sb_lvidiu(sb);
+       if (!lvidiu)
+               return;
 
+       mutex_lock(&sbi->s_alloc_mutex);
        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
        udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
 
        if (!bh)
                return;
+       lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+       lvidiu = udf_sb_lvidiu(sb);
+       if (!lvidiu)
+               return;
 
        mutex_lock(&sbi->s_alloc_mutex);
-       lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
-       lvidiu = udf_sb_lvidiu(sbi);
        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
        udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 
        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDescImpUse *lvidiu =
-                                                       udf_sb_lvidiu(sbi);
-               uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
-               uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
-               /* uint16_t maxUDFWriteRev =
-                               le16_to_cpu(lvidiu->maxUDFWriteRev); */
+                                                       udf_sb_lvidiu(sb);
+               uint16_t minUDFReadRev;
+               uint16_t minUDFWriteRev;
 
+               if (!lvidiu) {
+                       ret = -EINVAL;
+                       goto error_out;
+               }
+               minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
+               minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
                if (minUDFReadRev > UDF_MAX_READ_VERSION) {
                        udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
-                               le16_to_cpu(lvidiu->minUDFReadRev),
+                               minUDFReadRev,
                                UDF_MAX_READ_VERSION);
                        ret = -EINVAL;
                        goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct logicalVolIntegrityDescImpUse *lvidiu;
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
-       if (sbi->s_lvid_bh != NULL)
-               lvidiu = udf_sb_lvidiu(sbi);
-       else
-               lvidiu = NULL;
-
+       lvidiu = udf_sb_lvidiu(sb);
        buf->f_type = UDF_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
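
The reworked udf_sb_lvidiu() above takes the superblock, returns NULL when no LVID buffer is present, and rejects a numOfPartitions value whose freeSpaceTable/sizeTable arrays could not fit in the block, rather than trusting it when computing the impUse offset. A worked, illustrative version of that bound check (the structure sizes below are made-up stand-ins, not the real on-disk sizes):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int blocksize       = 2048;  /* typical UDF block size */
        unsigned int lvid_impuse_off = 80;    /* stand-in for offsetof(logicalVolIntegrityDesc, impUse) */
        unsigned int impuse_size     = 46;    /* stand-in for sizeof(logicalVolIntegrityDescImpUse) */
        unsigned int partnum;

        /* freeSpaceTable and sizeTable take 2 * partnum 32-bit entries */
        unsigned int max_parts =
                (blocksize - impuse_size - lvid_impuse_off) / (2 * sizeof(uint32_t));

        for (partnum = 0; partnum < 600; partnum += 300)
                printf("numOfPartitions=%u -> %s\n", partnum,
                       partnum > max_parts ? "corrupted, reject" : "ok");
        return 0;
}
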
index ed401e94aa8c956dd8685ab33f496fc4ea52eec7..1f32c7bd9f57f21fb2859413cacde08b2949fd3a 100644 (file)
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
index 33afa20d450982eafb4e1bcc77193cce152270d1..e155e4c4af879c46b3bbb682366b1cc99f981aed 100644 (file)
@@ -36,9 +36,9 @@
 const struct file_operations ufs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
        .fsync          = generic_file_fsync,
index 0719e4db93f274de9af844f3ef66ce387b02a716..33a69fabfd83306b8f6b7fb7f48d328f3e81fa75 100644 (file)
@@ -72,6 +72,7 @@ xfs-y                         += xfs_alloc.o \
                                   xfs_dir2_leaf.o \
                                   xfs_dir2_node.o \
                                   xfs_dir2_sf.o \
+                                  xfs_dquot_buf.o \
                                   xfs_ialloc.o \
                                   xfs_ialloc_btree.o \
                                   xfs_icreate_item.o \
@@ -103,7 +104,11 @@ xfs-$(CONFIG_XFS_QUOTA)            += xfs_dquot.o \
                                   xfs_qm_bhv.o \
                                   xfs_qm.o \
                                   xfs_quotaops.o
-xfs-$(CONFIG_XFS_RT)           += xfs_rtalloc.o
+
+# xfs_rtbitmap is shared with libxfs
+xfs-$(CONFIG_XFS_RT)           += xfs_rtalloc.o \
+                                  xfs_rtbitmap.o
+
 xfs-$(CONFIG_XFS_POSIX_ACL)    += xfs_acl.o
 xfs-$(CONFIG_PROC_FS)          += xfs_stats.o
 xfs-$(CONFIG_SYSCTL)           += xfs_sysctl.o
index 0e2f37efedd0547a05b5bec4c0109db83192bb72..370eb3e121d1b2f292c65ddabd660ad08ae65537 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_acl.h"
+#include "xfs_attr.h"
 #include "xfs_trace.h"
 #include <linux/slab.h>
 #include <linux/xattr.h>
index 1cb740afd674e8fd3f5ce0a3f47bfc00f8b36471..3fc109819c34eb7a42973d7640525fefe66cc08e 100644 (file)
@@ -128,8 +128,6 @@ typedef struct xfs_agf {
 extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
                        xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
 
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-
 /*
  * Size of the unlinked inode hash table in the agi.
  */
@@ -191,8 +189,6 @@ typedef struct xfs_agi {
 extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
                                xfs_agnumber_t agno, struct xfs_buf **bpp);
 
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
 /*
  * The third a.g. block contains the a.g. freelist, an array
  * of block pointers to blocks owned by the allocation btree code.
index 5a1393f5e020739a648612002f7ae9a3f704d032..bcf16528bac5dc87302294e0008557827978269a 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_log.h"
 
 struct workqueue_struct *xfs_alloc_wq;
 
index 99d0a61015587f7b8400bb8519bec6717b91c1cf..feacb061bab78bb3492ee0ff37eed5cc0fb95894 100644 (file)
@@ -231,7 +231,4 @@ xfs_alloc_get_rec(
        xfs_extlen_t            *len,   /* output: length of extent */
        int                     *stat); /* output: success/failure */
 
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-
 #endif /* __XFS_ALLOC_H__ */
index cafc90251d1993e76c10d0b6c6dae9d875dcd21e..698587f6c60a68f84969acb74b6f70f6bce9c006 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 
 
 STATIC struct xfs_btree_cur *
index e3a3f7424192ecd15a21169d94817e507cc1e6af..45e189e7e81c31b18a0de124cd4d256407d2a120 100644 (file)
@@ -26,39 +26,6 @@ struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
 
-/*
- * There are two on-disk btrees, one sorted by blockno and one sorted
- * by blockcount and blockno.  All blocks look the same to make the code
- * simpler; if we have time later, we'll make the optimizations.
- */
-#define        XFS_ABTB_MAGIC          0x41425442      /* 'ABTB' for bno tree */
-#define        XFS_ABTB_CRC_MAGIC      0x41423342      /* 'AB3B' */
-#define        XFS_ABTC_MAGIC          0x41425443      /* 'ABTC' for cnt tree */
-#define        XFS_ABTC_CRC_MAGIC      0x41423343      /* 'AB3C' */
-
-/*
- * Data record/key structure
- */
-typedef struct xfs_alloc_rec {
-       __be32          ar_startblock;  /* starting block number */
-       __be32          ar_blockcount;  /* count of free blocks */
-} xfs_alloc_rec_t, xfs_alloc_key_t;
-
-typedef struct xfs_alloc_rec_incore {
-       xfs_agblock_t   ar_startblock;  /* starting block number */
-       xfs_extlen_t    ar_blockcount;  /* count of free blocks */
-} xfs_alloc_rec_incore_t;
-
-/* btree pointer type */
-typedef __be32 xfs_alloc_ptr_t;
-
-/*
- * Block numbers in the AG:
- * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
- */
-#define        XFS_BNO_BLOCK(mp)       ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
-#define        XFS_CNT_BLOCK(mp)       ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
-
 /*
  * Btree block header size depends on a superblock flag.
  */
@@ -95,6 +62,4 @@ extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
                xfs_agnumber_t, xfs_btnum_t);
 extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
 
-extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
-
 #endif /* __XFS_ALLOC_BTREE_H__ */
index e51e581454e93113c7ead8d81136ab82bd29da94..20ba95e6096689e0f8c1dcf9b938696f53ab2b53 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
@@ -31,6 +32,8 @@
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
 #include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
@@ -333,7 +336,7 @@ xfs_map_blocks(
 
        if (type == XFS_IO_DELALLOC &&
            (!nimaps || isnullstartblock(imap->br_startblock))) {
-               error = xfs_iomap_write_allocate(ip, offset, count, imap);
+               error = xfs_iomap_write_allocate(ip, offset, imap);
                if (!error)
                        trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
                return -XFS_ERROR(error);
@@ -1413,9 +1416,8 @@ STATIC ssize_t
 xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
-       const struct iovec      *iov,
-       loff_t                  offset,
-       unsigned long           nr_segs)
+       struct iov_iter         *iter,
+       loff_t                  offset)
 {
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
@@ -1423,7 +1425,7 @@ xfs_vm_direct_IO(
        ssize_t                 ret;
 
        if (rw & WRITE) {
-               size_t size = iov_length(iov, nr_segs);
+               size_t size = iov_iter_count(iter);
 
                /*
                 * We cannot preallocate a size update transaction here as we
@@ -1435,15 +1437,13 @@ xfs_vm_direct_IO(
                if (offset + size > XFS_I(inode)->i_d.di_size)
                        ioend->io_isdirect = 1;
 
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
+               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
                                            xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL, 0);
                if (ret != -EIOCBQUEUED && iocb->private)
                        goto out_destroy_ioend;
        } else {
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
+               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
                                            xfs_get_blocks_direct,
                                            NULL, NULL, 0);
        }
index ddcf2267ffa6fdf1bcf33cf7439c6b379472c2b4..b86127072ac3c2b9bd201b34cf208c3f1735cd68 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
@@ -41,6 +42,7 @@
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 /*
  * xfs_attr.c
index bb24b07cbedb3d46a6286e8e169a734ad9fa3baa..f33fb62b7f17d36c3cba87eac0cedaae8475fa49 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
+#include "xfs_inode.h"
 #include "xfs_alloc.h"
-#include "xfs_btree.h"
 #include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
@@ -41,7 +39,7 @@
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
-#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
 
 /*
  * Look at all the extents for this logical region,
index 86db20a9cc02b5df2dd7ecb3c44eca39d7498dd7..a0f90193a247bfce514fdc5b13b2aafa6bc98ca4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 
 /*
index c1022138c7e6f3261819a0c52618b6bb1a9cf34f..3ec5ec0b86789004428591b824d982a16262a829 100644 (file)
 #ifndef __XFS_ATTR_LEAF_H__
 #define        __XFS_ATTR_LEAF_H__
 
-/*
- * Attribute storage layout, internal structure, access macros, etc.
- *
- * Attribute lists are structured around Btrees where all the data
- * elements are in the leaf nodes.  Attribute names are hashed into an int,
- * then that int is used as the index into the Btree.  Since the hashval
- * of an attribute name may not be unique, we may have duplicate keys.  The
- * internal links in the Btree are logical block offsets into the file.
- */
-
 struct attrlist;
 struct attrlist_cursor_kern;
 struct xfs_attr_list_context;
@@ -38,226 +28,6 @@ struct xfs_da_state_blk;
 struct xfs_inode;
 struct xfs_trans;
 
-/*========================================================================
- * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This is the structure of the leaf nodes in the Btree.
- *
- * Struct leaf_entry's are packed from the top.  Name/values grow from the
- * bottom but are not packed.  The freemap contains run-length-encoded entries
- * for the free bytes after the leaf_entry's, but only the N largest such,
- * smaller runs are dropped.  When the freemap doesn't show enough space
- * for an allocation, we compact the name/value area and try again.  If we
- * still don't have enough space, then we have to split the block.  The
- * name/value structs (both local and remote versions) must be 32bit aligned.
- *
- * Since we have duplicate hash keys, for each key that matches, compare
- * the actual name string.  The root and intermediate node search always
- * takes the first-in-the-block key match found, so we should only have
- * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
- * nodes until the hash key changes or the attribute name is found.
- *
- * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
- * the leaf_entry.  The namespaces are independent only because we also look
- * at the namespace bit when we are looking for a matching attribute name.
- *
- * We also store an "incomplete" bit in the leaf_entry.  It shows that an
- * attribute is in the middle of being created and should not be shown to
- * the user if we crash during the time that the bit is set.  We clear the
- * bit when we have finished setting up the attribute.  We do this because
- * we cannot create some large attributes inside a single transaction, and we
- * need some indication that we weren't finished if we crash in the middle.
- */
-#define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
-
-typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
-       __be16  base;                     /* base of free region */
-       __be16  size;                     /* length of free region */
-} xfs_attr_leaf_map_t;
-
-typedef struct xfs_attr_leaf_hdr {     /* constant-structure header block */
-       xfs_da_blkinfo_t info;          /* block type, links, etc. */
-       __be16  count;                  /* count of active leaf_entry's */
-       __be16  usedbytes;              /* num bytes of names/values stored */
-       __be16  firstused;              /* first used byte in name area */
-       __u8    holes;                  /* != 0 if blk needs compaction */
-       __u8    pad1;
-       xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
-                                       /* N largest free regions */
-} xfs_attr_leaf_hdr_t;
-
-typedef struct xfs_attr_leaf_entry {   /* sorted on key, not name */
-       __be32  hashval;                /* hash value of name */
-       __be16  nameidx;                /* index into buffer of name/value */
-       __u8    flags;                  /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
-       __u8    pad2;                   /* unused pad byte */
-} xfs_attr_leaf_entry_t;
-
-typedef struct xfs_attr_leaf_name_local {
-       __be16  valuelen;               /* number of bytes in value */
-       __u8    namelen;                /* length of name bytes */
-       __u8    nameval[1];             /* name/value bytes */
-} xfs_attr_leaf_name_local_t;
-
-typedef struct xfs_attr_leaf_name_remote {
-       __be32  valueblk;               /* block number of value bytes */
-       __be32  valuelen;               /* number of bytes in value */
-       __u8    namelen;                /* length of name bytes */
-       __u8    name[1];                /* name bytes */
-} xfs_attr_leaf_name_remote_t;
-
-typedef struct xfs_attr_leafblock {
-       xfs_attr_leaf_hdr_t     hdr;    /* constant-structure header block */
-       xfs_attr_leaf_entry_t   entries[1];     /* sorted on key, not name */
-       xfs_attr_leaf_name_local_t namelist;    /* grows from bottom of buf */
-       xfs_attr_leaf_name_remote_t valuelist;  /* grows from bottom of buf */
-} xfs_attr_leafblock_t;
-
-/*
- * CRC enabled leaf structures. Called "version 3" structures to match the
- * version number of the directory and dablk structures for this feature, and
- * attr2 is already taken by the variable inode attribute fork size feature.
- */
-struct xfs_attr3_leaf_hdr {
-       struct xfs_da3_blkinfo  info;
-       __be16                  count;
-       __be16                  usedbytes;
-       __be16                  firstused;
-       __u8                    holes;
-       __u8                    pad1;
-       struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
-       __be32                  pad2;           /* 64 bit alignment */
-};
-
-#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
-
-struct xfs_attr3_leafblock {
-       struct xfs_attr3_leaf_hdr       hdr;
-       struct xfs_attr_leaf_entry      entries[1];
-
-       /*
-        * The rest of the block contains the following structures after the
-        * leaf entries, growing from the bottom up. The variables are never
-        * referenced, the locations accessed purely from helper functions.
-        *
-        * struct xfs_attr_leaf_name_local
-        * struct xfs_attr_leaf_name_remote
-        */
-};
-
-/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
-       __uint32_t      forw;
-       __uint32_t      back;
-       __uint16_t      magic;
-       __uint16_t      count;
-       __uint16_t      usedbytes;
-       __uint16_t      firstused;
-       __u8            holes;
-       struct {
-               __uint16_t      base;
-               __uint16_t      size;
-       } freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
- * Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
- */
-#define        XFS_ATTR_LOCAL_BIT      0       /* attr is stored locally */
-#define        XFS_ATTR_ROOT_BIT       1       /* limit access to trusted attrs */
-#define        XFS_ATTR_SECURE_BIT     2       /* limit access to secure attrs */
-#define        XFS_ATTR_INCOMPLETE_BIT 7       /* attr in middle of create/delete */
-#define XFS_ATTR_LOCAL         (1 << XFS_ATTR_LOCAL_BIT)
-#define XFS_ATTR_ROOT          (1 << XFS_ATTR_ROOT_BIT)
-#define XFS_ATTR_SECURE                (1 << XFS_ATTR_SECURE_BIT)
-#define XFS_ATTR_INCOMPLETE    (1 << XFS_ATTR_INCOMPLETE_BIT)
-
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK         (ATTR_ROOT | ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK_MASK       (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags)     ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags)       ((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
-                                        ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
-                                        ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
-
-/*
- * Alignment for namelist and valuelist entries (since they are mixed
- * there can be only one alignment value)
- */
-#define        XFS_ATTR_LEAF_NAME_ALIGN        ((uint)sizeof(xfs_dablk_t))
-
-static inline int
-xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
-{
-       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
-               return sizeof(struct xfs_attr3_leaf_hdr);
-       return sizeof(struct xfs_attr_leaf_hdr);
-}
-
-static inline struct xfs_attr_leaf_entry *
-xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
-{
-       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
-               return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
-       return &leafp->entries[0];
-}
-
-/*
- * Cast typed pointers for "local" and "remote" name/value structs.
- */
-static inline char *
-xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
-{
-       struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
-
-       return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
-}
-
-static inline xfs_attr_leaf_name_remote_t *
-xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
-{
-       return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-static inline xfs_attr_leaf_name_local_t *
-xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
-{
-       return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-/*
- * Calculate total bytes used (including trailing pad for alignment) for
- * a "local" name/value structure, a "remote" name/value structure, and
- * a pointer which might be either.
- */
-static inline int xfs_attr_leaf_entsize_remote(int nlen)
-{
-       return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
-               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
-{
-       return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
-               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local_max(int bsize)
-{
-       return (((bsize) >> 1) + ((bsize) >> 2));
-}
-
 /*
  * Used to keep a list of "remote value" extents when unlinking an inode.
  */
@@ -336,6 +106,4 @@ void        xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to,
 void   xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
                                   struct xfs_attr3_icleaf_hdr *from);
 
-extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
-
 #endif /* __XFS_ATTR_LEAF_H__ */
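
For reference, the local and remote entsize helpers deleted above share one power-of-two round-up, padding every name/value struct out to XFS_ATTR_LEAF_NAME_ALIGN. A minimal userspace sketch of that arithmetic follows; the 4-byte alignment and the 3 fixed bytes ahead of nameval[] are illustrative assumptions, not values taken from this patch.

#include <stdio.h>

/* Round nbytes up to the next multiple of align (a power of two). */
static unsigned int roundup_align(unsigned int nbytes, unsigned int align)
{
	return (nbytes + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned int align = 4;		/* assumed XFS_ATTR_LEAF_NAME_ALIGN */
	unsigned int fixed = 3;		/* assumed fixed bytes before nameval[] */
	unsigned int namelen = 5, valuelen = 3;

	/* 3 + 5 + 3 = 11 payload bytes, padded up to 12 */
	printf("local entsize = %u\n",
	       roundup_align(fixed + namelen + valuelen, align));
	return 0;
}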
index cbc80d4851772a9df2cb51abbd83e5ef799c6870..46c4ce148a432662c6038376b9ce70497a5c720a 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 STATIC int
 xfs_attr_shortform_compare(const void *a, const void *b)
index 712a502de619b097df202f744ba481bd3471847d..2e5530467f2d3fb28a4a1e871c87994994c6c02f 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
@@ -42,6 +40,7 @@
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
 #include "xfs_buf_item.h"
+#include "xfs_error.h"
 
 #define ATTR_RMTVALUE_MAPSIZE  1       /* # of map entries at once */
 
index 92a8fd7977cc2b48649ed3e5c22344fe83fcd8c1..5a9acfa156d7af59855fde90fb5e6d8721ea289b 100644 (file)
 #ifndef __XFS_ATTR_REMOTE_H__
 #define        __XFS_ATTR_REMOTE_H__
 
-#define XFS_ATTR3_RMT_MAGIC    0x5841524d      /* XARM */
-
-/*
- * There is one of these headers per filesystem block in a remote attribute.
- * This is done to ensure there is a 1:1 mapping between the attribute value
- * length and the number of blocks needed to store the attribute. This makes the
- * verification of a buffer a little more complex, but greatly simplifies the
- * allocation, reading and writing of these attributes as we don't have to guess
- * the number of blocks needed to store the attribute data.
- */
-struct xfs_attr3_rmt_hdr {
-       __be32  rm_magic;
-       __be32  rm_offset;
-       __be32  rm_bytes;
-       __be32  rm_crc;
-       uuid_t  rm_uuid;
-       __be64  rm_owner;
-       __be64  rm_blkno;
-       __be64  rm_lsn;
-};
-
-#define XFS_ATTR3_RMT_CRC_OFF  offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
-
-#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize)   \
-       ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
-                       sizeof(struct xfs_attr3_rmt_hdr) : 0))
-
-extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
-
 int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
 
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
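
The header and XFS_ATTR3_RMT_BUF_SPACE() macro removed above determine how much of each filesystem block a remote attribute value can actually use, which is the quantity xfs_attr3_rmt_blocks() divides by. Below is a standalone sketch of that calculation, assuming 4096-byte blocks and a 56-byte per-block header (the field sizes of struct xfs_attr3_rmt_hdr shown above); it is an illustration, not the kernel routine itself.

#include <stdio.h>

#define BLOCKSIZE	4096u	/* assumed fs block size */
#define RMT_HDR_SIZE	56u	/* assumed sizeof(struct xfs_attr3_rmt_hdr) */

/* Blocks needed for a remote value: ceiling of attrlen / usable space. */
static unsigned int rmt_blocks(unsigned int attrlen, int has_crc)
{
	unsigned int space = BLOCKSIZE - (has_crc ? RMT_HDR_SIZE : 0);

	return (attrlen + space - 1) / space;
}

int main(void)
{
	printf("64KiB value, crc fs:    %u blocks\n", rmt_blocks(65536, 1));
	printf("64KiB value, no-crc fs: %u blocks\n", rmt_blocks(65536, 0));
	return 0;
}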
index 48228848f5ae3e2e6f52900b6d0ea7215274a36d..16ce44a2b43eaac5730be0e3084ed44fd93e86e1 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
+#include "xfs_log_format.h"
 
 /*
  * XFS bit manipulation routines, used in non-realtime code.
index f47e65c30be6ddde5caa276d3262540d603d07dc..1c02da8bb7df5a0bf5729cd5375a0266731e545c 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_mount.h"
-#include "xfs_itable.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr_leaf.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_buf_item.h"
-#include "xfs_filestream.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
 
 
 kmem_zone_t            *xfs_bmap_free_item_zone;
@@ -1482,7 +1480,7 @@ xfs_bmap_search_extents(
                xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                                "Access to block zero in inode %llu "
                                "start_block: %llx start_off: %llx "
-                               "blkcnt: %llx extent-state: %x lastx: %x\n",
+                               "blkcnt: %llx extent-state: %x lastx: %x",
                        (unsigned long long)ip->i_ino,
                        (unsigned long long)gotp->br_startblock,
                        (unsigned long long)gotp->br_startoff,
index bb8de8e399c4b351effa08e6669e000438751d37..2fb4a2202e1731d153a5250dca593985f0191094 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
-#include "xfs_itable.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * Determine the extent state.
index e367461a638e5b65ffdedc77bed6121dc1150161..6e42e1e50b89394e2c0a0a003c339ac03d127445 100644 (file)
 #ifndef __XFS_BMAP_BTREE_H__
 #define __XFS_BMAP_BTREE_H__
 
-#define XFS_BMAP_MAGIC         0x424d4150      /* 'BMAP' */
-#define XFS_BMAP_CRC_MAGIC     0x424d4133      /* 'BMA3' */
-
 struct xfs_btree_cur;
 struct xfs_btree_block;
 struct xfs_mount;
 struct xfs_inode;
 struct xfs_trans;
 
-/*
- * Bmap root header, on-disk form only.
- */
-typedef struct xfs_bmdr_block {
-       __be16          bb_level;       /* 0 is a leaf */
-       __be16          bb_numrecs;     /* current # of data records */
-} xfs_bmdr_block_t;
-
-/*
- * Bmap btree record and extent descriptor.
- *  l0:63 is an extent flag (value 1 indicates non-normal).
- *  l0:9-62 are startoff.
- *  l0:0-8 and l1:21-63 are startblock.
- *  l1:0-20 are blockcount.
- */
-#define BMBT_EXNTFLAG_BITLEN   1
-#define BMBT_STARTOFF_BITLEN   54
-#define BMBT_STARTBLOCK_BITLEN 52
-#define BMBT_BLOCKCOUNT_BITLEN 21
-
-typedef struct xfs_bmbt_rec {
-       __be64                  l0, l1;
-} xfs_bmbt_rec_t;
-
-typedef __uint64_t     xfs_bmbt_rec_base_t;    /* use this for casts */
-typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
-
-typedef struct xfs_bmbt_rec_host {
-       __uint64_t              l0, l1;
-} xfs_bmbt_rec_host_t;
-
-/*
- * Values and macros for delayed-allocation startblock fields.
- */
-#define STARTBLOCKVALBITS      17
-#define STARTBLOCKMASKBITS     (15 + XFS_BIG_BLKNOS * 20)
-#define DSTARTBLOCKMASKBITS    (15 + 20)
-#define STARTBLOCKMASK         \
-       (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-#define DSTARTBLOCKMASK                \
-       (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-
-static inline int isnullstartblock(xfs_fsblock_t x)
-{
-       return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
-}
-
-static inline int isnulldstartblock(xfs_dfsbno_t x)
-{
-       return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
-}
-
-static inline xfs_fsblock_t nullstartblock(int k)
-{
-       ASSERT(k < (1 << STARTBLOCKVALBITS));
-       return STARTBLOCKMASK | (k);
-}
-
-static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
-{
-       return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
-}
-
-/*
- * Possible extent formats.
- */
-typedef enum {
-       XFS_EXTFMT_NOSTATE = 0,
-       XFS_EXTFMT_HASSTATE
-} xfs_exntfmt_t;
-
-/*
- * Possible extent states.
- */
-typedef enum {
-       XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
-       XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
-} xfs_exntst_t;
-
 /*
  * Extent state and extent format macros.
  */
@@ -114,27 +32,6 @@ typedef enum {
                XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE)
 #define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN)
 
-/*
- * Incore version of above.
- */
-typedef struct xfs_bmbt_irec
-{
-       xfs_fileoff_t   br_startoff;    /* starting file offset */
-       xfs_fsblock_t   br_startblock;  /* starting block number */
-       xfs_filblks_t   br_blockcount;  /* number of blocks */
-       xfs_exntst_t    br_state;       /* extent state */
-} xfs_bmbt_irec_t;
-
-/*
- * Key structure for non-leaf levels of the tree.
- */
-typedef struct xfs_bmbt_key {
-       __be64          br_startoff;    /* starting file offset */
-} xfs_bmbt_key_t, xfs_bmdr_key_t;
-
-/* btree pointer type */
-typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-
 /*
  * Btree block header size depends on a superblock flag.
  */
@@ -243,6 +140,4 @@ extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
 extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
                struct xfs_trans *, struct xfs_inode *, int);
 
-extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
-
 #endif /* __XFS_BMAP_BTREE_H__ */
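
The delayed-allocation encoding removed from this header packs an "all ones" marker plus a small indirect-block reservation count into a filesystem block number. Below is a userspace sketch of the round trip, assuming 64-bit block numbers (XFS_BIG_BLKNOS == 1); the constants mirror the definitions deleted above.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STARTBLOCKVALBITS	17
#define STARTBLOCKMASKBITS	(15 + 20)	/* assumes XFS_BIG_BLKNOS == 1 */
#define STARTBLOCKMASK \
	(((((uint64_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)

static uint64_t nullstartblock(int k)		/* encode a delalloc extent */
{
	assert(k < (1 << STARTBLOCKVALBITS));
	return STARTBLOCKMASK | (uint64_t)k;
}

static int isnullstartblock(uint64_t x)
{
	return (x & STARTBLOCKMASK) == STARTBLOCKMASK;
}

static uint64_t startblockval(uint64_t x)	/* recover the reservation */
{
	return x & ~STARTBLOCKMASK;
}

int main(void)
{
	uint64_t sb = nullstartblock(42);	/* 42 indirect blocks reserved */

	printf("delalloc=%d reserved=%llu\n",
	       isnullstartblock(sb), (unsigned long long)startblockval(sb));
	return 0;
}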
index 97f952caea74bd8311a3269ca7520e87b01fcb18..5887e41c0323ae85f867cc9bc83bbdf8c1e41cd1 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_trans.h"
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 /* Kernel only BMAP related definitions and functions */
 
@@ -965,32 +965,12 @@ xfs_free_eofblocks(
        return error;
 }
 
-/*
- * xfs_alloc_file_space()
- *      This routine allocates disk space for the given file.
- *
- *     If alloc_type == 0, this request is for an ALLOCSP type
- *     request which will change the file size.  In this case, no
- *     DMAPI event will be generated by the call.  A TRUNCATE event
- *     will be generated later by xfs_setattr.
- *
- *     If alloc_type != 0, this request is for a RESVSP type
- *     request, and a DMAPI DM_EVENT_WRITE will be generated if the
- *     lower block boundary byte address is less than the file's
- *     length.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
+int
 xfs_alloc_file_space(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
-       int                     alloc_type,
-       int                     attr_flags)
+       int                     alloc_type)
 {
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
@@ -1232,24 +1212,11 @@ xfs_zero_remaining_bytes(
        return error;
 }
 
-/*
- * xfs_free_file_space()
- *      This routine frees disk space for the given file.
- *
- *     This routine is only called by xfs_change_file_space
- *     for an UNRESVSP type call.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
+int
 xfs_free_file_space(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     attr_flags)
+       xfs_off_t               len)
 {
        int                     committed;
        int                     done;
@@ -1267,7 +1234,6 @@ xfs_free_file_space(
        int                     rt;
        xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;
-       int                     need_iolock = 1;
 
        mp = ip->i_mount;
 
@@ -1284,20 +1250,15 @@ xfs_free_file_space(
        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 
-       if (attr_flags & XFS_ATTR_NOLOCK)
-               need_iolock = 0;
-       if (need_iolock) {
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               /* wait for the completion of any pending DIOs */
-               inode_dio_wait(VFS_I(ip));
-       }
+       /* wait for the completion of any pending DIOs */
+       inode_dio_wait(VFS_I(ip));
 
        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
        ioffset = offset & ~(rounding - 1);
        error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                              ioffset, -1);
        if (error)
-               goto out_unlock_iolock;
+               goto out;
        truncate_pagecache_range(VFS_I(ip), ioffset, -1);
 
        /*
@@ -1311,7 +1272,7 @@ xfs_free_file_space(
                error = xfs_bmapi_read(ip, startoffset_fsb, 1,
                                        &imap, &nimap, 0);
                if (error)
-                       goto out_unlock_iolock;
+                       goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        xfs_daddr_t     block;
@@ -1326,7 +1287,7 @@ xfs_free_file_space(
                error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
                                        &imap, &nimap, 0);
                if (error)
-                       goto out_unlock_iolock;
+                       goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
@@ -1412,27 +1373,23 @@ xfs_free_file_space(
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
 
- out_unlock_iolock:
-       if (need_iolock)
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ out:
        return error;
 
  error0:
        xfs_bmap_cancel(&free_list);
  error1:
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
-                   XFS_ILOCK_EXCL);
-       return error;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       goto out;
 }
 
 
-STATIC int
+int
 xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     attr_flags)
+       xfs_off_t               len)
 {
        struct xfs_mount        *mp = ip->i_mount;
        uint                    granularity;
@@ -1453,9 +1410,6 @@ xfs_zero_file_space(
        ASSERT(start_boundary >= offset);
        ASSERT(end_boundary <= offset + len);
 
-       if (!(attr_flags & XFS_ATTR_NOLOCK))
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
        if (start_boundary < end_boundary - 1) {
                /* punch out the page cache over the conversion range */
                truncate_pagecache_range(VFS_I(ip), start_boundary,
@@ -1463,16 +1417,16 @@ xfs_zero_file_space(
                /* convert the blocks */
                error = xfs_alloc_file_space(ip, start_boundary,
                                        end_boundary - start_boundary - 1,
-                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
-                                       attr_flags);
+                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
                if (error)
-                       goto out_unlock;
+                       goto out;
 
                /* We've handled the interior of the range, now for the edges */
-               if (start_boundary != offset)
+               if (start_boundary != offset) {
                        error = xfs_iozero(ip, offset, start_boundary - offset);
-               if (error)
-                       goto out_unlock;
+                       if (error)
+                               goto out;
+               }
 
                if (end_boundary != offset + len)
                        error = xfs_iozero(ip, end_boundary,
@@ -1486,196 +1440,11 @@ xfs_zero_file_space(
                error = xfs_iozero(ip, offset, len);
        }
 
-out_unlock:
-       if (!(attr_flags & XFS_ATTR_NOLOCK))
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+out:
        return error;
 
 }
 
-/*
- * xfs_change_file_space()
- *      This routine allocates or frees disk space for the given file.
- *      The user specified parameters are checked for alignment and size
- *      limitations.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-int
-xfs_change_file_space(
-       xfs_inode_t     *ip,
-       int             cmd,
-       xfs_flock64_t   *bf,
-       xfs_off_t       offset,
-       int             attr_flags)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       int             clrprealloc;
-       int             error;
-       xfs_fsize_t     fsize;
-       int             setprealloc;
-       xfs_off_t       startoffset;
-       xfs_trans_t     *tp;
-       struct iattr    iattr;
-
-       if (!S_ISREG(ip->i_d.di_mode))
-               return XFS_ERROR(EINVAL);
-
-       switch (bf->l_whence) {
-       case 0: /*SEEK_SET*/
-               break;
-       case 1: /*SEEK_CUR*/
-               bf->l_start += offset;
-               break;
-       case 2: /*SEEK_END*/
-               bf->l_start += XFS_ISIZE(ip);
-               break;
-       default:
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * length of <= 0 for resv/unresv/zero is invalid.  length for
-        * alloc/free is ignored completely and we have no idea what userspace
-        * might have set it to, so set it to zero to allow range
-        * checks to pass.
-        */
-       switch (cmd) {
-       case XFS_IOC_ZERO_RANGE:
-       case XFS_IOC_RESVSP:
-       case XFS_IOC_RESVSP64:
-       case XFS_IOC_UNRESVSP:
-       case XFS_IOC_UNRESVSP64:
-               if (bf->l_len <= 0)
-                       return XFS_ERROR(EINVAL);
-               break;
-       default:
-               bf->l_len = 0;
-               break;
-       }
-
-       if (bf->l_start < 0 ||
-           bf->l_start > mp->m_super->s_maxbytes ||
-           bf->l_start + bf->l_len < 0 ||
-           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
-               return XFS_ERROR(EINVAL);
-
-       bf->l_whence = 0;
-
-       startoffset = bf->l_start;
-       fsize = XFS_ISIZE(ip);
-
-       setprealloc = clrprealloc = 0;
-       switch (cmd) {
-       case XFS_IOC_ZERO_RANGE:
-               error = xfs_zero_file_space(ip, startoffset, bf->l_len,
-                                               attr_flags);
-               if (error)
-                       return error;
-               setprealloc = 1;
-               break;
-
-       case XFS_IOC_RESVSP:
-       case XFS_IOC_RESVSP64:
-               error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
-                                               XFS_BMAPI_PREALLOC, attr_flags);
-               if (error)
-                       return error;
-               setprealloc = 1;
-               break;
-
-       case XFS_IOC_UNRESVSP:
-       case XFS_IOC_UNRESVSP64:
-               if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
-                                                               attr_flags)))
-                       return error;
-               break;
-
-       case XFS_IOC_ALLOCSP:
-       case XFS_IOC_ALLOCSP64:
-       case XFS_IOC_FREESP:
-       case XFS_IOC_FREESP64:
-               /*
-                * These operations actually do IO when extending the file, but
-                * the allocation is done seperately to the zeroing that is
-                * done. This set of operations need to be serialised against
-                * other IO operations, such as truncate and buffered IO. We
-                * need to take the IOLOCK here to serialise the allocation and
-                * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
-                * truncate, direct IO) from racing against the transient
-                * allocated but not written state we can have here.
-                */
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               if (startoffset > fsize) {
-                       error = xfs_alloc_file_space(ip, fsize,
-                                       startoffset - fsize, 0,
-                                       attr_flags | XFS_ATTR_NOLOCK);
-                       if (error) {
-                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-                               break;
-                       }
-               }
-
-               iattr.ia_valid = ATTR_SIZE;
-               iattr.ia_size = startoffset;
-
-               error = xfs_setattr_size(ip, &iattr,
-                                        attr_flags | XFS_ATTR_NOLOCK);
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
-               if (error)
-                       return error;
-
-               clrprealloc = 1;
-               break;
-
-       default:
-               ASSERT(0);
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * update the inode timestamp, mode, and prealloc flag bits
-        */
-       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-       if ((attr_flags & XFS_ATTR_DMI) == 0) {
-               ip->i_d.di_mode &= ~S_ISUID;
-
-               /*
-                * Note that we don't have to worry about mandatory
-                * file locking being disabled here because we only
-                * clear the S_ISGID bit if the Group execute bit is
-                * on, but if it was on then mandatory locking wouldn't
-                * have been enabled.
-                */
-               if (ip->i_d.di_mode & S_IXGRP)
-                       ip->i_d.di_mode &= ~S_ISGID;
-
-               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       }
-       if (setprealloc)
-               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-       else if (clrprealloc)
-               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       if (attr_flags & XFS_ATTR_SYNC)
-               xfs_trans_set_sync(tp);
-       return xfs_trans_commit(tp, 0);
-}
-
 /*
  * We need to check that the format of the data fork in the temporary inode is
  * valid for the target inode before doing the swap. This is not a problem with
index 061260946f7a85ae7d972ea11f0142daca9ba482..900747b25772c2b1a41821fba8e99c3cde2b3ffc 100644 (file)
@@ -93,9 +93,12 @@ int  xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
                             int *is_empty);
 
 /* preallocation and hole punch interface */
-int    xfs_change_file_space(struct xfs_inode *ip, int cmd,
-                             xfs_flock64_t *bf, xfs_off_t offset,
-                             int attr_flags);
+int    xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
+                            xfs_off_t len, int alloc_type);
+int    xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
+                           xfs_off_t len);
+int    xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
+                           xfs_off_t len);
 
 /* EOF block manipulation functions */
 bool   xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
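
With the attr_flags/XFS_ATTR_NOLOCK plumbing gone from xfs_alloc_file_space(), xfs_free_file_space() and xfs_zero_file_space(), the IO lock is presumably taken by the caller. The fragment below is a hypothetical caller written against the prototypes above, not code from this patch.

/*
 * Hypothetical example: punch a hole with the reworked interface.
 * The caller is assumed to own the IO lock for the whole operation.
 */
STATIC int
xfs_example_punch_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	error = xfs_free_file_space(ip, offset, len);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}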
index 5690e102243d70e7b87876f0ef9bc07be058da86..9adaae4f3e2fd21c647c9e7fa023a120e5012272 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_buf_item.h"
 #include "xfs_btree.h"
index 06729b67ad58ec2c394a3ecdb1104938ced672c2..91e34f21bacea773eaadecfc0a3c29595e912601 100644 (file)
@@ -26,73 +26,6 @@ struct xfs_trans;
 
 extern kmem_zone_t     *xfs_btree_cur_zone;
 
-/*
- * This nonsense is to make -wlint happy.
- */
-#define        XFS_LOOKUP_EQ   ((xfs_lookup_t)XFS_LOOKUP_EQi)
-#define        XFS_LOOKUP_LE   ((xfs_lookup_t)XFS_LOOKUP_LEi)
-#define        XFS_LOOKUP_GE   ((xfs_lookup_t)XFS_LOOKUP_GEi)
-
-#define        XFS_BTNUM_BNO   ((xfs_btnum_t)XFS_BTNUM_BNOi)
-#define        XFS_BTNUM_CNT   ((xfs_btnum_t)XFS_BTNUM_CNTi)
-#define        XFS_BTNUM_BMAP  ((xfs_btnum_t)XFS_BTNUM_BMAPi)
-#define        XFS_BTNUM_INO   ((xfs_btnum_t)XFS_BTNUM_INOi)
-
-/*
- * Generic btree header.
- *
- * This is a combination of the actual format used on disk for short and long
- * format btrees.  The first three fields are shared by both format, but the
- * pointers are different and should be used with care.
- *
- * To get the size of the actual short or long form headers please use the size
- * macros below.  Never use sizeof(xfs_btree_block).
- *
- * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
- * with the crc feature bit, and all accesses to them must be conditional on
- * that flag.
- */
-struct xfs_btree_block {
-       __be32          bb_magic;       /* magic number for block type */
-       __be16          bb_level;       /* 0 is a leaf */
-       __be16          bb_numrecs;     /* current # of data records */
-       union {
-               struct {
-                       __be32          bb_leftsib;
-                       __be32          bb_rightsib;
-
-                       __be64          bb_blkno;
-                       __be64          bb_lsn;
-                       uuid_t          bb_uuid;
-                       __be32          bb_owner;
-                       __le32          bb_crc;
-               } s;                    /* short form pointers */
-               struct  {
-                       __be64          bb_leftsib;
-                       __be64          bb_rightsib;
-
-                       __be64          bb_blkno;
-                       __be64          bb_lsn;
-                       uuid_t          bb_uuid;
-                       __be64          bb_owner;
-                       __le32          bb_crc;
-                       __be32          bb_pad; /* padding for alignment */
-               } l;                    /* long form pointers */
-       } bb_u;                         /* rest */
-};
-
-#define XFS_BTREE_SBLOCK_LEN   16      /* size of a short form block */
-#define XFS_BTREE_LBLOCK_LEN   24      /* size of a long form block */
-
-/* sizes of CRC enabled btree blocks */
-#define XFS_BTREE_SBLOCK_CRC_LEN       (XFS_BTREE_SBLOCK_LEN + 40)
-#define XFS_BTREE_LBLOCK_CRC_LEN       (XFS_BTREE_LBLOCK_LEN + 48)
-
-#define XFS_BTREE_SBLOCK_CRC_OFF \
-       offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
-#define XFS_BTREE_LBLOCK_CRC_OFF \
-       offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
-
 /*
  * Generic key, ptr and record wrapper structures.
  *
@@ -118,6 +51,18 @@ union xfs_btree_rec {
        xfs_inobt_rec_t         inobt;
 };
 
+/*
+ * This nonsense is to make -wlint happy.
+ */
+#define        XFS_LOOKUP_EQ   ((xfs_lookup_t)XFS_LOOKUP_EQi)
+#define        XFS_LOOKUP_LE   ((xfs_lookup_t)XFS_LOOKUP_LEi)
+#define        XFS_LOOKUP_GE   ((xfs_lookup_t)XFS_LOOKUP_GEi)
+
+#define        XFS_BTNUM_BNO   ((xfs_btnum_t)XFS_BTNUM_BNOi)
+#define        XFS_BTNUM_CNT   ((xfs_btnum_t)XFS_BTNUM_CNTi)
+#define        XFS_BTNUM_BMAP  ((xfs_btnum_t)XFS_BTNUM_BMAPi)
+#define        XFS_BTNUM_INO   ((xfs_btnum_t)XFS_BTNUM_INOi)
+
 /*
  * For logging record fields.
  */
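
The block header definitions moved out of this header encode four possible on-disk header lengths: short or long form pointers, with or without the CRC fields. The sketch below picks the length from the byte counts in the defines above; it illustrates the layout and is not a kernel helper.

#include <stdbool.h>
#include <stdio.h>

#define XFS_BTREE_SBLOCK_LEN		16
#define XFS_BTREE_LBLOCK_LEN		24
#define XFS_BTREE_SBLOCK_CRC_LEN	(XFS_BTREE_SBLOCK_LEN + 40)
#define XFS_BTREE_LBLOCK_CRC_LEN	(XFS_BTREE_LBLOCK_LEN + 48)

/* Pick the on-disk header length for a btree block. */
static int btree_block_len(bool long_form, bool has_crc)
{
	if (long_form)
		return has_crc ? XFS_BTREE_LBLOCK_CRC_LEN : XFS_BTREE_LBLOCK_LEN;
	return has_crc ? XFS_BTREE_SBLOCK_CRC_LEN : XFS_BTREE_SBLOCK_LEN;
}

int main(void)
{
	printf("short form, crc: %d bytes\n", btree_block_len(false, true));
	printf("long form,  crc: %d bytes\n", btree_block_len(true, true));
	return 0;
}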
index 263470075ea2ce11dd39a83603e0b9fa1f0f5a60..c7f0b77dcb0090046b84eda27c68d870af25d45a 100644 (file)
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
-#include "xfs_sb.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_log.h"
+#include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 static kmem_zone_t *xfs_buf_zone;
 
@@ -590,7 +591,7 @@ found:
                error = _xfs_buf_map_pages(bp, flags);
                if (unlikely(error)) {
                        xfs_warn(target->bt_mount,
-                               "%s: failed to map pages\n", __func__);
+                               "%s: failed to map pages", __func__);
                        xfs_buf_relse(bp);
                        return NULL;
                }
@@ -809,7 +810,7 @@ xfs_buf_get_uncached(
        error = _xfs_buf_map_pages(bp, 0);
        if (unlikely(error)) {
                xfs_warn(target->bt_mount,
-                       "%s: failed to map pages\n", __func__);
+                       "%s: failed to map pages", __func__);
                goto fail_free_mem;
        }
 
@@ -1618,7 +1619,7 @@ xfs_setsize_buftarg_flags(
                bdevname(btp->bt_bdev, name);
 
                xfs_warn(btp->bt_mount,
-                       "Cannot set_blocksize to %u on device %s\n",
+                       "Cannot set_blocksize to %u on device %s",
                        sectorsize, name);
                return EINVAL;
        }
index 88c5ea75ebf66abd175bdf2d71898380f2aca9a8..b6d20c55282b9f20a95eae2d10294863b4dd05cd 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 
 kmem_zone_t    *xfs_buf_item_zone;
@@ -628,6 +629,7 @@ xfs_buf_item_unlock(
                else if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
                        if (lip->li_flags & XFS_LI_IN_AIL) {
+                               spin_lock(&lip->li_ailp->xa_lock);
                                xfs_trans_ail_delete(lip->li_ailp, lip,
                                                     SHUTDOWN_LOG_IO_ERROR);
                        }
index db6371087fe8ea9b786f82189b387c55979a2460..3f3455a415102de167271a7467725da410a21f24 100644 (file)
@@ -71,10 +71,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
 void   xfs_buf_iodone_callbacks(struct xfs_buf *);
 void   xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
 
-void   xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
-                              enum xfs_blft);
-void   xfs_trans_buf_copy_type(struct xfs_buf *dst_bp, struct xfs_buf *src_bp);
-
 extern kmem_zone_t     *xfs_buf_item_zone;
 
 #endif /* __XFS_BUF_ITEM_H__ */
index 069537c845e5cc424ce02bf3cec7838250357a32..eb65c546ffd851e134eb27479b0629b827669945 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
        /* start with smaller blk num */
        forward = nodehdr.forw < nodehdr.back;
        for (i = 0; i < 2; forward = !forward, i++) {
+               struct xfs_da3_icnode_hdr thdr;
                if (forward)
                        blkno = nodehdr.forw;
                else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
                        return(error);
 
                node = bp->b_addr;
-               xfs_da3_node_hdr_from_disk(&nodehdr, node);
+               xfs_da3_node_hdr_from_disk(&thdr, node);
                xfs_trans_brelse(state->args->trans, bp);
 
-               if (count - nodehdr.count >= 0)
+               if (count - thdr.count >= 0)
                        break;  /* fits with at least 25% to spare */
        }
        if (i >= 2) {
index b1f267995dea32e97dc041c7dd14af77e1630dc8..e492dcadd0322dc00c4c156b4556811bef200890 100644 (file)
@@ -24,146 +24,6 @@ struct xfs_inode;
 struct xfs_trans;
 struct zone;
 
-/*========================================================================
- * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
- *
- * It is used to manage a doubly linked list of all blocks at the same
- * level in the Btree, and to identify which type of block this is.
- */
-#define XFS_DA_NODE_MAGIC      0xfebe  /* magic number: non-leaf blocks */
-#define XFS_ATTR_LEAF_MAGIC    0xfbee  /* magic number: attribute leaf blks */
-#define        XFS_DIR2_LEAF1_MAGIC    0xd2f1  /* magic number: v2 dirlf single blks */
-#define        XFS_DIR2_LEAFN_MAGIC    0xd2ff  /* magic number: v2 dirlf multi blks */
-
-typedef struct xfs_da_blkinfo {
-       __be32          forw;                   /* previous block in list */
-       __be32          back;                   /* following block in list */
-       __be16          magic;                  /* validity check on block */
-       __be16          pad;                    /* unused */
-} xfs_da_blkinfo_t;
-
-/*
- * CRC enabled directory structure types
- *
- * The headers change size for the additional verification information, but
- * otherwise the tree layouts and contents are unchanged. Hence the da btree
- * code can use the struct xfs_da_blkinfo for manipulating the tree links and
- * magic numbers without modification for both v2 and v3 nodes.
- */
-#define XFS_DA3_NODE_MAGIC     0x3ebe  /* magic number: non-leaf blocks */
-#define XFS_ATTR3_LEAF_MAGIC   0x3bee  /* magic number: attribute leaf blks */
-#define        XFS_DIR3_LEAF1_MAGIC    0x3df1  /* magic number: v2 dirlf single blks */
-#define        XFS_DIR3_LEAFN_MAGIC    0x3dff  /* magic number: v2 dirlf multi blks */
-
-struct xfs_da3_blkinfo {
-       /*
-        * the node link manipulation code relies on the fact that the first
-        * element of this structure is the struct xfs_da_blkinfo so it can
-        * ignore the differences in the rest of the structures.
-        */
-       struct xfs_da_blkinfo   hdr;
-       __be32                  crc;    /* CRC of block */
-       __be64                  blkno;  /* first block of the buffer */
-       __be64                  lsn;    /* sequence number of last write */
-       uuid_t                  uuid;   /* filesystem we belong to */
-       __be64                  owner;  /* inode that owns the block */
-};
-
-/*
- * This is the structure of the root and intermediate nodes in the Btree.
- * The leaf nodes are defined above.
- *
- * Entries are not packed.
- *
- * Since we have duplicate keys, use a binary search but always follow
- * all match in the block, not just the first match found.
- */
-#define        XFS_DA_NODE_MAXDEPTH    5       /* max depth of Btree */
-
-typedef struct xfs_da_node_hdr {
-       struct xfs_da_blkinfo   info;   /* block type, links, etc. */
-       __be16                  __count; /* count of active entries */
-       __be16                  __level; /* level above leaves (leaf == 0) */
-} xfs_da_node_hdr_t;
-
-struct xfs_da3_node_hdr {
-       struct xfs_da3_blkinfo  info;   /* block type, links, etc. */
-       __be16                  __count; /* count of active entries */
-       __be16                  __level; /* level above leaves (leaf == 0) */
-       __be32                  __pad32;
-};
-
-#define XFS_DA3_NODE_CRC_OFF   (offsetof(struct xfs_da3_node_hdr, info.crc))
-
-typedef struct xfs_da_node_entry {
-       __be32  hashval;        /* hash value for this descendant */
-       __be32  before;         /* Btree block before this key */
-} xfs_da_node_entry_t;
-
-typedef struct xfs_da_intnode {
-       struct xfs_da_node_hdr  hdr;
-       struct xfs_da_node_entry __btree[];
-} xfs_da_intnode_t;
-
-struct xfs_da3_intnode {
-       struct xfs_da3_node_hdr hdr;
-       struct xfs_da_node_entry __btree[];
-};
-
-/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
-       __uint32_t      forw;
-       __uint32_t      back;
-       __uint16_t      magic;
-       __uint16_t      count;
-       __uint16_t      level;
-};
-
-extern void xfs_da3_node_hdr_from_disk(struct xfs_da3_icnode_hdr *to,
-                                      struct xfs_da_intnode *from);
-extern void xfs_da3_node_hdr_to_disk(struct xfs_da_intnode *to,
-                                    struct xfs_da3_icnode_hdr *from);
-
-static inline int
-__xfs_da3_node_hdr_size(bool v3)
-{
-       if (v3)
-               return sizeof(struct xfs_da3_node_hdr);
-       return sizeof(struct xfs_da_node_hdr);
-}
-static inline int
-xfs_da3_node_hdr_size(struct xfs_da_intnode *dap)
-{
-       bool    v3 = dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC);
-
-       return __xfs_da3_node_hdr_size(v3);
-}
-
-static inline struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
-       if (dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
-               struct xfs_da3_intnode *dap3 = (struct xfs_da3_intnode *)dap;
-               return dap3->__btree;
-       }
-       return dap->__btree;
-}
-
-extern void xfs_da3_intnode_from_disk(struct xfs_da3_icnode_hdr *to,
-                                     struct xfs_da_intnode *from);
-extern void xfs_da3_intnode_to_disk(struct xfs_da_intnode *to,
-                                   struct xfs_da3_icnode_hdr *from);
-
-#define        XFS_LBSIZE(mp)  (mp)->m_sb.sb_blocksize
-
 /*========================================================================
  * Btree searching and modification structure definitions.
  *========================================================================*/
@@ -309,8 +169,6 @@ int xfs_da3_node_read(struct xfs_trans *tp, struct xfs_inode *dp,
                         xfs_dablk_t bno, xfs_daddr_t mappedbno,
                         struct xfs_buf **bpp, int which_fork);
 
-extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
-
 /*
  * Utility routines.
  */
similarity index 63%
rename from fs/xfs/xfs_dir2_format.h
rename to fs/xfs/xfs_da_format.h
index a0961a61ac1a9a419fd537cd47dbb7337fbbc3a0..89a1a219c8ff57cc35a6e8c10f2309fff8ee183f 100644 (file)
  * along with this program; if not, write the Free Software Foundation,
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-#ifndef __XFS_DIR2_FORMAT_H__
-#define __XFS_DIR2_FORMAT_H__
+#ifndef __XFS_DA_FORMAT_H__
+#define __XFS_DA_FORMAT_H__
+
+/*========================================================================
+ * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
+ *
+ * It is used to manage a doubly linked list of all blocks at the same
+ * level in the Btree, and to identify which type of block this is.
+ */
+#define XFS_DA_NODE_MAGIC      0xfebe  /* magic number: non-leaf blocks */
+#define XFS_ATTR_LEAF_MAGIC    0xfbee  /* magic number: attribute leaf blks */
+#define        XFS_DIR2_LEAF1_MAGIC    0xd2f1  /* magic number: v2 dirlf single blks */
+#define        XFS_DIR2_LEAFN_MAGIC    0xd2ff  /* magic number: v2 dirlf multi blks */
+
+typedef struct xfs_da_blkinfo {
+       __be32          forw;                   /* previous block in list */
+       __be32          back;                   /* following block in list */
+       __be16          magic;                  /* validity check on block */
+       __be16          pad;                    /* unused */
+} xfs_da_blkinfo_t;
+
+/*
+ * CRC enabled directory structure types
+ *
+ * The headers change size for the additional verification information, but
+ * otherwise the tree layouts and contents are unchanged. Hence the da btree
+ * code can use the struct xfs_da_blkinfo for manipulating the tree links and
+ * magic numbers without modification for both v2 and v3 nodes.
+ */
+#define XFS_DA3_NODE_MAGIC     0x3ebe  /* magic number: non-leaf blocks */
+#define XFS_ATTR3_LEAF_MAGIC   0x3bee  /* magic number: attribute leaf blks */
+#define        XFS_DIR3_LEAF1_MAGIC    0x3df1  /* magic number: v2 dirlf single blks */
+#define        XFS_DIR3_LEAFN_MAGIC    0x3dff  /* magic number: v2 dirlf multi blks */
+
+struct xfs_da3_blkinfo {
+       /*
+        * the node link manipulation code relies on the fact that the first
+        * element of this structure is the struct xfs_da_blkinfo so it can
+        * ignore the differences in the rest of the structures.
+        */
+       struct xfs_da_blkinfo   hdr;
+       __be32                  crc;    /* CRC of block */
+       __be64                  blkno;  /* first block of the buffer */
+       __be64                  lsn;    /* sequence number of last write */
+       uuid_t                  uuid;   /* filesystem we belong to */
+       __be64                  owner;  /* inode that owns the block */
+};
+
+/*
+ * This is the structure of the root and intermediate nodes in the Btree.
+ * The leaf nodes are defined above.
+ *
+ * Entries are not packed.
+ *
+ * Since we have duplicate keys, use a binary search but always follow
+ * all match in the block, not just the first match found.
+ */
+#define        XFS_DA_NODE_MAXDEPTH    5       /* max depth of Btree */
+
+typedef struct xfs_da_node_hdr {
+       struct xfs_da_blkinfo   info;   /* block type, links, etc. */
+       __be16                  __count; /* count of active entries */
+       __be16                  __level; /* level above leaves (leaf == 0) */
+} xfs_da_node_hdr_t;
+
+struct xfs_da3_node_hdr {
+       struct xfs_da3_blkinfo  info;   /* block type, links, etc. */
+       __be16                  __count; /* count of active entries */
+       __be16                  __level; /* level above leaves (leaf == 0) */
+       __be32                  __pad32;
+};
+
+#define XFS_DA3_NODE_CRC_OFF   (offsetof(struct xfs_da3_node_hdr, info.crc))
+
+typedef struct xfs_da_node_entry {
+       __be32  hashval;        /* hash value for this descendant */
+       __be32  before;         /* Btree block before this key */
+} xfs_da_node_entry_t;
+
+typedef struct xfs_da_intnode {
+       struct xfs_da_node_hdr  hdr;
+       struct xfs_da_node_entry __btree[];
+} xfs_da_intnode_t;
+
+struct xfs_da3_intnode {
+       struct xfs_da3_node_hdr hdr;
+       struct xfs_da_node_entry __btree[];
+};
+
+/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+       __uint32_t      forw;
+       __uint32_t      back;
+       __uint16_t      magic;
+       __uint16_t      count;
+       __uint16_t      level;
+};
+
+extern void xfs_da3_node_hdr_from_disk(struct xfs_da3_icnode_hdr *to,
+                                      struct xfs_da_intnode *from);
+extern void xfs_da3_node_hdr_to_disk(struct xfs_da_intnode *to,
+                                    struct xfs_da3_icnode_hdr *from);
+
+static inline int
+__xfs_da3_node_hdr_size(bool v3)
+{
+       if (v3)
+               return sizeof(struct xfs_da3_node_hdr);
+       return sizeof(struct xfs_da_node_hdr);
+}
+static inline int
+xfs_da3_node_hdr_size(struct xfs_da_intnode *dap)
+{
+       bool    v3 = dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC);
+
+       return __xfs_da3_node_hdr_size(v3);
+}
+
+static inline struct xfs_da_node_entry *
+xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
+{
+       if (dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
+               struct xfs_da3_intnode *dap3 = (struct xfs_da3_intnode *)dap;
+               return dap3->__btree;
+       }
+       return dap->__btree;
+}
+
+extern void xfs_da3_intnode_from_disk(struct xfs_da3_icnode_hdr *to,
+                                     struct xfs_da_intnode *from);
+extern void xfs_da3_intnode_to_disk(struct xfs_da_intnode *to,
+                                   struct xfs_da3_icnode_hdr *from);
+
+#define        XFS_LBSIZE(mp)  (mp)->m_sb.sb_blocksize
 
 /*
  * Directory version 2.
@@ -497,69 +637,58 @@ xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
 /*
  * Offsets of . and .. in data space (always block 0)
  *
- * The macros are used for shortform directories as they have no headers to read
- * the magic number out of. Shortform directories need to know the size of the
- * data block header because the sfe embeds the block offset of the entry into
- * it so that it doesn't change when format conversion occurs. Bad Things Happen
- * if we don't follow this rule.
- *
  * XXX: there is scope for significant optimisation of the logic here. Right
  * now we are checking for "dir3 format" over and over again. Ideally we should
  * only do it once for each operation.
  */
-#define        XFS_DIR3_DATA_DOT_OFFSET(mp)    \
-       xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&(mp)->m_sb))
-#define        XFS_DIR3_DATA_DOTDOT_OFFSET(mp) \
-       (XFS_DIR3_DATA_DOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 1))
-#define        XFS_DIR3_DATA_FIRST_OFFSET(mp)          \
-       (XFS_DIR3_DATA_DOTDOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 2))
-
 static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_dot_offset(struct xfs_dir2_data_hdr *hdr)
+xfs_dir3_data_dot_offset(struct xfs_mount *mp)
 {
-       return xfs_dir3_data_entry_offset(hdr);
+       return xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
 }
 
 static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_dotdot_offset(struct xfs_dir2_data_hdr *hdr)
+xfs_dir3_data_dotdot_offset(struct xfs_mount *mp)
 {
-       bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
-                   hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
-       return xfs_dir3_data_dot_offset(hdr) +
-               __xfs_dir3_data_entsize(dir3, 1);
+       return xfs_dir3_data_dot_offset(mp) +
+               xfs_dir3_data_entsize(mp, 1);
 }
 
 static inline xfs_dir2_data_aoff_t
-xfs_dir3_data_first_offset(struct xfs_dir2_data_hdr *hdr)
+xfs_dir3_data_first_offset(struct xfs_mount *mp)
 {
-       bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
-                   hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
-       return xfs_dir3_data_dotdot_offset(hdr) +
-               __xfs_dir3_data_entsize(dir3, 2);
+       return xfs_dir3_data_dotdot_offset(mp) +
+               xfs_dir3_data_entsize(mp, 2);
 }
 
 /*
  * location of . and .. in data space (always block 0)
  */
 static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_dot_entry_p(struct xfs_dir2_data_hdr *hdr)
+xfs_dir3_data_dot_entry_p(
+       struct xfs_mount        *mp,
+       struct xfs_dir2_data_hdr *hdr)
 {
        return (struct xfs_dir2_data_entry *)
-               ((char *)hdr + xfs_dir3_data_dot_offset(hdr));
+               ((char *)hdr + xfs_dir3_data_dot_offset(mp));
 }
 
 static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_dotdot_entry_p(struct xfs_dir2_data_hdr *hdr)
+xfs_dir3_data_dotdot_entry_p(
+       struct xfs_mount        *mp,
+       struct xfs_dir2_data_hdr *hdr)
 {
        return (struct xfs_dir2_data_entry *)
-               ((char *)hdr + xfs_dir3_data_dotdot_offset(hdr));
+               ((char *)hdr + xfs_dir3_data_dotdot_offset(mp));
 }
 
 static inline struct xfs_dir2_data_entry *
-xfs_dir3_data_first_entry_p(struct xfs_dir2_data_hdr *hdr)
+xfs_dir3_data_first_entry_p(
+       struct xfs_mount        *mp,
+       struct xfs_dir2_data_hdr *hdr)
 {
        return (struct xfs_dir2_data_entry *)
-               ((char *)hdr + xfs_dir3_data_first_offset(hdr));
+               ((char *)hdr + xfs_dir3_data_first_offset(mp));
 }
 
 /*
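
After this rework the fixed offsets of ".", ".." and the first real entry in data block 0 come straight from the mount, so the block header no longer has to be inspected first. The helper below is a hypothetical debug aid built only on the three accessors above; it is not part of this patch.

/*
 * Hypothetical example: print the fixed offsets in directory data block 0.
 */
static void
xfs_example_dump_fixed_offsets(
	struct xfs_mount	*mp)
{
	xfs_warn(mp, "dot %u dotdot %u first %u",
		 (unsigned)xfs_dir3_data_dot_offset(mp),
		 (unsigned)xfs_dir3_data_dotdot_offset(mp),
		 (unsigned)xfs_dir3_data_first_offset(mp));
}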
@@ -972,4 +1101,262 @@ xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
        return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
 }
 
-#endif /* __XFS_DIR2_FORMAT_H__ */
+
+/*
+ * Attribute storage layout
+ *
+ * Attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ *
+ *========================================================================
+ * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================
+ *
+ * Struct leaf_entry's are packed from the top.  Name/values grow from the
+ * bottom but are not packed.  The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such runs;
+ * smaller ones are dropped.  When the freemap doesn't show enough space
+ * for an allocation, we compact the name/value area and try again.  If we
+ * still don't have enough space, then we have to split the block.  The
+ * name/value structs (both local and remote versions) must be 32bit aligned.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual name string.  The root and intermediate node search always
+ * takes the first-in-the-block key match found, so we should only have
+ * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
+ * nodes until the hash key changes or the attribute name is found.
+ *
+ * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
+ * the leaf_entry.  The namespaces are independent only because we also look
+ * at the namespace bit when we are looking for a matching attribute name.
+ *
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
+ * attribute is in the middle of being created and should not be shown to
+ * the user if we crash during the time that the bit is set.  We clear the
+ * bit when we have finished setting up the attribute.  We do this because
+ * we cannot create some large attributes inside a single transaction, and we
+ * need some indication that we weren't finished if we crash in the middle.
+ */
+#define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
+
+typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
+       __be16  base;                     /* base of free region */
+       __be16  size;                     /* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr {     /* constant-structure header block */
+       xfs_da_blkinfo_t info;          /* block type, links, etc. */
+       __be16  count;                  /* count of active leaf_entry's */
+       __be16  usedbytes;              /* num bytes of names/values stored */
+       __be16  firstused;              /* first used byte in name area */
+       __u8    holes;                  /* != 0 if blk needs compaction */
+       __u8    pad1;
+       xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+                                       /* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry {   /* sorted on key, not name */
+       __be32  hashval;                /* hash value of name */
+       __be16  nameidx;                /* index into buffer of name/value */
+       __u8    flags;                  /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+       __u8    pad2;                   /* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+       __be16  valuelen;               /* number of bytes in value */
+       __u8    namelen;                /* length of name bytes */
+       __u8    nameval[1];             /* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+       __be32  valueblk;               /* block number of value bytes */
+       __be32  valuelen;               /* number of bytes in value */
+       __u8    namelen;                /* length of name bytes */
+       __u8    name[1];                /* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
+typedef struct xfs_attr_leafblock {
+       xfs_attr_leaf_hdr_t     hdr;    /* constant-structure header block */
+       xfs_attr_leaf_entry_t   entries[1];     /* sorted on key, not name */
+       xfs_attr_leaf_name_local_t namelist;    /* grows from bottom of buf */
+       xfs_attr_leaf_name_remote_t valuelist;  /* grows from bottom of buf */
+} xfs_attr_leafblock_t;
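
The layout comment above implies the lookup algorithm: entries stay sorted by
hashval, the btree search lands on the first in-block match, and the leaf walk
then moves forward comparing full names (and the namespace bit) until the hash
value changes. The sketch below is a hedged userspace illustration of that walk
over a simplified entry array; none of its types or helpers are the kernel's.

#include <stdio.h>
#include <string.h>

struct demo_entry {
        unsigned int    hash;   /* stand-in for the hashval field */
        unsigned int    ns;     /* stand-in for the namespace flag bits */
        const char      *name;
};

/* entries are assumed sorted by hash; duplicate hash values are allowed */
static int demo_lookup(const struct demo_entry *e, int count,
                       unsigned int hash, unsigned int ns, const char *name)
{
        int i = 0;

        while (i < count && e[i].hash < hash)           /* first in-block match */
                i++;
        for (; i < count && e[i].hash == hash; i++) {   /* walk "forw"ard */
                if (e[i].ns == ns && strcmp(e[i].name, name) == 0)
                        return i;
        }
        return -1;      /* the kernel would continue into the next leaf */
}

int main(void)
{
        struct demo_entry e[] = {
                { 10, 0, "user.a" },
                { 42, 0, "user.collide1" },
                { 42, 0, "user.collide2" },     /* duplicate hash value */
                { 99, 1, "trusted.b" },
        };

        printf("found at index %d\n", demo_lookup(e, 4, 42, 0, "user.collide2"));
        return 0;
}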
+
+/*
+ * CRC enabled leaf structures. Called "version 3" structures to match the
+ * version number of the directory and dablk structures for this feature, and
+ * attr2 is already taken by the variable inode attribute fork size feature.
+ */
+struct xfs_attr3_leaf_hdr {
+       struct xfs_da3_blkinfo  info;
+       __be16                  count;
+       __be16                  usedbytes;
+       __be16                  firstused;
+       __u8                    holes;
+       __u8                    pad1;
+       struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+       __be32                  pad2;           /* 64 bit alignment */
+};
+
+#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
+
+struct xfs_attr3_leafblock {
+       struct xfs_attr3_leaf_hdr       hdr;
+       struct xfs_attr_leaf_entry      entries[1];
+
+       /*
+        * The rest of the block contains the following structures after the
+        * leaf entries, growing from the bottom up. The variables are never
+        * referenced; the locations are accessed purely via helper functions.
+        *
+        * struct xfs_attr_leaf_name_local
+        * struct xfs_attr_leaf_name_remote
+        */
+};
+
+/*
+ * incore, neutral version of the attribute leaf header
+ */
+struct xfs_attr3_icleaf_hdr {
+       __uint32_t      forw;
+       __uint32_t      back;
+       __uint16_t      magic;
+       __uint16_t      count;
+       __uint16_t      usedbytes;
+       __uint16_t      firstused;
+       __u8            holes;
+       struct {
+               __uint16_t      base;
+               __uint16_t      size;
+       } freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
+
+/*
+ * Flags used in the leaf_entry[i].flags field.
+ * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
+ * on the system call; they are "or"ed together for various operations.
+ */
+#define        XFS_ATTR_LOCAL_BIT      0       /* attr is stored locally */
+#define        XFS_ATTR_ROOT_BIT       1       /* limit access to trusted attrs */
+#define        XFS_ATTR_SECURE_BIT     2       /* limit access to secure attrs */
+#define        XFS_ATTR_INCOMPLETE_BIT 7       /* attr in middle of create/delete */
+#define XFS_ATTR_LOCAL         (1 << XFS_ATTR_LOCAL_BIT)
+#define XFS_ATTR_ROOT          (1 << XFS_ATTR_ROOT_BIT)
+#define XFS_ATTR_SECURE                (1 << XFS_ATTR_SECURE_BIT)
+#define XFS_ATTR_INCOMPLETE    (1 << XFS_ATTR_INCOMPLETE_BIT)
+
+/*
+ * Conversion macros for converting namespace bits from argument flags
+ * to ondisk flags.
+ */
+#define XFS_ATTR_NSP_ARGS_MASK         (ATTR_ROOT | ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK_MASK       (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK(flags)     ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
+#define XFS_ATTR_NSP_ARGS(flags)       ((flags) & XFS_ATTR_NSP_ARGS_MASK)
+#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
+                                        ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
+#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
+                                        ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
+
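As a quick illustration of the argument-to-ondisk conversion, the snippet below
mirrors XFS_ATTR_NSP_ARGS_TO_ONDISK with locally defined stand-in values; the
real ATTR_ROOT/ATTR_SECURE request flags live in the XFS user API headers and
are only assumed here.

#include <stdio.h>

/* assumed values, for illustration only */
#define DEMO_ATTR_ROOT          0x0002
#define DEMO_ATTR_SECURE        0x0008
#define DEMO_XFS_ATTR_ROOT      (1 << 1)
#define DEMO_XFS_ATTR_SECURE    (1 << 2)

#define DEMO_ARGS_TO_ONDISK(x)  (((x) & DEMO_ATTR_ROOT ? DEMO_XFS_ATTR_ROOT : 0) | \
                                 ((x) & DEMO_ATTR_SECURE ? DEMO_XFS_ATTR_SECURE : 0))

int main(void)
{
        /* a trusted-namespace request maps to the ondisk ROOT bit only */
        printf("ondisk flags = 0x%x\n", DEMO_ARGS_TO_ONDISK(DEMO_ATTR_ROOT));
        return 0;
}
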
+/*
+ * Alignment for namelist and valuelist entries (since they are mixed
+ * there can be only one alignment value)
+ */
+#define        XFS_ATTR_LEAF_NAME_ALIGN        ((uint)sizeof(xfs_dablk_t))
+
+static inline int
+xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
+{
+       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+               return sizeof(struct xfs_attr3_leaf_hdr);
+       return sizeof(struct xfs_attr_leaf_hdr);
+}
+
+static inline struct xfs_attr_leaf_entry *
+xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
+{
+       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+               return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
+       return &leafp->entries[0];
+}
+
+/*
+ * Cast typed pointers for "local" and "remote" name/value structs.
+ */
+static inline char *
+xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
+{
+       struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
+
+       return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
+}
+
+static inline xfs_attr_leaf_name_remote_t *
+xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
+{
+       return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+static inline xfs_attr_leaf_name_local_t *
+xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
+{
+       return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+/*
+ * Calculate total bytes used (including trailing pad for alignment) for
+ * a "local" name/value structure, a "remote" name/value structure, and
+ * a pointer which might be either.
+ */
+static inline int xfs_attr_leaf_entsize_remote(int nlen)
+{
+       return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
+               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
+{
+       return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
+               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local_max(int bsize)
+{
+       return (((bsize) >> 1) + ((bsize) >> 2));
+}
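
A worked example of the rounding above, assuming xfs_dablk_t is 32 bits so that
XFS_ATTR_LEAF_NAME_ALIGN is 4: after the "- 1" drops the placeholder name byte,
a local entry costs 3 fixed bytes plus name plus value, rounded up to the
alignment, and the largest local entry allowed is 3/4 of the block size.

#include <stdio.h>

#define DEMO_ALIGN      4       /* XFS_ATTR_LEAF_NAME_ALIGN, assumed 4 bytes */

/* same arithmetic as xfs_attr_leaf_entsize_local() with a 4-byte local struct */
static int demo_entsize_local(int nlen, int vlen)
{
        return (3 + nlen + vlen + DEMO_ALIGN - 1) & ~(DEMO_ALIGN - 1);
}

/* same arithmetic as xfs_attr_leaf_entsize_local_max(): 3/4 of the block */
static int demo_entsize_local_max(int bsize)
{
        return (bsize >> 1) + (bsize >> 2);
}

int main(void)
{
        printf("%d\n", demo_entsize_local(5, 7));       /* 15 rounds to 16 */
        printf("%d\n", demo_entsize_local(7, 7));       /* 17 rounds to 20 */
        printf("%d\n", demo_entsize_local_max(4096));   /* 3072 */
        return 0;
}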
+
+
+
+/*
+ * Remote attribute block format definition
+ *
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
+#define XFS_ATTR3_RMT_MAGIC    0x5841524d      /* XARM */
+
+struct xfs_attr3_rmt_hdr {
+       __be32  rm_magic;
+       __be32  rm_offset;
+       __be32  rm_bytes;
+       __be32  rm_crc;
+       uuid_t  rm_uuid;
+       __be64  rm_owner;
+       __be64  rm_blkno;
+       __be64  rm_lsn;
+};
+
+#define XFS_ATTR3_RMT_CRC_OFF  offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
+
+#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize)   \
+       ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+                       sizeof(struct xfs_attr3_rmt_hdr) : 0))
+
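Given the 1:1 mapping described above, the number of blocks a remote value
needs follows directly from the per-block usable space. In the sketch below the
4096-byte block size is an assumption, and the 56-byte header size is simply
the sum of the fields in struct xfs_attr3_rmt_hdr (four 4-byte words, a 16-byte
uuid and three 8-byte words), assuming no extra padding.

#include <stdio.h>

#define DEMO_BLOCK_SIZE 4096    /* assumed filesystem block size */
#define DEMO_RMT_HDR    56      /* sum of the xfs_attr3_rmt_hdr fields above */

/* usable value bytes per block, as XFS_ATTR3_RMT_BUF_SPACE() computes it
 * on a CRC-enabled (v5) filesystem */
static unsigned int demo_rmt_buf_space(unsigned int bufsize)
{
        return bufsize - DEMO_RMT_HDR;
}

int main(void)
{
        unsigned int valuelen = 10000;  /* hypothetical remote value length */
        unsigned int per_blk = demo_rmt_buf_space(DEMO_BLOCK_SIZE);
        unsigned int nblocks = (valuelen + per_blk - 1) / per_blk;

        printf("%u value bytes -> %u blocks (%u usable bytes each)\n",
               valuelen, nblocks, per_blk);
        return 0;
}
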
+#endif /* __XFS_DA_FORMAT_H__ */
index edf203ab50afa734a74faaace8e626721e7fc1bb..38bf9324302c497aadf73d22524c1271b80f18a5 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
index 0957aa98b6c0d6bb1fdc3fa23161d761aeb58567..9f3f83a5e2da9db832e2dc22b6e74a7eb8655350 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_buf_item.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * Local function prototypes.
@@ -1158,7 +1158,7 @@ xfs_dir2_sf_to_block(
        /*
         * Create entry for .
         */
-       dep = xfs_dir3_data_dot_entry_p(hdr);
+       dep = xfs_dir3_data_dot_entry_p(mp, hdr);
        dep->inumber = cpu_to_be64(dp->i_ino);
        dep->namelen = 1;
        dep->name[0] = '.';
@@ -1172,7 +1172,7 @@ xfs_dir2_sf_to_block(
        /*
         * Create entry for ..
         */
-       dep = xfs_dir3_data_dotdot_entry_p(hdr);
+       dep = xfs_dir3_data_dotdot_entry_p(mp, hdr);
        dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
        dep->namelen = 2;
        dep->name[0] = dep->name[1] = '.';
@@ -1183,7 +1183,7 @@ xfs_dir2_sf_to_block(
        blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
        blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
                                (char *)dep - (char *)hdr));
-       offset = xfs_dir3_data_first_offset(hdr);
+       offset = xfs_dir3_data_first_offset(mp);
        /*
         * Loop over existing entries, stuff them in.
         */
index 47e1326c169a08c71d8d21ac51e8d113444d3295..ccfeb4d8376a6aa91985ce30c19285bc221640f4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
index 1021c8356d0836318e300adefc4a35f170d120c3..51fdc11a1e2c1e4455ac8aa5c8649e21bf4076eb 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
index 4c3dba7ffb7439d250b0af44e4de237a9359a795..b8381646b8af6d9c6715781a84f704b558d8c25c 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
@@ -1101,7 +1100,7 @@ xfs_dir2_leafn_rebalance(
                state->inleaf = 1;
                blk2->index = 0;
                xfs_alert(args->dp->i_mount,
-       "%s: picked the wrong leaf? reverting original leaf: blk1->index %d\n",
+       "%s: picked the wrong leaf? reverting original leaf: blk1->index %d",
                        __func__, blk1->index);
        }
 }
index 8993ec17452cd7bd62794e71585ea003badad0c0..45c9ce8cdb28110a4e1436d5b48ad7d30807c650 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_dinode.h"
 
 /*
  * Directory file type support functions
@@ -119,9 +119,9 @@ xfs_dir2_sf_getdents(
         * mp->m_dirdatablk.
         */
        dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-                                            XFS_DIR3_DATA_DOT_OFFSET(mp));
+                                            xfs_dir3_data_dot_offset(mp));
        dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-                                               XFS_DIR3_DATA_DOTDOT_OFFSET(mp));
+                                               xfs_dir3_data_dotdot_offset(mp));
 
        /*
         * Put . entry unless we're starting past it.
index bb6e2848f473d024d5ff1dd70a1232443901ea9f..8811ee5eaec65e7762d261781a6f0c2f7303c64b 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 /*
  * Prototypes for internal functions.
@@ -557,7 +557,7 @@ xfs_dir2_sf_addname_hard(
         * to insert the new entry.
         * If it's going to end up at the end then oldsfep will point there.
         */
-       for (offset = XFS_DIR3_DATA_FIRST_OFFSET(mp),
+       for (offset = xfs_dir3_data_first_offset(mp),
              oldsfep = xfs_dir2_sf_firstentry(oldsfp),
              add_datasize = xfs_dir3_data_entsize(mp, args->namelen),
              eof = (char *)oldsfep == &buf[old_isize];
@@ -640,7 +640,7 @@ xfs_dir2_sf_addname_pick(
 
        sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
        size = xfs_dir3_data_entsize(mp, args->namelen);
-       offset = XFS_DIR3_DATA_FIRST_OFFSET(mp);
+       offset = xfs_dir3_data_first_offset(mp);
        sfep = xfs_dir2_sf_firstentry(sfp);
        holefit = 0;
        /*
@@ -713,7 +713,7 @@ xfs_dir2_sf_check(
        mp = dp->i_mount;
 
        sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
-       offset = XFS_DIR3_DATA_FIRST_OFFSET(mp);
+       offset = xfs_dir3_data_first_offset(mp);
        ino = xfs_dir2_sf_get_parent_ino(sfp);
        i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
 
index 45560ee1a4ba8b1ccfdc36f9616cc5355e03558d..8367d6dc18c9df7f51cd565e11395cba24929a53 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_quota.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
 #include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
 #include "xfs_discard.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 STATIC int
 xfs_trim_extents(
index 71520e6e5d65048c04996133621b2916653b5c31..6b1e695caf0ebf8bf256150d2efc9483667b837d 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * Lock order:
@@ -64,7 +64,8 @@ int xfs_dqerror_mod = 33;
 struct kmem_zone               *xfs_qm_dqtrxzone;
 static struct kmem_zone                *xfs_qm_dqzone;
 
-static struct lock_class_key xfs_dquot_other_class;
+static struct lock_class_key xfs_dquot_group_class;
+static struct lock_class_key xfs_dquot_project_class;
 
 /*
  * This is called to free all the memory associated with a dquot
@@ -291,118 +292,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 }
 
-STATIC bool
-xfs_dquot_buf_verify_crc(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
-       int                     ndquots;
-       int                     i;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return true;
-
-       /*
-        * if we are in log recovery, the quota subsystem has not been
-        * initialised so we have no quotainfo structure. In that case, we need
-        * to manually calculate the number of dquots in the buffer.
-        */
-       if (mp->m_quotainfo)
-               ndquots = mp->m_quotainfo->qi_dqperchunk;
-       else
-               ndquots = xfs_qm_calc_dquots_per_chunk(mp,
-                                       XFS_BB_TO_FSB(mp, bp->b_length));
-
-       for (i = 0; i < ndquots; i++, d++) {
-               if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                XFS_DQUOT_CRC_OFF))
-                       return false;
-               if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
-                       return false;
-       }
-       return true;
-}
-
-STATIC bool
-xfs_dquot_buf_verify(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
-       xfs_dqid_t              id = 0;
-       int                     ndquots;
-       int                     i;
-
-       /*
-        * if we are in log recovery, the quota subsystem has not been
-        * initialised so we have no quotainfo structure. In that case, we need
-        * to manually calculate the number of dquots in the buffer.
-        */
-       if (mp->m_quotainfo)
-               ndquots = mp->m_quotainfo->qi_dqperchunk;
-       else
-               ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
-
-       /*
-        * On the first read of the buffer, verify that each dquot is valid.
-        * We don't know what the id of the dquot is supposed to be, just that
-        * they should be increasing monotonically within the buffer. If the
-        * first id is corrupt, then it will fail on the second dquot in the
-        * buffer so corruptions could point to the wrong dquot in this case.
-        */
-       for (i = 0; i < ndquots; i++) {
-               struct xfs_disk_dquot   *ddq;
-               int                     error;
-
-               ddq = &d[i].dd_diskdq;
-
-               if (i == 0)
-                       id = be32_to_cpu(ddq->d_id);
-
-               error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
-                                      "xfs_dquot_buf_verify");
-               if (error)
-                       return false;
-       }
-       return true;
-}
-
-static void
-xfs_dquot_buf_read_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-
-       if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-       }
-}
-
-/*
- * we don't calculate the CRC here as that is done when the dquot is flushed to
- * the buffer after the update is done. This ensures that the dquot in the
- * buffer always has an up-to-date CRC value.
- */
-void
-xfs_dquot_buf_write_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-
-       if (!xfs_dquot_buf_verify(mp, bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-               return;
-       }
-}
-
-const struct xfs_buf_ops xfs_dquot_buf_ops = {
-       .verify_read = xfs_dquot_buf_read_verify,
-       .verify_write = xfs_dquot_buf_write_verify,
-};
-
 /*
  * Allocate a block and fill it with dquots.
  * This is called when the bmapi finds a hole.
@@ -513,6 +402,7 @@ xfs_qm_dqalloc(
 
        return (error);
 }
+
 STATIC int
 xfs_qm_dqrepair(
        struct xfs_mount        *mp,
@@ -546,7 +436,7 @@ xfs_qm_dqrepair(
        /* Do the actual repair of dquots in this buffer */
        for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
                ddq = &d[i].dd_diskdq;
-               error = xfs_qm_dqcheck(mp, ddq, firstid + i,
+               error = xfs_dqcheck(mp, ddq, firstid + i,
                                       dqp->dq_flags & XFS_DQ_ALLTYPES,
                                       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
                if (error) {
@@ -703,8 +593,20 @@ xfs_qm_dqread(
         * Make sure group quotas have a different lock class than user
         * quotas.
         */
-       if (!(type & XFS_DQ_USER))
-               lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
+       switch (type) {
+       case XFS_DQ_USER:
+               /* uses the default lock class */
+               break;
+       case XFS_DQ_GROUP:
+               lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
+               break;
+       case XFS_DQ_PROJ:
+               lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
+               break;
+       default:
+               ASSERT(0);
+               break;
+       }
 
        XFS_STATS_INC(xs_qm_dquot);
 
@@ -1120,7 +1022,7 @@ xfs_qm_dqflush(
        /*
         * A simple sanity check in case we got a corrupted dquot..
         */
-       error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
+       error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
                           XFS_QMOPT_DOWARN, "dqflush (incore copy)");
        if (error) {
                xfs_buf_relse(bp);
index 55abbca2883d84231e7ca61a722f2b394b41376d..d22ed0053c32ea0dd687f7e2ab439b8181d4cbc8 100644 (file)
@@ -172,6 +172,4 @@ static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
        return dqp;
 }
 
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
 #endif /* __XFS_DQUOT_H__ */
diff --git a/fs/xfs/xfs_dquot_buf.c b/fs/xfs/xfs_dquot_buf.c
new file mode 100644 (file)
index 0000000..aaaf41b
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_qm.h"
+#include "xfs_error.h"
+#include "xfs_cksum.h"
+#include "xfs_trace.h"
+
+int
+xfs_calc_dquots_per_chunk(
+       struct xfs_mount        *mp,
+       unsigned int            nbblks) /* basic block units */
+{
+       unsigned int    ndquots;
+
+       ASSERT(nbblks > 0);
+       ndquots = BBTOB(nbblks);
+       do_div(ndquots, sizeof(xfs_dqblk_t));
+
+       return ndquots;
+}
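
The helper above is just bytes-per-buffer divided by the size of one on-disk
dquot record. The sketch below redoes that arithmetic in userspace; the
512-byte basic block matches BBTOB()'s shift by 9, while the 128-byte record
size is a purely illustrative stand-in for sizeof(xfs_dqblk_t).

#include <stdio.h>

#define DEMO_BBSIZE     512     /* XFS basic block size (BBTOB shifts by 9) */
#define DEMO_DQBLK_SIZE 128     /* hypothetical on-disk dquot record size */

static unsigned int demo_dquots_per_chunk(unsigned int nbblks)
{
        return (nbblks * DEMO_BBSIZE) / DEMO_DQBLK_SIZE;
}

int main(void)
{
        /* e.g. an 8 basic-block (4 KiB) buffer */
        printf("%u dquots per chunk\n", demo_dquots_per_chunk(8));
        return 0;
}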
+
+/*
+ * Do some primitive error checking on ondisk dquot data structures.
+ */
+int
+xfs_dqcheck(
+       struct xfs_mount *mp,
+       xfs_disk_dquot_t *ddq,
+       xfs_dqid_t       id,
+       uint             type,    /* used only when IO_dorepair is true */
+       uint             flags,
+       char             *str)
+{
+       xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
+       int             errs = 0;
+
+       /*
+        * We can encounter an uninitialized dquot buffer for 2 reasons:
+        * 1. If we crash while deleting the quotainode(s), and those blks got
+        *    used for user data. This is because we take the path of regular
+        *    file deletion; however, the size field of quotainodes is never
+        *    updated, so all the tricks that we play in itruncate_finish
+        *    don't quite matter.
+        *
+        *    2. We don't replay the quota buffers when there's a quotaoff logitem.
+        *    But the allocation will be replayed so we'll end up with an
+        *    uninitialized quota block.
+        *
+        * This is all fine; things are still consistent, and we haven't lost
+        * any quota information. Just don't complain about bad dquot blks.
+        */
+       if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
+                       str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
+               errs++;
+       }
+       if (ddq->d_version != XFS_DQUOT_VERSION) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
+                       str, id, ddq->d_version, XFS_DQUOT_VERSION);
+               errs++;
+       }
+
+       if (ddq->d_flags != XFS_DQ_USER &&
+           ddq->d_flags != XFS_DQ_PROJ &&
+           ddq->d_flags != XFS_DQ_GROUP) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
+                       str, id, ddq->d_flags);
+               errs++;
+       }
+
+       if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : ondisk-dquot 0x%p, ID mismatch: "
+                       "0x%x expected, found id 0x%x",
+                       str, ddq, id, be32_to_cpu(ddq->d_id));
+               errs++;
+       }
+
+       if (!errs && ddq->d_id) {
+               if (ddq->d_blk_softlimit &&
+                   be64_to_cpu(ddq->d_bcount) >
+                               be64_to_cpu(ddq->d_blk_softlimit)) {
+                       if (!ddq->d_btimer) {
+                               if (flags & XFS_QMOPT_DOWARN)
+                                       xfs_alert(mp,
+                       "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
+                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
+                               errs++;
+                       }
+               }
+               if (ddq->d_ino_softlimit &&
+                   be64_to_cpu(ddq->d_icount) >
+                               be64_to_cpu(ddq->d_ino_softlimit)) {
+                       if (!ddq->d_itimer) {
+                               if (flags & XFS_QMOPT_DOWARN)
+                                       xfs_alert(mp,
+                       "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
+                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
+                               errs++;
+                       }
+               }
+               if (ddq->d_rtb_softlimit &&
+                   be64_to_cpu(ddq->d_rtbcount) >
+                               be64_to_cpu(ddq->d_rtb_softlimit)) {
+                       if (!ddq->d_rtbtimer) {
+                               if (flags & XFS_QMOPT_DOWARN)
+                                       xfs_alert(mp,
+                       "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
+                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
+                               errs++;
+                       }
+               }
+       }
+
+       if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
+               return errs;
+
+       if (flags & XFS_QMOPT_DOWARN)
+               xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
+
+       /*
+        * Typically, a repair is only requested by quotacheck.
+        */
+       ASSERT(id != -1);
+       ASSERT(flags & XFS_QMOPT_DQREPAIR);
+       memset(d, 0, sizeof(xfs_dqblk_t));
+
+       d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+       d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
+       d->dd_diskdq.d_flags = type;
+       d->dd_diskdq.d_id = cpu_to_be32(id);
+
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
+
+       return errs;
+}
+
+STATIC bool
+xfs_dquot_buf_verify_crc(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
+       int                     ndquots;
+       int                     i;
+
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return true;
+
+       /*
+        * if we are in log recovery, the quota subsystem has not been
+        * initialised so we have no quotainfo structure. In that case, we need
+        * to manually calculate the number of dquots in the buffer.
+        */
+       if (mp->m_quotainfo)
+               ndquots = mp->m_quotainfo->qi_dqperchunk;
+       else
+               ndquots = xfs_calc_dquots_per_chunk(mp,
+                                       XFS_BB_TO_FSB(mp, bp->b_length));
+
+       for (i = 0; i < ndquots; i++, d++) {
+               if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF))
+                       return false;
+               if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
+                       return false;
+       }
+       return true;
+}
+
+STATIC bool
+xfs_dquot_buf_verify(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
+       xfs_dqid_t              id = 0;
+       int                     ndquots;
+       int                     i;
+
+       /*
+        * if we are in log recovery, the quota subsystem has not been
+        * initialised so we have no quotainfo structure. In that case, we need
+        * to manually calculate the number of dquots in the buffer.
+        */
+       if (mp->m_quotainfo)
+               ndquots = mp->m_quotainfo->qi_dqperchunk;
+       else
+               ndquots = xfs_calc_dquots_per_chunk(mp, bp->b_length);
+
+       /*
+        * On the first read of the buffer, verify that each dquot is valid.
+        * We don't know what the id of the dquot is supposed to be, just that
+        * they should be increasing monotonically within the buffer. If the
+        * first id is corrupt, then it will fail on the second dquot in the
+        * buffer so corruptions could point to the wrong dquot in this case.
+        */
+       for (i = 0; i < ndquots; i++) {
+               struct xfs_disk_dquot   *ddq;
+               int                     error;
+
+               ddq = &d[i].dd_diskdq;
+
+               if (i == 0)
+                       id = be32_to_cpu(ddq->d_id);
+
+               error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+                                      "xfs_dquot_buf_verify");
+               if (error)
+                       return false;
+       }
+       return true;
+}
+
+static void
+xfs_dquot_buf_read_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+
+       if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+               xfs_buf_ioerror(bp, EFSCORRUPTED);
+       }
+}
+
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
+void
+xfs_dquot_buf_write_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+
+       if (!xfs_dquot_buf_verify(mp, bp)) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+               xfs_buf_ioerror(bp, EFSCORRUPTED);
+               return;
+       }
+}
+
+const struct xfs_buf_ops xfs_dquot_buf_ops = {
+       .verify_read = xfs_dquot_buf_read_verify,
+       .verify_write = xfs_dquot_buf_write_verify,
+};
+
index e838d84b4e85697917c8623418c06ae28140a8b7..92e5f62eefc6612fb5890631edd42629af1e768b 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
+#include "xfs_log.h"
 
 static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
 {
index 1123d93ff79546efe3a9d962460ab1e44e2e1bac..9995b807d627eb564da0747124359caa6141ee57 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
 #include "xfs_error.h"
 
 #ifdef DEBUG
@@ -159,7 +156,7 @@ xfs_error_report(
 {
        if (level <= xfs_error_level) {
                xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
-               "Internal error %s at line %d of file %s.  Caller 0x%p\n",
+               "Internal error %s at line %d of file %s.  Caller 0x%p",
                            tag, linenum, filename, ra);
 
                xfs_stack_trace();
index 066df425c14ffca5b4dacb20f7b3c6fcdd139acb..1399e187d425dc7af0f8b5062afaf312fe5a0927 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
 #include "xfs_dir2.h"
 #include "xfs_export.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
index e43708e2f0806d4daae241ee32e18abcac1a017f..fd22f69049d49861ff1f853ec11a2d23e3eef03f 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_alloc.h"
-#include "xfs_inode.h"
 #include "xfs_extent_busy.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 
 void
 xfs_extent_busy_insert(
index 985412d65ba5119519300a5b414d7d2ea8ba1707..bfff284d2dcce434a16c72df62713098a812dfd4 100644 (file)
 #ifndef __XFS_EXTENT_BUSY_H__
 #define        __XFS_EXTENT_BUSY_H__
 
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_alloc_arg;
+
 /*
  * Busy block/extent entry.  Indexed by a rbtree in perag to mark blocks that
  * have been freed but whose transactions aren't committed to disk yet.
index dc53e8febbbeaa54812b4e72dc25938718da69c1..3680d04f973fa1c9de4390079a5c5ba3d8e69eae 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
 #include "xfs_extfree_item.h"
 
 
index 4c749ab543d0de17646993a282f925ebd0314ccf..e6035bd58294d1a03c50933ec113c46b0465d4aa 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_ioctl.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -227,10 +229,9 @@ xfs_file_fsync(
 }
 
 STATIC ssize_t
-xfs_file_aio_read(
+xfs_file_read_iter(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos)
 {
        struct file             *file = iocb->ki_filp;
@@ -251,9 +252,7 @@ xfs_file_aio_read(
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;
 
-       ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
-       if (ret < 0)
-               return ret;
+       size = iov_iter_count(iter);
 
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
@@ -306,7 +305,7 @@ xfs_file_aio_read(
 
        trace_xfs_file_read(ip, size, pos, ioflags);
 
-       ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
+       ret = generic_file_read_iter(iocb, iter, pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);
 
@@ -622,10 +621,9 @@ restart:
 STATIC ssize_t
 xfs_file_dio_aio_write(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -633,7 +631,6 @@ xfs_file_dio_aio_write(
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
-       size_t                  count = ocount;
        int                     unaligned_io = 0;
        int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
@@ -693,8 +690,8 @@ xfs_file_dio_aio_write(
        }
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_direct_write(iocb, iovp,
-                       &nr_segs, pos, &iocb->ki_pos, count, ocount);
+       ret = generic_file_direct_write_iter(iocb, iter,
+                       pos, &iocb->ki_pos, count);
 
 out:
        xfs_rw_iunlock(ip, iolock);
@@ -707,10 +704,9 @@ out:
 STATIC ssize_t
 xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -719,7 +715,6 @@ xfs_file_buffered_aio_write(
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
-       size_t                  count = ocount;
 
        xfs_rw_ilock(ip, iolock);
 
@@ -732,7 +727,7 @@ xfs_file_buffered_aio_write(
 
 write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+       ret = generic_file_buffered_write_iter(iocb, iter,
                        pos, &iocb->ki_pos, count, 0);
 
        /*
@@ -753,10 +748,9 @@ out:
 }
 
 STATIC ssize_t
-xfs_file_aio_write(
+xfs_file_write_iter(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos)
 {
        struct file             *file = iocb->ki_filp;
@@ -764,17 +758,15 @@ xfs_file_aio_write(
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
-       size_t                  ocount = 0;
+       size_t                  count = 0;
 
        XFS_STATS_INC(xs_write_calls);
 
        BUG_ON(iocb->ki_pos != pos);
 
-       ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-       if (ret)
-               return ret;
+       count = iov_iter_count(iter);
 
-       if (ocount == 0)
+       if (count == 0)
                return 0;
 
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -783,10 +775,9 @@ xfs_file_aio_write(
        }
 
        if (unlikely(file->f_flags & O_DIRECT))
-               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
+               ret = xfs_file_dio_aio_write(iocb, iter, pos, count);
        else
-               ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
-                                                 ocount);
+               ret = xfs_file_buffered_aio_write(iocb, iter, pos, count);
 
        if (ret > 0) {
                ssize_t err;
@@ -805,44 +796,64 @@ out:
 
 STATIC long
 xfs_file_fallocate(
-       struct file     *file,
-       int             mode,
-       loff_t          offset,
-       loff_t          len)
+       struct file             *file,
+       int                     mode,
+       loff_t                  offset,
+       loff_t                  len)
 {
-       struct inode    *inode = file_inode(file);
-       long            error;
-       loff_t          new_size = 0;
-       xfs_flock64_t   bf;
-       xfs_inode_t     *ip = XFS_I(inode);
-       int             cmd = XFS_IOC_RESVSP;
-       int             attr_flags = XFS_ATTR_NOLOCK;
+       struct inode            *inode = file_inode(file);
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_trans        *tp;
+       long                    error;
+       loff_t                  new_size = 0;
 
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
 
-       bf.l_whence = 0;
-       bf.l_start = offset;
-       bf.l_len = len;
-
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               error = xfs_free_file_space(ip, offset, len);
+               if (error)
+                       goto out_unlock;
+       } else {
+               if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+                   offset + len > i_size_read(inode)) {
+                       new_size = offset + len;
+                       error = -inode_newsize_ok(inode, new_size);
+                       if (error)
+                               goto out_unlock;
+               }
 
-       if (mode & FALLOC_FL_PUNCH_HOLE)
-               cmd = XFS_IOC_UNRESVSP;
-
-       /* check the new inode size is valid before allocating */
-       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-           offset + len > i_size_read(inode)) {
-               new_size = offset + len;
-               error = inode_newsize_ok(inode, new_size);
+               error = xfs_alloc_file_space(ip, offset, len,
+                                            XFS_BMAPI_PREALLOC);
                if (error)
                        goto out_unlock;
        }
 
-       if (file->f_flags & O_DSYNC)
-               attr_flags |= XFS_ATTR_SYNC;
+       tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+       error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_unlock;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       ip->i_d.di_mode &= ~S_ISUID;
+       if (ip->i_d.di_mode & S_IXGRP)
+               ip->i_d.di_mode &= ~S_ISGID;
+
+       if (!(mode & FALLOC_FL_PUNCH_HOLE))
+               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
 
-       error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
+       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       if (file->f_flags & O_DSYNC)
+               xfs_trans_set_sync(tp);
+       error = xfs_trans_commit(tp, 0);
        if (error)
                goto out_unlock;
 
@@ -852,12 +863,12 @@ xfs_file_fallocate(
 
                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
-               error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
+               error = xfs_setattr_size(ip, &iattr);
        }
 
 out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-       return error;
+       return -error;
 }
 
 
@@ -1411,8 +1422,8 @@ const struct file_operations xfs_file_operations = {
        .llseek         = xfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = xfs_file_aio_read,
-       .aio_write      = xfs_file_aio_write,
+       .read_iter      = xfs_file_read_iter,
+       .write_iter     = xfs_file_write_iter,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
index ce78e654d37b73693aa4c637e021dda9154ad5d6..12b6e7701985378e56f619dfd58f79be0d45c94c 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_log.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inum.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_inum.h"
+#include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_alloc.h"
 #include "xfs_mru_cache.h"
+#include "xfs_dinode.h"
 #include "xfs_filestream.h"
 #include "xfs_trace.h"
 
index 35c08ff54ca079dcf6c718d6b2d6b75bf24f1721..b6ab5a3cfa125d2204d19760dccac917d0ad957f 100644 (file)
@@ -156,14 +156,259 @@ struct xfs_dsymlink_hdr {
        ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
                        sizeof(struct xfs_dsymlink_hdr) : 0))
 
-int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
-int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
-                       uint32_t size, struct xfs_buf *bp);
-bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
-                       uint32_t size, struct xfs_buf *bp);
-void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
-                                struct xfs_inode *ip, struct xfs_ifork *ifp);
-
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+
+/*
+ * Allocation Btree format definitions
+ *
+ * There are two on-disk btrees, one sorted by blockno and one sorted
+ * by blockcount and blockno.  All blocks look the same to make the code
+ * simpler; if we have time later, we'll make the optimizations.
+ */
+#define        XFS_ABTB_MAGIC          0x41425442      /* 'ABTB' for bno tree */
+#define        XFS_ABTB_CRC_MAGIC      0x41423342      /* 'AB3B' */
+#define        XFS_ABTC_MAGIC          0x41425443      /* 'ABTC' for cnt tree */
+#define        XFS_ABTC_CRC_MAGIC      0x41423343      /* 'AB3C' */
+
+/*
+ * Data record/key structure
+ */
+typedef struct xfs_alloc_rec {
+       __be32          ar_startblock;  /* starting block number */
+       __be32          ar_blockcount;  /* count of free blocks */
+} xfs_alloc_rec_t, xfs_alloc_key_t;
+
+typedef struct xfs_alloc_rec_incore {
+       xfs_agblock_t   ar_startblock;  /* starting block number */
+       xfs_extlen_t    ar_blockcount;  /* count of free blocks */
+} xfs_alloc_rec_incore_t;
+
+/* btree pointer type */
+typedef __be32 xfs_alloc_ptr_t;
+
+/*
+ * Block numbers in the AG:
+ * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
+ */
+#define        XFS_BNO_BLOCK(mp)       ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
+#define        XFS_CNT_BLOCK(mp)       ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
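
To make the two sort orders concrete: the by-bno tree orders records on
ar_startblock alone, while the by-cnt tree orders on ar_blockcount first with
ar_startblock as the tie-breaker, which is what makes best-fit-by-size lookups
cheap. The comparators below are an illustrative userspace sketch over an
equivalent record layout, not the kernel's btree code.

#include <stdio.h>
#include <stdlib.h>

struct demo_rec {
        unsigned int startblock;        /* ar_startblock */
        unsigned int blockcount;        /* ar_blockcount */
};

/* by-bno tree: sorted on starting block only */
static int cmp_bno(const void *a, const void *b)
{
        const struct demo_rec *x = a, *y = b;

        if (x->startblock != y->startblock)
                return x->startblock < y->startblock ? -1 : 1;
        return 0;
}

/* by-cnt tree: sorted on extent length, then starting block */
static int cmp_cnt(const void *a, const void *b)
{
        const struct demo_rec *x = a, *y = b;

        if (x->blockcount != y->blockcount)
                return x->blockcount < y->blockcount ? -1 : 1;
        return cmp_bno(a, b);
}

static void dump(const char *tag, const struct demo_rec *e, int n)
{
        int i;

        printf("%s:", tag);
        for (i = 0; i < n; i++)
                printf(" (%u,%u)", e[i].startblock, e[i].blockcount);
        printf("\n");
}

int main(void)
{
        struct demo_rec e[] = { {100, 8}, {20, 8}, {50, 200} };

        qsort(e, 3, sizeof(e[0]), cmp_bno);
        dump("by-bno", e, 3);
        qsort(e, 3, sizeof(e[0]), cmp_cnt);
        dump("by-cnt", e, 3);
        return 0;
}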
+
+
+/*
+ * Inode Allocation Btree format definitions
+ *
+ * There is a btree for the inode map per allocation group.
+ */
+#define        XFS_IBT_MAGIC           0x49414254      /* 'IABT' */
+#define        XFS_IBT_CRC_MAGIC       0x49414233      /* 'IAB3' */
+
+typedef        __uint64_t      xfs_inofree_t;
+#define        XFS_INODES_PER_CHUNK            (NBBY * sizeof(xfs_inofree_t))
+#define        XFS_INODES_PER_CHUNK_LOG        (XFS_NBBYLOG + 3)
+#define        XFS_INOBT_ALL_FREE              ((xfs_inofree_t)-1)
+#define        XFS_INOBT_MASK(i)               ((xfs_inofree_t)1 << (i))
+
+static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
+{
+       return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
+}
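
xfs_inobt_maskn(i, n) builds a mask of n consecutive bits of the 64-bit
free-inode word starting at bit i; the n >= XFS_INODES_PER_CHUNK case (64,
since NBBY is 8 and xfs_inofree_t is 8 bytes) selects 0 so that subtracting 1
yields an all-ones mask. A standalone check of the same expression:

#include <stdio.h>
#include <stdint.h>

#define DEMO_INODES_PER_CHUNK   64

static uint64_t demo_maskn(int i, int n)
{
        /* same expression as xfs_inobt_maskn() */
        return ((n >= DEMO_INODES_PER_CHUNK ? 0 : (uint64_t)1 << n) - 1) << i;
}

int main(void)
{
        printf("maskn(2, 3)  = 0x%llx\n", (unsigned long long)demo_maskn(2, 3));
        printf("maskn(0, 64) = 0x%llx\n", (unsigned long long)demo_maskn(0, 64));
        return 0;
}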
+
+/*
+ * Data record structure
+ */
+typedef struct xfs_inobt_rec {
+       __be32          ir_startino;    /* starting inode number */
+       __be32          ir_freecount;   /* count of free inodes (set bits) */
+       __be64          ir_free;        /* free inode mask */
+} xfs_inobt_rec_t;
+
+typedef struct xfs_inobt_rec_incore {
+       xfs_agino_t     ir_startino;    /* starting inode number */
+       __int32_t       ir_freecount;   /* count of free inodes (set bits) */
+       xfs_inofree_t   ir_free;        /* free inode mask */
+} xfs_inobt_rec_incore_t;
+
+
+/*
+ * Key structure
+ */
+typedef struct xfs_inobt_key {
+       __be32          ir_startino;    /* starting inode number */
+} xfs_inobt_key_t;
+
+/* btree pointer type */
+typedef __be32 xfs_inobt_ptr_t;
+
+/*
+ * block numbers in the AG.
+ */
+#define        XFS_IBT_BLOCK(mp)               ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
+#define        XFS_PREALLOC_BLOCKS(mp)         ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
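Aside (worked example, not part of this patch): for the common case of
4096-byte filesystem blocks and 512-byte sectors -- an assumption made only
for this illustration -- the SB, AGF, AGI and AGFL headers all fall inside
AG block 0, so the reserved block numbers chain up as:

        XFS_AGFL_BLOCK(mp)      == 0
        XFS_BNO_BLOCK(mp)       == 1    /* by-blockno free space btree root    */
        XFS_CNT_BLOCK(mp)       == 2    /* by-blockcount free space btree root */
        XFS_IBT_BLOCK(mp)       == 3    /* inode allocation btree root         */
        XFS_PREALLOC_BLOCKS(mp) == 4    /* first block past the AG metadata    */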
+
+
+
+/*
+ * BMAP Btree format definitions
+ *
+ * This includes both the root block definition that sits inside an inode fork
+ * and the record/pointer formats for the leaf/node in the blocks.
+ */
+#define XFS_BMAP_MAGIC         0x424d4150      /* 'BMAP' */
+#define XFS_BMAP_CRC_MAGIC     0x424d4133      /* 'BMA3' */
+
+/*
+ * Bmap root header, on-disk form only.
+ */
+typedef struct xfs_bmdr_block {
+       __be16          bb_level;       /* 0 is a leaf */
+       __be16          bb_numrecs;     /* current # of data records */
+} xfs_bmdr_block_t;
+
+/*
+ * Bmap btree record and extent descriptor.
+ *  l0:63 is an extent flag (value 1 indicates non-normal).
+ *  l0:9-62 are startoff.
+ *  l0:0-8 and l1:21-63 are startblock.
+ *  l1:0-20 are blockcount.
+ */
+#define BMBT_EXNTFLAG_BITLEN   1
+#define BMBT_STARTOFF_BITLEN   54
+#define BMBT_STARTBLOCK_BITLEN 52
+#define BMBT_BLOCKCOUNT_BITLEN 21
+
+typedef struct xfs_bmbt_rec {
+       __be64                  l0, l1;
+} xfs_bmbt_rec_t;
+
+typedef __uint64_t     xfs_bmbt_rec_base_t;    /* use this for casts */
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
+
+typedef struct xfs_bmbt_rec_host {
+       __uint64_t              l0, l1;
+} xfs_bmbt_rec_host_t;
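Aside (a decoding sketch, not part of this patch, following the bit layout
described above; the helper name is made up, and the on-disk __be64 words are
assumed to have already been converted with be64_to_cpu()):

        static inline void bmbt_rec_unpack(uint64_t l0, uint64_t l1,
                                           uint64_t *startoff,
                                           uint64_t *startblock,
                                           uint64_t *blockcount,
                                           int *extflag)
        {
                *extflag    = (int)(l0 >> 63);                      /* l0:63    */
                *startoff   = (l0 >> 9) & ((1ULL << 54) - 1);       /* l0:9-62  */
                *startblock = ((l0 & 0x1ffULL) << 43) | (l1 >> 21); /* l0:0-8, l1:21-63 */
                *blockcount = l1 & ((1ULL << 21) - 1);              /* l1:0-20  */
        }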
+
+/*
+ * Values and macros for delayed-allocation startblock fields.
+ */
+#define STARTBLOCKVALBITS      17
+#define STARTBLOCKMASKBITS     (15 + XFS_BIG_BLKNOS * 20)
+#define DSTARTBLOCKMASKBITS    (15 + 20)
+#define STARTBLOCKMASK         \
+       (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+#define DSTARTBLOCKMASK                \
+       (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+
+static inline int isnullstartblock(xfs_fsblock_t x)
+{
+       return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
+}
+
+static inline int isnulldstartblock(xfs_dfsbno_t x)
+{
+       return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
+}
+
+static inline xfs_fsblock_t nullstartblock(int k)
+{
+       ASSERT(k < (1 << STARTBLOCKVALBITS));
+       return STARTBLOCKMASK | (k);
+}
+
+static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
+{
+       return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
+}
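Aside (usage sketch, not part of this patch): a delayed-allocation extent
stores the number of reserved indirect blocks in the low STARTBLOCKVALBITS
bits of its start block, with the mask bits set so the encoded value can
never collide with a real block number:

        xfs_fsblock_t sb = nullstartblock(5);  /* encode 5 reserved blocks     */
        isnullstartblock(sb);                   /* -> true, delalloc marker     */
        startblockval(sb);                      /* -> 5, reservation recovered  */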
+
+/*
+ * Possible extent formats.
+ */
+typedef enum {
+       XFS_EXTFMT_NOSTATE = 0,
+       XFS_EXTFMT_HASSTATE
+} xfs_exntfmt_t;
+
+/*
+ * Possible extent states.
+ */
+typedef enum {
+       XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
+       XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
+} xfs_exntst_t;
+
+/*
+ * Incore version of above.
+ */
+typedef struct xfs_bmbt_irec
+{
+       xfs_fileoff_t   br_startoff;    /* starting file offset */
+       xfs_fsblock_t   br_startblock;  /* starting block number */
+       xfs_filblks_t   br_blockcount;  /* number of blocks */
+       xfs_exntst_t    br_state;       /* extent state */
+} xfs_bmbt_irec_t;
+
+/*
+ * Key structure for non-leaf levels of the tree.
+ */
+typedef struct xfs_bmbt_key {
+       __be64          br_startoff;    /* starting file offset */
+} xfs_bmbt_key_t, xfs_bmdr_key_t;
+
+/* btree pointer type */
+typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
+
+
+/*
+ * Generic Btree block format definitions
+ *
+ * This is a combination of the actual format used on disk for short and long
+ * format btrees.  The first three fields are shared by both formats, but the
+ * pointers are different and should be used with care.
+ *
+ * To get the size of the actual short or long form headers please use the size
+ * macros below.  Never use sizeof(xfs_btree_block).
+ *
+ * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
+ * with the crc feature bit, and all accesses to them must be conditional on
+ * that flag.
+ */
+struct xfs_btree_block {
+       __be32          bb_magic;       /* magic number for block type */
+       __be16          bb_level;       /* 0 is a leaf */
+       __be16          bb_numrecs;     /* current # of data records */
+       union {
+               struct {
+                       __be32          bb_leftsib;
+                       __be32          bb_rightsib;
+
+                       __be64          bb_blkno;
+                       __be64          bb_lsn;
+                       uuid_t          bb_uuid;
+                       __be32          bb_owner;
+                       __le32          bb_crc;
+               } s;                    /* short form pointers */
+               struct  {
+                       __be64          bb_leftsib;
+                       __be64          bb_rightsib;
+
+                       __be64          bb_blkno;
+                       __be64          bb_lsn;
+                       uuid_t          bb_uuid;
+                       __be64          bb_owner;
+                       __le32          bb_crc;
+                       __be32          bb_pad; /* padding for alignment */
+               } l;                    /* long form pointers */
+       } bb_u;                         /* rest */
+};
+
+#define XFS_BTREE_SBLOCK_LEN   16      /* size of a short form block */
+#define XFS_BTREE_LBLOCK_LEN   24      /* size of a long form block */
+
+/* sizes of CRC enabled btree blocks */
+#define XFS_BTREE_SBLOCK_CRC_LEN       (XFS_BTREE_SBLOCK_LEN + 40)
+#define XFS_BTREE_LBLOCK_CRC_LEN       (XFS_BTREE_LBLOCK_LEN + 48)
+
+#define XFS_BTREE_SBLOCK_CRC_OFF \
+       offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
+#define XFS_BTREE_LBLOCK_CRC_OFF \
+       offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
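Aside (worked arithmetic, not part of this patch): the length macros above
follow directly from the field sizes in struct xfs_btree_block:

        XFS_BTREE_SBLOCK_LEN     = 4 + 2 + 2 + 4 + 4               = 16
        XFS_BTREE_SBLOCK_CRC_LEN = 16 + 8 + 8 + 16 + 4 + 4         = 56
        XFS_BTREE_LBLOCK_LEN     = 4 + 2 + 2 + 8 + 8               = 24
        XFS_BTREE_LBLOCK_CRC_LEN = 24 + 8 + 8 + 16 + 8 + 4 + 4     = 72

(magic, level, numrecs, then the sibling pointers; the CRC variants add
blkno, lsn, uuid, owner, crc and, for the long form, the alignment pad.)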
 
 #endif /* __XFS_FORMAT_H__ */
index 1edb5cc3e5f495fdca3059054d1f2e7bd5d9fd58..c5fc116dfaa307b156e0b652bfffd4f80c173b89 100644 (file)
@@ -233,11 +233,11 @@ typedef struct xfs_fsop_resblks {
 #define XFS_FSOP_GEOM_FLAGS_LOGV2      0x0100  /* log format version 2 */
 #define XFS_FSOP_GEOM_FLAGS_SECTOR     0x0200  /* sector sizes >1BB    */
 #define XFS_FSOP_GEOM_FLAGS_ATTR2      0x0400  /* inline attributes rework */
-#define XFS_FSOP_GEOM_FLAGS_PROJID32   0x0800  /* 32-bit project IDs   */
+#define XFS_FSOP_GEOM_FLAGS_PROJID32   0x0800  /* 32-bit project IDs   */
 #define XFS_FSOP_GEOM_FLAGS_DIRV2CI    0x1000  /* ASCII only CI names  */
 #define XFS_FSOP_GEOM_FLAGS_LAZYSB     0x4000  /* lazy superblock counters */
 #define XFS_FSOP_GEOM_FLAGS_V5SB       0x8000  /* version 5 superblock */
-
+#define XFS_FSOP_GEOM_FLAGS_FTYPE      0x10000 /* inode directory types */
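Aside (a hypothetical userspace sketch, not part of this patch): the new
FTYPE flag could be probed with the existing XFS_IOC_FSGEOMETRY ioctl; the
<xfs/xfs.h> header path and the error handling here are assumptions made
only for this illustration:

        #include <stdio.h>
        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <xfs/xfs.h>    /* assumed to provide the XFS ioctl ABI */

        int main(int argc, char **argv)
        {
                struct xfs_fsop_geom geo;
                int fd;

                if (argc < 2)
                        return 1;
                fd = open(argv[1], O_RDONLY);
                if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
                        perror("XFS_IOC_FSGEOMETRY");
                        return 1;
                }
                printf("directory ftype: %s\n",
                       (geo.flags & XFS_FSOP_GEOM_FLAGS_FTYPE) ? "yes" : "no");
                return 0;
        }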
 
 /*
  * Minimum and maximum sizes need for growth checks.
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
 /*     XFS_IOC_GETBIOSIZE ---- deprecated 47      */
 #define XFS_IOC_GETBMAPX       _IOWR('X', 56, struct getbmap)
 #define XFS_IOC_ZERO_RANGE     _IOW ('X', 57, struct xfs_flock64)
-#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks)
+#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
 
 /*
  * ioctl commands that replace IRIX syssgi()'s
index e64ee5288b86be2d0c0267b383d3f6f9297e60a1..a6e54b3319bd0f5deb573623f332486590fbe165 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
 #include "xfs_error.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_fsops.h"
 #include "xfs_itable.h"
 #include "xfs_trans_space.h"
 #include "xfs_rtalloc.h"
-#include "xfs_filestream.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
 
 /*
  * File system operations
@@ -101,7 +102,9 @@ xfs_fs_geometry(
                        (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
                        (xfs_sb_version_hascrc(&mp->m_sb) ?
-                               XFS_FSOP_GEOM_FLAGS_V5SB : 0);
+                               XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
+                       (xfs_sb_version_hasftype(&mp->m_sb) ?
+                               XFS_FSOP_GEOM_FLAGS_FTYPE : 0);
                geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
                                mp->m_sb.sb_logsectsize : BBSIZE;
                geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -153,7 +156,7 @@ xfs_growfs_data_private(
        xfs_buf_t               *bp;
        int                     bucket;
        int                     dpct;
-       int                     error;
+       int                     error, saved_error = 0;
        xfs_agnumber_t          nagcount;
        xfs_agnumber_t          nagimax = 0;
        xfs_rfsblock_t          nb, nb_mod;
@@ -496,29 +499,33 @@ xfs_growfs_data_private(
                                error = ENOMEM;
                }
 
+               /*
+                * If we get an error reading or writing alternate superblocks,
+                * continue.  xfs_repair chooses the "best" superblock based
+                * on most matches; if we break early, we'll leave more
+                * superblocks un-updated than updated, and xfs_repair may
+                * pick them over the properly-updated primary.
+                */
                if (error) {
                        xfs_warn(mp,
                "error %d reading secondary superblock for ag %d",
                                error, agno);
-                       break;
+                       saved_error = error;
+                       continue;
                }
                xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
 
-               /*
-                * If we get an error writing out the alternate superblocks,
-                * just issue a warning and continue.  The real work is
-                * already done and committed.
-                */
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error) {
                        xfs_warn(mp,
                "write error %d updating secondary superblock for ag %d",
                                error, agno);
-                       break; /* no point in continuing */
+                       saved_error = error;
+                       continue;
                }
        }
-       return error;
+       return saved_error ? saved_error : error;
 
  error0:
        xfs_trans_cancel(tp, XFS_TRANS_ABORT);
index ccf2fb1439629fae273a239625f97f6879192878..14d732f61a410317442004f234a2e642399dd62b 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_icreate_item.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
 
 
 /*
index 68c07320f096e53d31f3050850414e1038c79c4a..a8f76a5ff4184b316c53b78cb22445006926e90f 100644 (file)
@@ -23,6 +23,7 @@ struct xfs_dinode;
 struct xfs_imap;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_btree_cur;
 
 /*
  * Allocation parameters for inode allocation.
@@ -42,7 +43,7 @@ struct xfs_trans;
 static inline struct xfs_dinode *
 xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
 {
-       return (xfs_dinode_t *)
+       return (struct xfs_dinode *)
                (xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog));
 }
 
@@ -158,6 +159,4 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
                          xfs_agnumber_t agno, xfs_agblock_t agbno,
                          xfs_agblock_t length, unsigned int gen);
 
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
 #endif /* __XFS_IALLOC_H__ */
index 5448eb6b8c12ad1acdf9d621750ec39e6c280a80..1fa142dc86cbd6397dc52e0b7bf9191d12f74eeb 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 
 
 STATIC int
index 3ac36b7642e9a09960ead79d38583b08e1be1213..f38b22011c4e4604a5e9e74ec9f4afe344dccfdb 100644 (file)
@@ -26,55 +26,6 @@ struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
 
-/*
- * There is a btree for the inode map per allocation group.
- */
-#define        XFS_IBT_MAGIC           0x49414254      /* 'IABT' */
-#define        XFS_IBT_CRC_MAGIC       0x49414233      /* 'IAB3' */
-
-typedef        __uint64_t      xfs_inofree_t;
-#define        XFS_INODES_PER_CHUNK            (NBBY * sizeof(xfs_inofree_t))
-#define        XFS_INODES_PER_CHUNK_LOG        (XFS_NBBYLOG + 3)
-#define        XFS_INOBT_ALL_FREE              ((xfs_inofree_t)-1)
-#define        XFS_INOBT_MASK(i)               ((xfs_inofree_t)1 << (i))
-
-static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
-{
-       return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
-}
-
-/*
- * Data record structure
- */
-typedef struct xfs_inobt_rec {
-       __be32          ir_startino;    /* starting inode number */
-       __be32          ir_freecount;   /* count of free inodes (set bits) */
-       __be64          ir_free;        /* free inode mask */
-} xfs_inobt_rec_t;
-
-typedef struct xfs_inobt_rec_incore {
-       xfs_agino_t     ir_startino;    /* starting inode number */
-       __int32_t       ir_freecount;   /* count of free inodes (set bits) */
-       xfs_inofree_t   ir_free;        /* free inode mask */
-} xfs_inobt_rec_incore_t;
-
-
-/*
- * Key structure
- */
-typedef struct xfs_inobt_key {
-       __be32          ir_startino;    /* starting inode number */
-} xfs_inobt_key_t;
-
-/* btree pointer type */
-typedef __be32 xfs_inobt_ptr_t;
-
-/*
- * block numbers in the AG.
- */
-#define        XFS_IBT_BLOCK(mp)               ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
-#define        XFS_PREALLOC_BLOCKS(mp)         ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
-
 /*
  * Btree block header size depends on a superblock flag.
  */
@@ -110,6 +61,4 @@ extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
                struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);
 extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
 
-extern const struct xfs_buf_ops xfs_inobt_buf_ops;
-
 #endif /* __XFS_IALLOC_BTREE_H__ */
index 193206ba43582c0aecbea7afd1d23220bfff554f..98d35244eecc936bdb0a9702ae70a9de1d980de8 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_dinode.h"
 #include "xfs_error.h"
-#include "xfs_filestream.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_inode_item.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
-#include "xfs_fsops.h"
 #include "xfs_icache.h"
 #include "xfs_bmap_util.h"
 
@@ -119,11 +114,6 @@ xfs_inode_free(
                ip->i_itemp = NULL;
        }
 
-       /* asserts to verify all state is correct here */
-       ASSERT(atomic_read(&ip->i_pincount) == 0);
-       ASSERT(!spin_is_locked(&ip->i_flags_lock));
-       ASSERT(!xfs_isiflocked(ip));
-
        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +125,10 @@ xfs_inode_free(
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);
 
+       /* asserts to verify all state is correct here */
+       ASSERT(atomic_read(&ip->i_pincount) == 0);
+       ASSERT(!xfs_isiflocked(ip));
+
        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
 
@@ -501,11 +495,6 @@ xfs_inode_ag_walk_grab(
        if (!igrab(inode))
                return ENOENT;
 
-       if (is_bad_inode(inode)) {
-               IRELE(ip);
-               return ENOENT;
-       }
-
        /* inode is valid */
        return 0;
 
@@ -919,8 +908,6 @@ restart:
                xfs_iflock(ip);
        }
 
-       if (is_bad_inode(VFS_I(ip)))
-               goto reclaim;
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                xfs_iflush_abort(ip, false);
index 5a5a593994d4196d3b18c2e959df90dc28c7588f..d2eaccfa73f4b14c622e66b39787396433e87604 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_icreate_item.h"
index e3d75385aa76a6e45b7711a65bb39c268c9f689b..326b94dbe159b99debd7b78dc4acca2136ab0aab 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_space.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_attr_sf.h"
 #include "xfs_attr.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans_space.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
+#include "xfs_dinode.h"
 #include "xfs_filestream.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -1662,6 +1661,126 @@ xfs_release(
        return 0;
 }
 
+/*
+ * xfs_inactive_truncate
+ *
+ * Called to perform a truncate when an inode becomes unlinked.
+ */
+STATIC int
+xfs_inactive_truncate(
+       struct xfs_inode *ip)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+       if (error) {
+               ASSERT(XFS_FORCED_SHUTDOWN(mp));
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
+       /*
+        * Log the inode size first to prevent stale data exposure in the event
+        * of a system crash before the truncate completes. See the related
+        * comment in xfs_setattr_size() for details.
+        */
+       ip->i_d.di_size = 0;
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
+       if (error)
+               goto error_trans_cancel;
+
+       ASSERT(ip->i_d.di_nextents == 0);
+
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       if (error)
+               goto error_unlock;
+
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return 0;
+
+error_trans_cancel:
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return error;
+}
+
+/*
+ * xfs_inactive_ifree()
+ *
+ * Perform the inode free when an inode is unlinked.
+ */
+STATIC int
+xfs_inactive_ifree(
+       struct xfs_inode *ip)
+{
+       xfs_bmap_free_t         free_list;
+       xfs_fsblock_t           first_block;
+       int                     committed;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
+       if (error) {
+               ASSERT(XFS_FORCED_SHUTDOWN(mp));
+               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
+       xfs_bmap_init(&free_list, &first_block);
+       error = xfs_ifree(tp, ip, &free_list);
+       if (error) {
+               /*
+                * If we fail to free the inode, shut down.  The cancel
+                * might do that, we need to make sure.  Otherwise the
+                * inode might be lost for a long time or forever.
+                */
+               if (!XFS_FORCED_SHUTDOWN(mp)) {
+                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
+                               __func__, error);
+                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+               }
+               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               return error;
+       }
+
+       /*
+        * Credit the quota account(s). The inode is gone.
+        */
+       xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+
+       /*
+        * Just ignore errors at this point.  There is nothing we can
+        * do except to try to keep going. Make sure it's not a silent
+        * error.
+        */
+       error = xfs_bmap_finish(&tp,  &free_list, &committed);
+       if (error)
+               xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+                       __func__, error);
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       if (error)
+               xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+                       __func__, error);
+
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return 0;
+}
+
 /*
  * xfs_inactive
  *
@@ -1670,16 +1789,11 @@ xfs_release(
  * now be truncated.  Also, we clear all of the read-ahead state
  * kept for the inode here since the file is now closed.
  */
-int
+void
 xfs_inactive(
        xfs_inode_t     *ip)
 {
-       xfs_bmap_free_t         free_list;
-       xfs_fsblock_t           first_block;
-       int                     committed;
-       struct xfs_trans        *tp;
        struct xfs_mount        *mp;
-       struct xfs_trans_res    *resp;
        int                     error;
        int                     truncate = 0;
 
@@ -1687,19 +1801,17 @@ xfs_inactive(
         * If the inode is already free, then there can be nothing
         * to clean up here.
         */
-       if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
+       if (ip->i_d.di_mode == 0) {
                ASSERT(ip->i_df.if_real_bytes == 0);
                ASSERT(ip->i_df.if_broot_bytes == 0);
-               return VN_INACTIVE_CACHE;
+               return;
        }
 
        mp = ip->i_mount;
 
-       error = 0;
-
        /* If this is a read-only mount, don't do this (would generate I/O) */
        if (mp->m_flags & XFS_MOUNT_RDONLY)
-               goto out;
+               return;
 
        if (ip->i_d.di_nlink != 0) {
                /*
@@ -1707,12 +1819,10 @@ xfs_inactive(
                 * cache. Post-eof blocks must be freed, lest we end up with
                 * broken free space accounting.
                 */
-               if (xfs_can_free_eofblocks(ip, true)) {
-                       error = xfs_free_eofblocks(mp, ip, false);
-                       if (error)
-                               return VN_INACTIVE_CACHE;
-               }
-               goto out;
+               if (xfs_can_free_eofblocks(ip, true))
+                       xfs_free_eofblocks(mp, ip, false);
+
+               return;
        }
 
        if (S_ISREG(ip->i_d.di_mode) &&
@@ -1722,36 +1832,14 @@ xfs_inactive(
 
        error = xfs_qm_dqattach(ip, 0);
        if (error)
-               return VN_INACTIVE_CACHE;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-       resp = (truncate || S_ISLNK(ip->i_d.di_mode)) ?
-               &M_RES(mp)->tr_itruncate : &M_RES(mp)->tr_ifree;
+               return;
 
-       error = xfs_trans_reserve(tp, resp, 0, 0);
-       if (error) {
-               ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               xfs_trans_cancel(tp, 0);
-               return VN_INACTIVE_CACHE;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, 0);
-
-       if (S_ISLNK(ip->i_d.di_mode)) {
-               error = xfs_inactive_symlink(ip, &tp);
-               if (error)
-                       goto out_cancel;
-       } else if (truncate) {
-               ip->i_d.di_size = 0;
-               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
-               if (error)
-                       goto out_cancel;
-
-               ASSERT(ip->i_d.di_nextents == 0);
-       }
+       if (S_ISLNK(ip->i_d.di_mode))
+               error = xfs_inactive_symlink(ip);
+       else if (truncate)
+               error = xfs_inactive_truncate(ip);
+       if (error)
+               return;
 
        /*
         * If there are attributes associated with the file then blow them away
@@ -1762,25 +1850,9 @@ xfs_inactive(
        if (ip->i_d.di_anextents > 0) {
                ASSERT(ip->i_d.di_forkoff != 0);
 
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       goto out_unlock;
-
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
                error = xfs_attr_inactive(ip);
                if (error)
-                       goto out;
-
-               tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
-               if (error) {
-                       xfs_trans_cancel(tp, 0);
-                       goto out;
-               }
-
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               xfs_trans_ijoin(tp, ip, 0);
+                       return;
        }
 
        if (ip->i_afp)
@@ -1791,52 +1863,14 @@ xfs_inactive(
        /*
         * Free the inode.
         */
-       xfs_bmap_init(&free_list, &first_block);
-       error = xfs_ifree(tp, ip, &free_list);
-       if (error) {
-               /*
-                * If we fail to free the inode, shut down.  The cancel
-                * might do that, we need to make sure.  Otherwise the
-                * inode might be lost for a long time or forever.
-                */
-               if (!XFS_FORCED_SHUTDOWN(mp)) {
-                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
-                               __func__, error);
-                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-               }
-               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       } else {
-               /*
-                * Credit the quota account(s). The inode is gone.
-                */
-               xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
-
-               /*
-                * Just ignore errors at this point.  There is nothing we can
-                * do except to try to keep going. Make sure it's not a silent
-                * error.
-                */
-               error = xfs_bmap_finish(&tp,  &free_list, &committed);
-               if (error)
-                       xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
-                               __func__, error);
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-                               __func__, error);
-       }
+       error = xfs_inactive_ifree(ip);
+       if (error)
+               return;
 
        /*
         * Release the dquots held by inode, if any.
         */
        xfs_qm_dqdetach(ip);
-out_unlock:
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out:
-       return VN_INACTIVE_CACHE;
-out_cancel:
-       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       goto out_unlock;
 }
 
 /*
index 4a91358c1470b9ac029d451dd38c3fa77daf6fc0..66675877f38cd273d7b9fe51e10d93980b9f8422 100644 (file)
@@ -24,7 +24,6 @@
 /*
  * Kernel only inode definitions
  */
-
 struct xfs_dinode;
 struct xfs_inode;
 struct xfs_buf;
@@ -316,7 +315,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
 
 
 int            xfs_release(struct xfs_inode *ip);
-int            xfs_inactive(struct xfs_inode *ip);
+void           xfs_inactive(struct xfs_inode *ip);
 int            xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
                           struct xfs_inode **ipp, struct xfs_name *ci_name);
 int            xfs_create(struct xfs_inode *dp, struct xfs_name *name,
index 63382d37f5658c8ee774668df7a50a87eeec5834..4fc9f39dd89e7b8ed64e271ca6ced6bc43a191f6 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_error.h"
 #include "xfs_cksum.h"
 #include "xfs_icache.h"
+#include "xfs_trans.h"
 #include "xfs_ialloc.h"
+#include "xfs_dinode.h"
 
 /*
 * Check that none of the inodes in the buffer have a next
index abba0ae8cf2da2b4012445bc2cc5cbf63b8c9a66..9308c47f2a527dc08b75b66de5d064e0b13e0cfe 100644 (file)
@@ -47,7 +47,4 @@ void  xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
 #define        xfs_inobp_check(mp, bp)
 #endif /* DEBUG */
 
-extern const struct xfs_buf_ops xfs_inode_buf_ops;
-extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
-
 #endif /* __XFS_INODE_BUF_H__ */
index 02f1083955bb1dfa19c295adea18b663b25f93f7..22c9837c5d4beb680514213369aa0722d189503f 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_filestream.h"
-#include "xfs_cksum.h"
 #include "xfs_trace.h"
-#include "xfs_icache.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -1359,7 +1349,7 @@ xfs_iext_remove_indirect(
 void
 xfs_iext_realloc_direct(
        xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             new_size)       /* new size of extents */
+       int             new_size)       /* new size of extents after adding */
 {
        int             rnew_size;      /* real new size of extents */
 
@@ -1397,13 +1387,8 @@ xfs_iext_realloc_direct(
                                rnew_size - ifp->if_real_bytes);
                }
        }
-       /*
-        * Switch from the inline extent buffer to a direct
-        * extent list. Be sure to include the inline extent
-        * bytes in new_size.
-        */
+       /* Switch from the inline extent buffer to a direct extent list */
        else {
-               new_size += ifp->if_bytes;
                if (!is_power_of_2(new_size)) {
                        rnew_size = roundup_pow_of_two(new_size);
                }
index 28661a0d90583bc2d20ab37089b79f921eaf135c..eb329a1ea8886a3d878489765bc5d13dbbf26068 100644 (file)
@@ -19,6 +19,7 @@
 #define        __XFS_INODE_FORK_H__
 
 struct xfs_inode_log_item;
+struct xfs_dinode;
 
 /*
  * The following xfs_ext_irec_t struct introduces a second (top) level
index 378081109844b09b2bbfd07dcb4214027fefe2c2..7c0d391f9a6e0bd3f4f6a72cc6f0e65323cc7183 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_trans_priv.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
 
 
 kmem_zone_t    *xfs_ili_zone;          /* inode log item zone */
index 668e8f4ccf5e7201e8e6a360cd26668484f6d515..4d613401a5e056a08dd2f8b21b77833bfe14cbc6 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ioctl.h"
+#include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
 #include "xfs_attr.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_buf_item.h"
 #include "xfs_fsops.h"
 #include "xfs_discard.h"
 #include "xfs_quota.h"
-#include "xfs_inode_item.h"
 #include "xfs_export.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
+#include "xfs_dinode.h"
+#include "xfs_trans.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -641,7 +640,11 @@ xfs_ioc_space(
        unsigned int            cmd,
        xfs_flock64_t           *bf)
 {
-       int                     attr_flags = 0;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       struct iattr            iattr;
+       bool                    setprealloc = false;
+       bool                    clrprealloc = false;
        int                     error;
 
        /*
@@ -661,19 +664,128 @@ xfs_ioc_space(
        if (!S_ISREG(inode->i_mode))
                return -XFS_ERROR(EINVAL);
 
-       if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-               attr_flags |= XFS_ATTR_NONBLOCK;
+       error = mnt_want_write_file(filp);
+       if (error)
+               return error;
 
-       if (filp->f_flags & O_DSYNC)
-               attr_flags |= XFS_ATTR_SYNC;
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       switch (bf->l_whence) {
+       case 0: /*SEEK_SET*/
+               break;
+       case 1: /*SEEK_CUR*/
+               bf->l_start += filp->f_pos;
+               break;
+       case 2: /*SEEK_END*/
+               bf->l_start += XFS_ISIZE(ip);
+               break;
+       default:
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
 
-       if (ioflags & IO_INVIS)
-               attr_flags |= XFS_ATTR_DMI;
+       /*
+        * length of <= 0 for resv/unresv/zero is invalid.  length for
+        * alloc/free is ignored completely and we have no idea what userspace
+        * might have set it to, so set it to zero to allow range
+        * checks to pass.
+        */
+       switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_RESVSP64:
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_UNRESVSP64:
+               if (bf->l_len <= 0) {
+                       error = XFS_ERROR(EINVAL);
+                       goto out_unlock;
+               }
+               break;
+       default:
+               bf->l_len = 0;
+               break;
+       }
+
+       if (bf->l_start < 0 ||
+           bf->l_start > mp->m_super->s_maxbytes ||
+           bf->l_start + bf->l_len < 0 ||
+           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
+
+       switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+               error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
+               if (!error)
+                       setprealloc = true;
+               break;
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_RESVSP64:
+               error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
+                                               XFS_BMAPI_PREALLOC);
+               if (!error)
+                       setprealloc = true;
+               break;
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_UNRESVSP64:
+               error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
+               break;
+       case XFS_IOC_ALLOCSP:
+       case XFS_IOC_ALLOCSP64:
+       case XFS_IOC_FREESP:
+       case XFS_IOC_FREESP64:
+               if (bf->l_start > XFS_ISIZE(ip)) {
+                       error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+                                       bf->l_start - XFS_ISIZE(ip), 0);
+                       if (error)
+                               goto out_unlock;
+               }
+
+               iattr.ia_valid = ATTR_SIZE;
+               iattr.ia_size = bf->l_start;
+
+               error = xfs_setattr_size(ip, &iattr);
+               if (!error)
+                       clrprealloc = true;
+               break;
+       default:
+               ASSERT(0);
+               error = XFS_ERROR(EINVAL);
+       }
 
-       error = mnt_want_write_file(filp);
        if (error)
-               return error;
-       error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
+               goto out_unlock;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_unlock;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       if (!(ioflags & IO_INVIS)) {
+               ip->i_d.di_mode &= ~S_ISUID;
+               if (ip->i_d.di_mode & S_IXGRP)
+                       ip->i_d.di_mode &= ~S_ISGID;
+               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       }
+
+       if (setprealloc)
+               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+       else if (clrprealloc)
+               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       if (filp->f_flags & O_DSYNC)
+               xfs_trans_set_sync(tp);
+       error = xfs_trans_commit(tp, 0);
+
+out_unlock:
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        mnt_drop_write_file(filp);
        return -error;
 }
index f671f7e472ac008511ca4df2068c9d4fb91d169c..e8fb1231db8124dc08b2ffbcc551d6bfa1bc21f5 100644 (file)
 #include <asm/uaccess.h>
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_vnode.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
index 8d4d49b6fbf347b3add01ed4675b489a653a6dfb..22d1cbea283d4734515218ef65b23ec78bfdeff6 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
 #include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_iomap.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_quota.h"
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
+#include "xfs_dinode.h"
 
 
 #define XFS_WRITEIO_ALIGN(mp,off)      (((off) >> mp->m_writeio_log) \
@@ -110,7 +104,7 @@ xfs_alert_fsblock_zero(
        xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                        "Access to block zero in inode %llu "
                        "start_block: %llx start_off: %llx "
-                       "blkcnt: %llx extent-state: %x\n",
+                       "blkcnt: %llx extent-state: %x",
                (unsigned long long)ip->i_ino,
                (unsigned long long)imap->br_startblock,
                (unsigned long long)imap->br_startoff,
@@ -655,7 +649,6 @@ int
 xfs_iomap_write_allocate(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
-       size_t          count,
        xfs_bmbt_irec_t *imap)
 {
        xfs_mount_t     *mp = ip->i_mount;
index 80615760959ae169dddb471af1964c4ee03f448e..411fbb8919ef02456e82bb39c7b97443bf1e4da0 100644 (file)
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
                        struct xfs_bmbt_irec *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
                        struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
                        struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
index 2b8952d9199bbd145473a48b326120b8d43ed9b6..718b62b0fe05165d3749249869dddc0eebede87e 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_acl.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_acl.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
 #include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2_priv.h"
+#include "xfs_dinode.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -709,8 +705,7 @@ out_dqrele:
 int
 xfs_setattr_size(
        struct xfs_inode        *ip,
-       struct iattr            *iattr,
-       int                     flags)
+       struct iattr            *iattr)
 {
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
@@ -733,15 +728,11 @@ xfs_setattr_size(
        if (error)
                return XFS_ERROR(error);
 
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(S_ISREG(ip->i_d.di_mode));
        ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
                        ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
-       if (!(flags & XFS_ATTR_NOLOCK)) {
-               lock_flags |= XFS_IOLOCK_EXCL;
-               xfs_ilock(ip, lock_flags);
-       }
-
        oldsize = inode->i_size;
        newsize = iattr->ia_size;
 
@@ -750,12 +741,11 @@ xfs_setattr_size(
         */
        if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
                if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
-                       goto out_unlock;
+                       return 0;
 
                /*
                 * Use the regular setattr path to update the timestamps.
                 */
-               xfs_iunlock(ip, lock_flags);
                iattr->ia_valid &= ~ATTR_SIZE;
                return xfs_setattr_nonsize(ip, iattr, 0);
        }
@@ -765,7 +755,7 @@ xfs_setattr_size(
         */
        error = xfs_qm_dqattach(ip, 0);
        if (error)
-               goto out_unlock;
+               return error;
 
        /*
         * Now we can make the changes.  Before we join the inode to the
@@ -783,7 +773,7 @@ xfs_setattr_size(
                 */
                error = xfs_zero_eof(ip, newsize, oldsize);
                if (error)
-                       goto out_unlock;
+                       return error;
        }
 
        /*
@@ -802,7 +792,7 @@ xfs_setattr_size(
                error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                                      ip->i_d.di_size, newsize);
                if (error)
-                       goto out_unlock;
+                       return error;
        }
 
        /*
@@ -812,7 +802,7 @@ xfs_setattr_size(
 
        error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
        if (error)
-               goto out_unlock;
+               return error;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
@@ -916,12 +906,21 @@ out_trans_cancel:
 
 STATIC int
 xfs_vn_setattr(
-       struct dentry   *dentry,
-       struct iattr    *iattr)
+       struct dentry           *dentry,
+       struct iattr            *iattr)
 {
-       if (iattr->ia_valid & ATTR_SIZE)
-               return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
-       return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
+       struct xfs_inode        *ip = XFS_I(dentry->d_inode);
+       int                     error;
+
+       if (iattr->ia_valid & ATTR_SIZE) {
+               xfs_ilock(ip, XFS_IOLOCK_EXCL);
+               error = xfs_setattr_size(ip, iattr);
+               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       } else {
+               error = xfs_setattr_nonsize(ip, iattr, 0);
+       }
+
+       return -error;
 }
 
 STATIC int
index d81fb41205ec97b9a00ecc0789e99763adc5af71..d2c5057b5cc4b4b701d05478e045cbf02a3ed408 100644 (file)
@@ -30,14 +30,10 @@ extern void xfs_setup_inode(struct xfs_inode *);
 /*
  * Internal setattr interfaces.
  */
-#define        XFS_ATTR_DMI            0x01    /* invocation from a DMI function */
-#define        XFS_ATTR_NONBLOCK       0x02    /* return EAGAIN if op would block */
-#define XFS_ATTR_NOLOCK                0x04    /* Don't grab any conflicting locks */
-#define XFS_ATTR_NOACL         0x08    /* Don't call xfs_acl_chmod */
-#define XFS_ATTR_SYNC          0x10    /* synchronous operation required */
+#define XFS_ATTR_NOACL         0x01    /* Don't call xfs_acl_chmod */
 
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
                               int flags);
-extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
+extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
 
 #endif /* __XFS_IOPS_H__ */
index 084b3e1741fd0346cac1d8279ed8085553ee7486..c237ad15d500f767b81014a428720a7351431077 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
-#include "xfs_btree.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
 
 STATIC int
 xfs_internal_inum(
index a2dea108071ae6e81d0e683a98a4a011b74f23ee..e523396753c5068cebe7ee8dd3b889623bf1c8f0 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_log_recover.h"
-#include "xfs_trans_priv.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
 #include "xfs_fsops.h"
@@ -1000,27 +998,34 @@ xfs_log_space_wake(
 }
 
 /*
- * Determine if we have a transaction that has gone to disk
- * that needs to be covered. To begin the transition to the idle state
- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
- * If we are then in a state where covering is needed, the caller is informed
- * that dummy transactions are required to move the log into the idle state.
+ * Determine if we have a transaction that has gone to disk that needs to be
+ * covered. To begin the transition to the idle state firstly the log needs to
+ * be idle. That means the CIL, the AIL and the iclogs need to be empty before
+ * we start attempting to cover the log.
+ *
+ * Only if we are then in a state where covering is needed is the caller
+ * informed that dummy transactions are required to move the log into the
+ * idle state.
  *
- * Because this is called as part of the sync process, we should also indicate
- * that dummy transactions should be issued in anything but the covered or
- * idle states. This ensures that the log tail is accurately reflected in
- * the log at the end of the sync, hence if a crash occurrs avoids replay
- * of transactions where the metadata is already on disk.
+ * If there are any items in the AIL or CIL, then we do not want to attempt to
+ * cover the log as we may be in a situation where there isn't log space
+ * available to run a dummy transaction and this can lead to deadlocks when the
+ * tail of the log is pinned by an item that is modified in the CIL.  Hence
+ * there's no point in running a dummy transaction at this point because we
+ * can't start trying to idle the log until both the CIL and AIL are empty.
  */
 int
 xfs_log_need_covered(xfs_mount_t *mp)
 {
-       int             needed = 0;
        struct xlog     *log = mp->m_log;
+       int             needed = 0;
 
        if (!xfs_fs_writable(mp))
                return 0;
 
+       if (!xlog_cil_empty(log))
+               return 0;
+
        spin_lock(&log->l_icloglock);
        switch (log->l_covered_state) {
        case XLOG_STATE_COVER_DONE:
@@ -1029,14 +1034,17 @@ xfs_log_need_covered(xfs_mount_t *mp)
                break;
        case XLOG_STATE_COVER_NEED:
        case XLOG_STATE_COVER_NEED2:
-               if (!xfs_ail_min_lsn(log->l_ailp) &&
-                   xlog_iclogs_empty(log)) {
-                       if (log->l_covered_state == XLOG_STATE_COVER_NEED)
-                               log->l_covered_state = XLOG_STATE_COVER_DONE;
-                       else
-                               log->l_covered_state = XLOG_STATE_COVER_DONE2;
-               }
-               /* FALLTHRU */
+               if (xfs_ail_min_lsn(log->l_ailp))
+                       break;
+               if (!xlog_iclogs_empty(log))
+                       break;
+
+               needed = 1;
+               if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+                       log->l_covered_state = XLOG_STATE_COVER_DONE;
+               else
+                       log->l_covered_state = XLOG_STATE_COVER_DONE2;
+               break;
        default:
                needed = 1;
                break;
@@ -1979,7 +1987,7 @@ xlog_print_tic_res(
 
        for (i = 0; i < ticket->t_res_num; i++) {
                uint r_type = ticket->t_res_arr[i].r_type;
-               xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
+               xfs_warn(mp, "region[%u]: %s - %u bytes", i,
                            ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
                            "bad-rtype" : res_type_str[r_type-1]),
                            ticket->t_res_arr[i].r_len);
index 1c458487f000a42306cb44f14509d80fea0c2f02..e148719e0a5d59dba022034cd5e988ae47552bde 100644 (file)
@@ -18,8 +18,6 @@
 #ifndef        __XFS_LOG_H__
 #define __XFS_LOG_H__
 
-#include "xfs_log_format.h"
-
 struct xfs_log_vec {
        struct xfs_log_vec      *lv_next;       /* next lv in build list */
        int                     lv_niovecs;     /* number of iovecs in lv */
@@ -82,11 +80,7 @@ struct xlog_ticket;
 struct xfs_log_item;
 struct xfs_item_ops;
 struct xfs_trans;
-
-void   xfs_log_item_init(struct xfs_mount      *mp,
-                       struct xfs_log_item     *item,
-                       int                     type,
-                       const struct xfs_item_ops *ops);
+struct xfs_log_callback;
 
 xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
                       struct xlog_ticket *ticket,
@@ -114,7 +108,7 @@ xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void     xfs_log_space_wake(struct xfs_mount *mp);
 int      xfs_log_notify(struct xfs_mount       *mp,
                         struct xlog_in_core    *iclog,
-                        xfs_log_callback_t     *callback_entry);
+                        struct xfs_log_callback *callback_entry);
 int      xfs_log_release_iclog(struct xfs_mount *mp,
                         struct xlog_in_core     *iclog);
 int      xfs_log_reserve(struct xfs_mount *mp,
index cfe97973ba36d1d586c3704b536aebce2e391af1..5eb51fc5eb844f2553e5a2d4711c9a32a74bdd18 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_discard.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
 
 /*
  * Allocate a new ticket. Failing to get a new ticket makes it really hard to
@@ -711,6 +713,20 @@ xlog_cil_push_foreground(
        xlog_cil_push(log);
 }
 
+bool
+xlog_cil_empty(
+       struct xlog     *log)
+{
+       struct xfs_cil  *cil = log->l_cilp;
+       bool            empty = false;
+
+       spin_lock(&cil->xc_push_lock);
+       if (list_empty(&cil->xc_cil))
+               empty = true;
+       spin_unlock(&cil->xc_push_lock);
+       return empty;
+}
+
 /*
  * Commit a transaction with the given vector to the Committed Item List.
  *
index ca7e28a8ed31d996f7e56862e38af43b449235cc..f0969c77bdbe1ea7d87e732ba27a8c4d3fbd54c4 100644 (file)
@@ -233,178 +233,6 @@ typedef struct xfs_trans_header {
        { XFS_LI_QUOTAOFF,      "XFS_LI_QUOTAOFF" }, \
        { XFS_LI_ICREATE,       "XFS_LI_ICREATE" }
 
-/*
- * Transaction types.  Used to distinguish types of buffers.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE     1
-#define XFS_TRANS_SETATTR_SIZE         2
-#define XFS_TRANS_INACTIVE             3
-#define XFS_TRANS_CREATE               4
-#define XFS_TRANS_CREATE_TRUNC         5
-#define XFS_TRANS_TRUNCATE_FILE                6
-#define XFS_TRANS_REMOVE               7
-#define XFS_TRANS_LINK                 8
-#define XFS_TRANS_RENAME               9
-#define XFS_TRANS_MKDIR                        10
-#define XFS_TRANS_RMDIR                        11
-#define XFS_TRANS_SYMLINK              12
-#define XFS_TRANS_SET_DMATTRS          13
-#define XFS_TRANS_GROWFS               14
-#define XFS_TRANS_STRAT_WRITE          15
-#define XFS_TRANS_DIOSTRAT             16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define        XFS_TRANS_WRITEID               18
-#define        XFS_TRANS_ADDAFORK              19
-#define        XFS_TRANS_ATTRINVAL             20
-#define        XFS_TRANS_ATRUNCATE             21
-#define        XFS_TRANS_ATTR_SET              22
-#define        XFS_TRANS_ATTR_RM               23
-#define        XFS_TRANS_ATTR_FLAG             24
-#define        XFS_TRANS_CLEAR_AGI_BUCKET      25
-#define XFS_TRANS_QM_SBCHANGE          26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1               27
-#define XFS_TRANS_DUMMY2               28
-#define XFS_TRANS_QM_QUOTAOFF          29
-#define XFS_TRANS_QM_DQALLOC           30
-#define XFS_TRANS_QM_SETQLIM           31
-#define XFS_TRANS_QM_DQCLUSTER         32
-#define XFS_TRANS_QM_QINOCREATE                33
-#define XFS_TRANS_QM_QUOTAOFF_END      34
-#define XFS_TRANS_SB_UNIT              35
-#define XFS_TRANS_FSYNC_TS             36
-#define        XFS_TRANS_GROWFSRT_ALLOC        37
-#define        XFS_TRANS_GROWFSRT_ZERO         38
-#define        XFS_TRANS_GROWFSRT_FREE         39
-#define        XFS_TRANS_SWAPEXT               40
-#define        XFS_TRANS_SB_COUNT              41
-#define        XFS_TRANS_CHECKPOINT            42
-#define        XFS_TRANS_ICREATE               43
-#define        XFS_TRANS_TYPE_MAX              43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
-       { XFS_TRANS_SETATTR_NOT_SIZE,   "SETATTR_NOT_SIZE" }, \
-       { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
-       { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
-       { XFS_TRANS_CREATE,             "CREATE" }, \
-       { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
-       { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
-       { XFS_TRANS_REMOVE,             "REMOVE" }, \
-       { XFS_TRANS_LINK,               "LINK" }, \
-       { XFS_TRANS_RENAME,             "RENAME" }, \
-       { XFS_TRANS_MKDIR,              "MKDIR" }, \
-       { XFS_TRANS_RMDIR,              "RMDIR" }, \
-       { XFS_TRANS_SYMLINK,            "SYMLINK" }, \
-       { XFS_TRANS_SET_DMATTRS,        "SET_DMATTRS" }, \
-       { XFS_TRANS_GROWFS,             "GROWFS" }, \
-       { XFS_TRANS_STRAT_WRITE,        "STRAT_WRITE" }, \
-       { XFS_TRANS_DIOSTRAT,           "DIOSTRAT" }, \
-       { XFS_TRANS_WRITEID,            "WRITEID" }, \
-       { XFS_TRANS_ADDAFORK,           "ADDAFORK" }, \
-       { XFS_TRANS_ATTRINVAL,          "ATTRINVAL" }, \
-       { XFS_TRANS_ATRUNCATE,          "ATRUNCATE" }, \
-       { XFS_TRANS_ATTR_SET,           "ATTR_SET" }, \
-       { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
-       { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
-       { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
-       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
-       { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
-       { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
-       { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
-       { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
-       { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
-       { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
-       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
-       { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
-       { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
-       { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
-       { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
-       { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
-       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
-       { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
-       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
-       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
-       { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
-
-/*
- * This structure is used to track log items associated with
- * a transaction.  It points to the log item and keeps some
- * flags to track the state of the log item.  It also tracks
- * the amount of space needed to log the item it describes
- * once we get to commit processing (see xfs_trans_commit()).
- */
-struct xfs_log_item_desc {
-       struct xfs_log_item     *lid_item;
-       struct list_head        lid_trans;
-       unsigned char           lid_flags;
-};
-
-#define XFS_LID_DIRTY          0x1
-
-/*
- * Values for t_flags.
- */
-#define        XFS_TRANS_DIRTY         0x01    /* something needs to be logged */
-#define        XFS_TRANS_SB_DIRTY      0x02    /* superblock is modified */
-#define        XFS_TRANS_PERM_LOG_RES  0x04    /* xact took a permanent log res */
-#define        XFS_TRANS_SYNC          0x08    /* make commit synchronous */
-#define XFS_TRANS_DQ_DIRTY     0x10    /* at least one dquot in trx dirty */
-#define XFS_TRANS_RESERVE      0x20    /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT  0x40    /* Transaction has elevated writer
-                                          count in superblock */
-
-/*
- * Values for call flags parameter.
- */
-#define        XFS_TRANS_RELEASE_LOG_RES       0x4
-#define        XFS_TRANS_ABORT                 0x8
-
-/*
- * Field values for xfs_trans_mod_sb.
- */
-#define        XFS_TRANS_SB_ICOUNT             0x00000001
-#define        XFS_TRANS_SB_IFREE              0x00000002
-#define        XFS_TRANS_SB_FDBLOCKS           0x00000004
-#define        XFS_TRANS_SB_RES_FDBLOCKS       0x00000008
-#define        XFS_TRANS_SB_FREXTENTS          0x00000010
-#define        XFS_TRANS_SB_RES_FREXTENTS      0x00000020
-#define        XFS_TRANS_SB_DBLOCKS            0x00000040
-#define        XFS_TRANS_SB_AGCOUNT            0x00000080
-#define        XFS_TRANS_SB_IMAXPCT            0x00000100
-#define        XFS_TRANS_SB_REXTSIZE           0x00000200
-#define        XFS_TRANS_SB_RBMBLOCKS          0x00000400
-#define        XFS_TRANS_SB_RBLOCKS            0x00000800
-#define        XFS_TRANS_SB_REXTENTS           0x00001000
-#define        XFS_TRANS_SB_REXTSLOG           0x00002000
-
-/*
- * Here we centralize the specification of XFS meta-data buffer
- * reference count values.  This determine how hard the buffer
- * cache tries to hold onto the buffer.
- */
-#define        XFS_AGF_REF             4
-#define        XFS_AGI_REF             4
-#define        XFS_AGFL_REF            3
-#define        XFS_INO_BTREE_REF       3
-#define        XFS_ALLOC_BTREE_REF     2
-#define        XFS_BMAP_BTREE_REF      2
-#define        XFS_DIR_BTREE_REF       2
-#define        XFS_INO_REF             2
-#define        XFS_ATTR_BTREE_REF      1
-#define        XFS_DQUOT_REF           1
-
-/*
- * Flags for xfs_trans_ichgtime().
- */
-#define        XFS_ICHGTIME_MOD        0x1     /* data fork modification timestamp */
-#define        XFS_ICHGTIME_CHG        0x2     /* inode field change timestamp */
-#define        XFS_ICHGTIME_CREATE     0x4     /* inode create timestamp */
-
-
 /*
  * Inode Log Item Format definitions.
  *
@@ -797,7 +625,6 @@ typedef struct xfs_qoff_logformat {
        char                    qf_pad[12];     /* padding for future */
 } xfs_qoff_logformat_t;
 
-
 /*
  * Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
  */
@@ -849,8 +676,4 @@ struct xfs_icreate_log {
        __be32          icl_gen;        /* inode generation number to use */
 };
 
-int    xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
-int    xfs_log_calc_minimum_size(struct xfs_mount *);
-
-
 #endif /* __XFS_LOG_FORMAT_H__ */
index 136654b9400df9b28a40415b1fbca3c9b7d6a958..9bc403a9e54f300f570e6e200574c36fb46db9b1 100644 (file)
@@ -22,6 +22,7 @@ struct xfs_buf;
 struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
+struct xfs_log_callback;
 
 /*
  * Flags for log structure
@@ -227,8 +228,8 @@ typedef struct xlog_in_core {
 
        /* Callback structures need their own cacheline */
        spinlock_t              ic_callback_lock ____cacheline_aligned_in_smp;
-       xfs_log_callback_t      *ic_callback;
-       xfs_log_callback_t      **ic_callback_tail;
+       struct xfs_log_callback *ic_callback;
+       struct xfs_log_callback **ic_callback_tail;
 
        /* reference counts need their own cacheline */
        atomic_t                ic_refcnt ____cacheline_aligned_in_smp;
@@ -254,7 +255,7 @@ struct xfs_cil_ctx {
        int                     space_used;     /* aggregate size of regions */
        struct list_head        busy_extents;   /* busy extents in chkpt */
        struct xfs_log_vec      *lv_chain;      /* logvecs being pushed */
-       xfs_log_callback_t      log_cb;         /* completion callback hook. */
+       struct xfs_log_callback log_cb;         /* completion callback hook. */
        struct list_head        committing;     /* ctx committing list */
 };
 
@@ -514,12 +515,10 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
 /*
  * Committed Item List interfaces
  */
-int
-xlog_cil_init(struct xlog *log);
-void
-xlog_cil_init_post_recovery(struct xlog *log);
-void
-xlog_cil_destroy(struct xlog *log);
+int    xlog_cil_init(struct xlog *log);
+void   xlog_cil_init_post_recovery(struct xlog *log);
+void   xlog_cil_destroy(struct xlog *log);
+bool   xlog_cil_empty(struct xlog *log);
 
 /*
  * CIL force routines
index dabda9521b4becc2ded846307aa062a093a7a3d3..b6b669df40f3ab335e75cd3a67903601be0128a4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
 #include "xfs_log_recover.h"
+#include "xfs_inode_item.h"
 #include "xfs_extfree_item.h"
 #include "xfs_trans_priv.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
 #include "xfs_quota.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
-#include "xfs_icreate_item.h"
-
-/* Need all the magic numbers and buffer ops structures from these headers */
-#include "xfs_symlink.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_error.h"
 #include "xfs_dir2.h"
-#include "xfs_attr_leaf.h"
-#include "xfs_attr_remote.h"
 
 #define BLK_AVG(blk1, blk2)    ((blk1+blk2) >> 1)
 
@@ -305,9 +297,9 @@ xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
 {
-       xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
+       xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
-       xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
+       xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
 }
 #else
@@ -1585,6 +1577,7 @@ xlog_recover_add_to_trans(
                "bad number of regions (%d) in inode log format",
                                  in_f->ilf_size);
                        ASSERT(0);
+                       kmem_free(ptr);
                        return XFS_ERROR(EIO);
                }
 
@@ -1970,6 +1963,13 @@ xlog_recover_do_inode_buffer(
  * magic number.  If we don't recognise the magic number in the buffer, then
  * return a LSN of -1 so that the caller knows it was an unrecognised block and
  * so can recover the buffer.
+ *
+ * Note: we cannot rely solely on magic number matches to determine that the
+ * buffer has a valid LSN - we also need to verify that it belongs to this
+ * filesystem, so we need to extract the object's LSN and compare it to that
+ * which we read from the superblock. If the UUIDs don't match, then we've got a
+ * stale metadata block from an old filesystem instance that we need to recover
+ * over the top of.
  */
 static xfs_lsn_t
 xlog_recover_get_buf_lsn(
@@ -1980,6 +1980,8 @@ xlog_recover_get_buf_lsn(
        __uint16_t              magic16;
        __uint16_t              magicda;
        void                    *blk = bp->b_addr;
+       uuid_t                  *uuid;
+       xfs_lsn_t               lsn = -1;
 
        /* v4 filesystems always recover immediately */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +1994,79 @@ xlog_recover_get_buf_lsn(
        case XFS_ABTB_MAGIC:
        case XFS_ABTC_MAGIC:
        case XFS_IBT_CRC_MAGIC:
-       case XFS_IBT_MAGIC:
-               return be64_to_cpu(
-                               ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn);
+       case XFS_IBT_MAGIC: {
+               struct xfs_btree_block *btb = blk;
+
+               lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
+               uuid = &btb->bb_u.s.bb_uuid;
+               break;
+       }
        case XFS_BMAP_CRC_MAGIC:
-       case XFS_BMAP_MAGIC:
-               return be64_to_cpu(
-                               ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn);
+       case XFS_BMAP_MAGIC: {
+               struct xfs_btree_block *btb = blk;
+
+               lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
+               uuid = &btb->bb_u.l.bb_uuid;
+               break;
+       }
        case XFS_AGF_MAGIC:
-               return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+               lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+               uuid = &((struct xfs_agf *)blk)->agf_uuid;
+               break;
        case XFS_AGFL_MAGIC:
-               return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+               lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+               uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
+               break;
        case XFS_AGI_MAGIC:
-               return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+               lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+               uuid = &((struct xfs_agi *)blk)->agi_uuid;
+               break;
        case XFS_SYMLINK_MAGIC:
-               return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+               lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+               uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
+               break;
        case XFS_DIR3_BLOCK_MAGIC:
        case XFS_DIR3_DATA_MAGIC:
        case XFS_DIR3_FREE_MAGIC:
-               return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+               lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+               uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
+               break;
        case XFS_ATTR3_RMT_MAGIC:
-               return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+               lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+               uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
+               break;
        case XFS_SB_MAGIC:
-               return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+               lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+               uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+               break;
        default:
                break;
        }
 
+       if (lsn != (xfs_lsn_t)-1) {
+               if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+                       goto recover_immediately;
+               return lsn;
+       }
+
        magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
        switch (magicda) {
        case XFS_DIR3_LEAF1_MAGIC:
        case XFS_DIR3_LEAFN_MAGIC:
        case XFS_DA3_NODE_MAGIC:
-               return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+               lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+               uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
+               break;
        default:
                break;
        }
 
+       if (lsn != (xfs_lsn_t)-1) {
+               if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+                       goto recover_immediately;
+               return lsn;
+       }
+
        /*
         * We do individual object checks on dquot and inode buffers as they
         * have their own individual LSN records. Also, we could have a stale
@@ -2316,7 +2354,7 @@ xlog_recover_do_reg_buffer(
                                        item->ri_buf[i].i_len, __func__);
                                goto next;
                        }
-                       error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
+                       error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
                                               -1, 0, XFS_QMOPT_DOWARN,
                                               "dquot_buf_recover");
                        if (error)
@@ -2347,133 +2385,6 @@ xlog_recover_do_reg_buffer(
                xlog_recover_validate_buf_type(mp, bp, buf_f);
 }
 
-/*
- * Do some primitive error checking on ondisk dquot data structures.
- */
-int
-xfs_qm_dqcheck(
-       struct xfs_mount *mp,
-       xfs_disk_dquot_t *ddq,
-       xfs_dqid_t       id,
-       uint             type,    /* used only when IO_dorepair is true */
-       uint             flags,
-       char             *str)
-{
-       xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
-       int             errs = 0;
-
-       /*
-        * We can encounter an uninitialized dquot buffer for 2 reasons:
-        * 1. If we crash while deleting the quotainode(s), and those blks got
-        *    used for user data. This is because we take the path of regular
-        *    file deletion; however, the size field of quotainodes is never
-        *    updated, so all the tricks that we play in itruncate_finish
-        *    don't quite matter.
-        *
-        * 2. We don't play the quota buffers when there's a quotaoff logitem.
-        *    But the allocation will be replayed so we'll end up with an
-        *    uninitialized quota block.
-        *
-        * This is all fine; things are still consistent, and we haven't lost
-        * any quota information. Just don't complain about bad dquot blks.
-        */
-       if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
-                       str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
-               errs++;
-       }
-       if (ddq->d_version != XFS_DQUOT_VERSION) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
-                       str, id, ddq->d_version, XFS_DQUOT_VERSION);
-               errs++;
-       }
-
-       if (ddq->d_flags != XFS_DQ_USER &&
-           ddq->d_flags != XFS_DQ_PROJ &&
-           ddq->d_flags != XFS_DQ_GROUP) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
-                       str, id, ddq->d_flags);
-               errs++;
-       }
-
-       if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : ondisk-dquot 0x%p, ID mismatch: "
-                       "0x%x expected, found id 0x%x",
-                       str, ddq, id, be32_to_cpu(ddq->d_id));
-               errs++;
-       }
-
-       if (!errs && ddq->d_id) {
-               if (ddq->d_blk_softlimit &&
-                   be64_to_cpu(ddq->d_bcount) >
-                               be64_to_cpu(ddq->d_blk_softlimit)) {
-                       if (!ddq->d_btimer) {
-                               if (flags & XFS_QMOPT_DOWARN)
-                                       xfs_alert(mp,
-                       "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
-                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
-                               errs++;
-                       }
-               }
-               if (ddq->d_ino_softlimit &&
-                   be64_to_cpu(ddq->d_icount) >
-                               be64_to_cpu(ddq->d_ino_softlimit)) {
-                       if (!ddq->d_itimer) {
-                               if (flags & XFS_QMOPT_DOWARN)
-                                       xfs_alert(mp,
-                       "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
-                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
-                               errs++;
-                       }
-               }
-               if (ddq->d_rtb_softlimit &&
-                   be64_to_cpu(ddq->d_rtbcount) >
-                               be64_to_cpu(ddq->d_rtb_softlimit)) {
-                       if (!ddq->d_rtbtimer) {
-                               if (flags & XFS_QMOPT_DOWARN)
-                                       xfs_alert(mp,
-                       "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
-                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
-                               errs++;
-                       }
-               }
-       }
-
-       if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
-               return errs;
-
-       if (flags & XFS_QMOPT_DOWARN)
-               xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
-
-       /*
-        * Typically, a repair is only requested by quotacheck.
-        */
-       ASSERT(id != -1);
-       ASSERT(flags & XFS_QMOPT_DQREPAIR);
-       memset(d, 0, sizeof(xfs_dqblk_t));
-
-       d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
-       d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
-       d->dd_diskdq.d_flags = type;
-       d->dd_diskdq.d_id = cpu_to_be32(id);
-
-       if (xfs_sb_version_hascrc(&mp->m_sb)) {
-               uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
-               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                XFS_DQUOT_CRC_OFF);
-       }
-
-       return errs;
-}
-
 /*
  * Perform a dquot buffer recovery.
  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
@@ -3079,7 +2990,7 @@ xlog_recover_dquot_pass2(
         */
        dq_f = item->ri_buf[0].i_addr;
        ASSERT(dq_f);
-       error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+       error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
                           "xlog_recover_dquot_pass2 (log copy)");
        if (error)
                return XFS_ERROR(EIO);
@@ -3099,7 +3010,7 @@ xlog_recover_dquot_pass2(
         * was among a chunk of dquots created earlier, and we did some
         * minimal initialization then.
         */
-       error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+       error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
                           "xlog_recover_dquot_pass2");
        if (error) {
                xfs_buf_relse(bp);
@@ -4031,7 +3942,7 @@ xlog_unpack_data_crc(
        if (crc != rhead->h_crc) {
                if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
                        xfs_alert(log->l_mp,
-               "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
+               "log record CRC mismatch: found 0x%x, expected 0x%x.",
                                        le32_to_cpu(rhead->h_crc),
                                        le32_to_cpu(crc));
                        xfs_hex_dump(dp, 32);
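
An illustrative sketch (not the kernel implementation) of the recovery decision the xlog_recover_get_buf_lsn() hunk above adds: a metadata block's LSN is only trusted when the block's UUID matches the mounted filesystem's UUID; an unrecognised magic number or a UUID mismatch means the buffer must be recovered unconditionally. The uuid_t/lsn_t typedefs and buf_recovery_lsn() name below are assumptions for the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct { unsigned char b[16]; } uuid_t;     /* stand-in for the kernel uuid_t */
typedef int64_t lsn_t;

#define LSN_UNKNOWN     ((lsn_t)-1)     /* "recover immediately" sentinel */

/*
 * Return the LSN that recovery may compare against the log item's LSN,
 * or LSN_UNKNOWN to force immediate replay of the buffer.
 */
static lsn_t buf_recovery_lsn(const uuid_t *sb_uuid,
                              const uuid_t *blk_uuid, lsn_t blk_lsn)
{
        if (blk_lsn == LSN_UNKNOWN)
                return LSN_UNKNOWN;     /* magic number not recognised */

        if (memcmp(sb_uuid->b, blk_uuid->b, sizeof(sb_uuid->b)) != 0)
                return LSN_UNKNOWN;     /* stale block from another fs instance */

        return blk_lsn;                 /* LSN belongs to this fs; trust it */
}
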
index bbcec0bbc12da830f4a16479bfc8748589c4ddbc..2af1a0a4d0f17109ca2b5d1cdf12903c74451100 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_trans_space.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_da_btree.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * Calculate the maximum length in bytes that would be required for a local
index 9163dc14053244c7ff6a70f60e91271995bbe87c..63ca2f0420b108206ed1d610e50fe11f64bf00c4 100644 (file)
@@ -17,9 +17,8 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
index 5dcc68019d1bc8c49a799695436045d69d49167f..da88f167af78dbf04df4eb0c1cdcfcad7a5bd699 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_dir2.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
-#include "xfs_cksum.h"
-#include "xfs_buf_item.h"
 
 
 #ifdef HAVE_PERCPU_SB
index 3e6c2e6c9cd24d145514c95b7357ce91acc219f0..14a4996cfec6cb4fbeaa1d3f8432548ebb57d48b 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -664,20 +661,6 @@ xfs_qm_dqdetach(
        }
 }
 
-int
-xfs_qm_calc_dquots_per_chunk(
-       struct xfs_mount        *mp,
-       unsigned int            nbblks) /* basic block units */
-{
-       unsigned int    ndquots;
-
-       ASSERT(nbblks > 0);
-       ndquots = BBTOB(nbblks);
-       do_div(ndquots, sizeof(xfs_dqblk_t));
-
-       return ndquots;
-}
-
 struct xfs_qm_isolate {
        struct list_head        buffers;
        struct list_head        dispose;
@@ -858,7 +841,7 @@ xfs_qm_init_quotainfo(
 
        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
-       qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
+       qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
                                                        qinf->qi_dqchunklen);
 
        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
@@ -1092,10 +1075,10 @@ xfs_qm_reset_dqcounts(
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
-                * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
+                * find uninitialised dquot blks. See comment in xfs_dqcheck.
                 */
-               (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
-                                     "xfs_quotacheck");
+               xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+                           "xfs_quotacheck");
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
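
For reference, the dquots-per-chunk value that xfs_qm_init_quotainfo() now obtains from the shared xfs_calc_dquots_per_chunk() helper is the same arithmetic as the removed xfs_qm_calc_dquots_per_chunk() above: convert basic blocks to bytes and divide by the on-disk dquot record size. The 512-byte basic block and 136-byte record sizes below are illustrative constants for the sketch, not values taken from this diff.

#include <assert.h>

#define BBSIZE_ASSUMED          512u    /* bytes per basic block (assumption) */
#define DQBLK_SIZE_ASSUMED      136u    /* sizeof(xfs_dqblk_t) (assumption) */

static unsigned int calc_dquots_per_chunk(unsigned int nbblks)
{
        assert(nbblks > 0);
        /* basic blocks -> bytes -> whole dquot records per chunk */
        return (nbblks * BBSIZE_ASSUMED) / DQBLK_SIZE_ASSUMED;
}
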
index 2b602df9c242d3c3efe7b6841abe5fdbee28d00e..a788b66a5cb1c7e73898c877b0c3629eeb8e17a7 100644 (file)
@@ -103,8 +103,6 @@ xfs_dq_to_quota_inode(struct xfs_dquot *dqp)
        return NULL;
 }
 
-extern int     xfs_qm_calc_dquots_per_chunk(struct xfs_mount *mp,
-                                            unsigned int nbblks);
 extern void    xfs_trans_mod_dquot(struct xfs_trans *,
                                        struct xfs_dquot *, uint, long);
 extern int     xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
index 3af50ccdfac1a10da858ef26e80b040ad4a474f1..e9be63abd8d29f9003521ce5efa0b2aeaae2f467 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_qm.h"
 
 
index 8174aad0b38813ec836ea044d9a1b7b4dc06f300..437c9198031a49a940aecaa441dc305de0e9a145 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_quota.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -287,7 +281,7 @@ xfs_qm_scall_trunc_qfiles(
        int             error = 0, error2 = 0;
 
        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
-               xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
+               xfs_debug(mp, "%s: flags=%x m_qflags=%x",
                        __func__, flags, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }
@@ -325,7 +319,7 @@ xfs_qm_scall_quotaon(
        sbflags = 0;
 
        if (flags == 0) {
-               xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
+               xfs_debug(mp, "%s: zero flags, m_qflags=%x",
                        __func__, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }
@@ -348,7 +342,7 @@ xfs_qm_scall_quotaon(
             (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_PQUOTA_ENFD))) {
                xfs_debug(mp,
-                       "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
+                       "%s: Can't enforce without acct, flags=%x sbflags=%x",
                        __func__, flags, mp->m_sb.sb_qflags);
                return XFS_ERROR(EINVAL);
        }
@@ -648,7 +642,7 @@ xfs_qm_scall_setqlim(
                        q->qi_bsoftlimit = soft;
                }
        } else {
-               xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
+               xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
        }
        hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
@@ -664,7 +658,7 @@ xfs_qm_scall_setqlim(
                        q->qi_rtbsoftlimit = soft;
                }
        } else {
-               xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
+               xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
        }
 
        hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
@@ -681,7 +675,7 @@ xfs_qm_scall_setqlim(
                        q->qi_isoftlimit = soft;
                }
        } else {
-               xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
+               xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
        }
 
        /*
index e7d84d2d86830a25a5cd9778521766d4a592c45b..5376dd406ba2c099e23230014b1b23cbb832f6ad 100644 (file)
@@ -150,10 +150,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
        xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
                                f | XFS_QMOPT_RES_REGBLKS)
 
-extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
-                               xfs_dqid_t, uint, uint, char *);
 extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
 
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
 #endif /* __XFS_QUOTA_H__ */
index e6b0d6e1f4f2b0560a486e1e14e179193325b71f..b3b2b1065c0f4db8a6c972880b2dc8601e189e03 100644 (file)
@@ -154,4 +154,8 @@ typedef __uint16_t  xfs_qwarncnt_t;
                (XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
 #define XFS_QMOPT_RESBLK_MASK  (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
 
+extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
+                      xfs_dqid_t id, uint type, uint flags, char *str);
+extern int xfs_calc_dquots_per_chunk(struct xfs_mount *mp, unsigned int nbblks);
+
 #endif /* __XFS_QUOTA_H__ */
index 1326d81596c2920b27f45021b67b76979e5ef387..af33cafe69b6417c201c90175a8f3e9bb4417814 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_log.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
 #include "xfs_quota.h"
 #include "xfs_trans.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
 #include "xfs_qm.h"
 #include <linux/quota.h>
 
index 6f9e63c9fc2617ab89966447083527f1d0c94257..a6a76b2b6a85db9ece8acb0565e82e310319ec9d 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
-#include "xfs_fsops.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
 #include "xfs_error.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_buf.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_rtalloc.h"
 
 
 /*
- * Prototypes for internal functions.
- */
-
-
-STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *);
-STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int,
-               xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *);
-STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_extlen_t, int, xfs_rtblock_t *, int *);
-STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtget_summary( xfs_mount_t *, xfs_trans_t *, int,
-               xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *);
-STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_extlen_t, int);
-STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
-               xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *);
-
-/*
- * Internal functions.
- */
-
-/*
- * Allocate space to the bitmap or summary file, and zero it, for growfs.
+ * Read and return the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
  */
 STATIC int                             /* error */
-xfs_growfs_rt_alloc(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_extlen_t    oblocks,        /* old count of blocks */
-       xfs_extlen_t    nblocks,        /* new count of blocks */
-       xfs_inode_t     *ip)            /* inode (bitmap/summary) */
+xfs_rtget_summary(
+       xfs_mount_t     *mp,            /* file system mount structure */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       int             log,            /* log2 of extent size */
+       xfs_rtblock_t   bbno,           /* bitmap block number */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
+       xfs_suminfo_t   *sum)           /* out: summary info for this block */
 {
-       xfs_fileoff_t   bno;            /* block number in file */
-       xfs_buf_t       *bp;            /* temporary buffer for zeroing */
-       int             committed;      /* transaction committed flag */
-       xfs_daddr_t     d;              /* disk block address */
-       int             error;          /* error return value */
-       xfs_fsblock_t   firstblock;     /* first block allocated in xaction */
-       xfs_bmap_free_t flist;          /* list of freed blocks */
-       xfs_fsblock_t   fsbno;          /* filesystem block for bno */
-       xfs_bmbt_irec_t map;            /* block map output */
-       int             nmap;           /* number of block maps */
-       int             resblks;        /* space reservation */
+       xfs_buf_t       *bp;            /* buffer for summary block */
+       int             error;          /* error value */
+       xfs_fsblock_t   sb;             /* summary fsblock */
+       int             so;             /* index into the summary file */
+       xfs_suminfo_t   *sp;            /* pointer to returned data */
 
        /*
-        * Allocate space to the file, as necessary.
+        * Compute entry number in the summary file.
         */
-       while (oblocks < nblocks) {
-               int             cancelflags = 0;
-               xfs_trans_t     *tp;
-
-               tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
-               resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
-               /*
-                * Reserve space & log for one extent added to the file.
-                */
-               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
-                                         resblks, 0);
-               if (error)
-                       goto error_cancel;
-               cancelflags = XFS_TRANS_RELEASE_LOG_RES;
-               /*
-                * Lock the inode.
-                */
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-               xfs_bmap_init(&flist, &firstblock);
-               /*
-                * Allocate blocks to the bitmap file.
-                */
-               nmap = 1;
-               cancelflags |= XFS_TRANS_ABORT;
-               error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
-                                       XFS_BMAPI_METADATA, &firstblock,
-                                       resblks, &map, &nmap, &flist);
-               if (!error && nmap < 1)
-                       error = XFS_ERROR(ENOSPC);
-               if (error)
-                       goto error_cancel;
-               /*
-                * Free any blocks freed up in the transaction, then commit.
-                */
-               error = xfs_bmap_finish(&tp, &flist, &committed);
-               if (error)
-                       goto error_cancel;
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       goto error;
+       so = XFS_SUMOFFS(mp, log, bbno);
+       /*
+        * Compute the block number in the summary file.
+        */
+       sb = XFS_SUMOFFSTOBLOCK(mp, so);
+       /*
+        * If we have an old buffer, and the block number matches, use that.
+        */
+       if (rbpp && *rbpp && *rsb == sb)
+               bp = *rbpp;
+       /*
+        * Otherwise we have to get the buffer.
+        */
+       else {
                /*
-                * Now we need to clear the allocated blocks.
-                * Do this one block per transaction, to keep it simple.
+                * If there was an old one, get rid of it first.
                 */
-               cancelflags = 0;
-               for (bno = map.br_startoff, fsbno = map.br_startblock;
-                    bno < map.br_startoff + map.br_blockcount;
-                    bno++, fsbno++) {
-                       tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
-                       /*
-                        * Reserve log for one block zeroing.
-                        */
-                       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
-                                                 0, 0);
-                       if (error)
-                               goto error_cancel;
-                       /*
-                        * Lock the bitmap inode.
-                        */
-                       xfs_ilock(ip, XFS_ILOCK_EXCL);
-                       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-                       /*
-                        * Get a buffer for the block.
-                        */
-                       d = XFS_FSB_TO_DADDR(mp, fsbno);
-                       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-                               mp->m_bsize, 0);
-                       if (bp == NULL) {
-                               error = XFS_ERROR(EIO);
-error_cancel:
-                               xfs_trans_cancel(tp, cancelflags);
-                               goto error;
-                       }
-                       memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
-                       xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
-                       /*
-                        * Commit the transaction.
-                        */
-                       error = xfs_trans_commit(tp, 0);
-                       if (error)
-                               goto error;
+               if (rbpp && *rbpp)
+                       xfs_trans_brelse(tp, *rbpp);
+               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+               if (error) {
+                       return error;
                }
                /*
-                * Go on to the next extent, if any.
+                * Remember this buffer and block for the next call.
                 */
-               oblocks = map.br_startoff + map.br_blockcount;
+               if (rbpp) {
+                       *rbpp = bp;
+                       *rsb = sb;
+               }
        }
+       /*
+        * Point to the summary information & copy it out.
+        */
+       sp = XFS_SUMPTR(mp, bp, so);
+       *sum = *sp;
+       /*
+        * Drop the buffer if we're not asked to remember it.
+        */
+       if (!rbpp)
+               xfs_trans_brelse(tp, bp);
        return 0;
-
-error:
-       return error;
 }
 
+
 /*
- * Attempt to allocate an extent minlen<=len<=maxlen starting from
- * bitmap block bbno.  If we don't get maxlen then use prod to trim
- * the length, if given.  Returns error; returns starting block in *rtblock.
- * The lengths are all in rtextents.
+ * Return whether there are any free extents in the size range given
+ * by low and high, for the bitmap block bbno.
  */
 STATIC int                             /* error */
-xfs_rtallocate_extent_block(
-       xfs_mount_t     *mp,            /* file system mount point */
+xfs_rtany_summary(
+       xfs_mount_t     *mp,            /* file system mount structure */
        xfs_trans_t     *tp,            /* transaction pointer */
+       int             low,            /* low log2 extent size */
+       int             high,           /* high log2 extent size */
        xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       xfs_rtblock_t   *nextp,         /* out: next block to try */
        xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
        xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       int             *stat)          /* out: any good extents here? */
 {
-       xfs_rtblock_t   besti;          /* best rtblock found so far */
-       xfs_rtblock_t   bestlen;        /* best length found so far */
-       xfs_rtblock_t   end;            /* last rtblock in chunk */
        int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current rtblock trying */
-       xfs_rtblock_t   next;           /* next rtblock to try */
-       int             stat;           /* status from internal calls */
+       int             log;            /* loop counter, log2 of ext. size */
+       xfs_suminfo_t   sum;            /* summary data */
 
        /*
-        * Loop over all the extents starting in this bitmap block,
-        * looking for one that's long enough.
+        * Loop over logs of extent sizes.  Order is irrelevant.
         */
-       for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
-               end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
-            i <= end;
-            i++) {
+       for (log = low; log <= high; log++) {
                /*
-                * See if there's a free extent of maxlen starting at i.
-                * If it's not so then next will contain the first non-free.
+                * Get one summary datum.
                 */
-               error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+               error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
                if (error) {
                        return error;
                }
-               if (stat) {
-                       /*
-                        * i for maxlen is all free, allocate and return that.
-                        */
-                       error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
-                               rsb);
-                       if (error) {
-                               return error;
-                       }
-                       *len = maxlen;
-                       *rtblock = i;
-                       return 0;
-               }
                /*
-                * In the case where we have a variable-sized allocation
-                * request, figure out how big this free piece is,
-                * and if it's big enough for the minimum, and the best
-                * so far, remember it.
+                * If there are any, return success.
                 */
-               if (minlen < maxlen) {
-                       xfs_rtblock_t   thislen;        /* this extent size */
-
-                       thislen = next - i;
-                       if (thislen >= minlen && thislen > bestlen) {
-                               besti = i;
-                               bestlen = thislen;
-                       }
+               if (sum) {
+                       *stat = 1;
+                       return 0;
                }
-               /*
-                * If not done yet, find the start of the next free space.
-                */
-               if (next < end) {
-                       error = xfs_rtfind_forw(mp, tp, next, end, &i);
-                       if (error) {
-                               return error;
-                       }
-               } else
-                       break;
        }
        /*
-        * Searched the whole thing & didn't find a maxlen free extent.
+        * Found nothing, return failure.
+        */
+       *stat = 0;
+       return 0;
+}
+
+
+/*
+ * Copy and transform the summary file, given the old and new
+ * parameters in the mount structures.
+ */
+STATIC int                             /* error */
+xfs_rtcopy_summary(
+       xfs_mount_t     *omp,           /* old file system mount point */
+       xfs_mount_t     *nmp,           /* new file system mount point */
+       xfs_trans_t     *tp)            /* transaction pointer */
+{
+       xfs_rtblock_t   bbno;           /* bitmap block number */
+       xfs_buf_t       *bp;            /* summary buffer */
+       int             error;          /* error return value */
+       int             log;            /* summary level number (log length) */
+       xfs_suminfo_t   sum;            /* summary data */
+       xfs_fsblock_t   sumbno;         /* summary block number */
+
+       bp = NULL;
+       for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
+               for (bbno = omp->m_sb.sb_rbmblocks - 1;
+                    (xfs_srtblock_t)bbno >= 0;
+                    bbno--) {
+                       error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
+                               &sumbno, &sum);
+                       if (error)
+                               return error;
+                       if (sum == 0)
+                               continue;
+                       error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
+                               &bp, &sumbno);
+                       if (error)
+                               return error;
+                       error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
+                               &bp, &sumbno);
+                       if (error)
+                               return error;
+                       ASSERT(sum > 0);
+               }
+       }
+       return 0;
+}
+/*
+ * Mark an extent specified by start and len allocated.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int                             /* error */
+xfs_rtallocate_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* start block to allocate */
+       xfs_extlen_t    len,            /* length to allocate */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+{
+       xfs_rtblock_t   end;            /* end of the allocated extent */
+       int             error;          /* error value */
+       xfs_rtblock_t   postblock = 0;  /* first block allocated > end */
+       xfs_rtblock_t   preblock = 0;   /* first block allocated < start */
+
+       end = start + len - 1;
+       /*
+        * Assume we're allocating out of the middle of a free extent.
+        * We need to find the beginning and end of the extent so we can
+        * properly update the summary.
+        */
+       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+       if (error) {
+               return error;
+       }
+       /*
+        * Find the next allocated block (end of free extent).
+        */
+       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+               &postblock);
+       if (error) {
+               return error;
+       }
+       /*
+        * Decrement the summary information corresponding to the entire
+        * (old) free extent.
+        */
+       error = xfs_rtmodify_summary(mp, tp,
+               XFS_RTBLOCKLOG(postblock + 1 - preblock),
+               XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+       if (error) {
+               return error;
+       }
+       /*
+        * If there are blocks not being allocated at the front of the
+        * old extent, add summary data for them to be free.
+        */
+       if (preblock < start) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(start - preblock),
+                       XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * If there are blocks not being allocated at the end of the
+        * old extent, add summary data for them to be free.
+        */
+       if (postblock > end) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(postblock - end),
+                       XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * Modify the bitmap to mark this extent allocated.
+        */
+       error = xfs_rtmodify_range(mp, tp, start, len, 0);
+       return error;
+}
+
+/*
+ * Attempt to allocate an extent minlen<=len<=maxlen starting from
+ * bitmap block bbno.  If we don't get maxlen then use prod to trim
+ * the length, if given.  Returns error; returns starting block in *rtblock.
+ * The lengths are all in rtextents.
+ */
+STATIC int                             /* error */
+xfs_rtallocate_extent_block(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   bbno,           /* bitmap block number */
+       xfs_extlen_t    minlen,         /* minimum length to allocate */
+       xfs_extlen_t    maxlen,         /* maximum length to allocate */
+       xfs_extlen_t    *len,           /* out: actual length allocated */
+       xfs_rtblock_t   *nextp,         /* out: next block to try */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
+       xfs_extlen_t    prod,           /* extent product factor */
+       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+{
+       xfs_rtblock_t   besti;          /* best rtblock found so far */
+       xfs_rtblock_t   bestlen;        /* best length found so far */
+       xfs_rtblock_t   end;            /* last rtblock in chunk */
+       int             error;          /* error value */
+       xfs_rtblock_t   i;              /* current rtblock trying */
+       xfs_rtblock_t   next;           /* next rtblock to try */
+       int             stat;           /* status from internal calls */
+
+       /*
+        * Loop over all the extents starting in this bitmap block,
+        * looking for one that's long enough.
+        */
+       for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
+               end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+            i <= end;
+            i++) {
+               /*
+                * See if there's a free extent of maxlen starting at i.
+                * If it's not so then next will contain the first non-free.
+                */
+               error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+               if (error) {
+                       return error;
+               }
+               if (stat) {
+                       /*
+                        * The range at i for maxlen is all free; allocate it and return.
+                        */
+                       error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
+                               rsb);
+                       if (error) {
+                               return error;
+                       }
+                       *len = maxlen;
+                       *rtblock = i;
+                       return 0;
+               }
+               /*
+                * In the case where we have a variable-sized allocation
+                * request, figure out how big this free piece is, and if
+                * it's at least the minimum and better than the best so
+                * far, remember it.
+                */
+               if (minlen < maxlen) {
+                       xfs_rtblock_t   thislen;        /* this extent size */
+
+                       thislen = next - i;
+                       if (thislen >= minlen && thislen > bestlen) {
+                               besti = i;
+                               bestlen = thislen;
+                       }
+               }
+               /*
+                * If not done yet, find the start of the next free space.
+                */
+               if (next < end) {
+                       error = xfs_rtfind_forw(mp, tp, next, end, &i);
+                       if (error) {
+                               return error;
+                       }
+               } else
+                       break;
+       }
+       /*
+        * Searched the whole thing & didn't find a maxlen free extent.
         */
        if (minlen < maxlen && besti != -1) {
                xfs_extlen_t    p;      /* amount to trim length by */
@@ -639,1191 +727,205 @@ xfs_rtallocate_extent_size(
                         */
                        if (r != NULLRTBLOCK) {
                                *rtblock = r;
-                               return 0;
-                       }
-                       /*
-                        * If the "next block to try" returned from the
-                        * allocator is beyond the next bitmap block,
-                        * skip to that bitmap block.
-                        */
-                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
-                               i = XFS_BITTOBLOCK(mp, n) - 1;
-               }
-       }
-       /*
-        * Didn't find any maxlen blocks.  Try smaller ones, unless
-        * we're asking for a fixed size extent.
-        */
-       if (minlen > --maxlen) {
-               *rtblock = NULLRTBLOCK;
-               return 0;
-       }
-       ASSERT(minlen != 0);
-       ASSERT(maxlen != 0);
-
-       /*
-        * Loop over sizes, from maxlen down to minlen.
-        * This time, when we do the allocations, allow smaller ones
-        * to succeed.
-        */
-       for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
-               /*
-                * Loop over all the bitmap blocks, try an allocation
-                * starting in that block.
-                */
-               for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
-                       /*
-                        * Get the summary information for this level/block.
-                        */
-                       error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
-                                                 &sum);
-                       if (error) {
-                               return error;
-                       }
-                       /*
-                        * If nothing there, go on to next.
-                        */
-                       if (!sum)
-                               continue;
-                       /*
-                        * Try the allocation.  Make sure the specified
-                        * minlen/maxlen are in the possible range for
-                        * this summary level.
-                        */
-                       error = xfs_rtallocate_extent_block(mp, tp, i,
-                                       XFS_RTMAX(minlen, 1 << l),
-                                       XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
-                                       len, &n, rbpp, rsb, prod, &r);
-                       if (error) {
-                               return error;
-                       }
-                       /*
-                        * If it worked, return that extent.
-                        */
-                       if (r != NULLRTBLOCK) {
-                               *rtblock = r;
-                               return 0;
-                       }
-                       /*
-                        * If the "next block to try" returned from the
-                        * allocator is beyond the next bitmap block,
-                        * skip to that bitmap block.
-                        */
-                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
-                               i = XFS_BITTOBLOCK(mp, n) - 1;
-               }
-       }
-       /*
-        * Got nothing, return failure.
-        */
-       *rtblock = NULLRTBLOCK;
-       return 0;
-}
-
-/*
- * Mark an extent specified by start and len allocated.
- * Updates all the summary information as well as the bitmap.
- */
-STATIC int                             /* error */
-xfs_rtallocate_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* start block to allocate */
-       xfs_extlen_t    len,            /* length to allocate */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
-{
-       xfs_rtblock_t   end;            /* end of the allocated extent */
-       int             error;          /* error value */
-       xfs_rtblock_t   postblock = 0;  /* first block allocated > end */
-       xfs_rtblock_t   preblock = 0;   /* first block allocated < start */
-
-       end = start + len - 1;
-       /*
-        * Assume we're allocating out of the middle of a free extent.
-        * We need to find the beginning and end of the extent so we can
-        * properly update the summary.
-        */
-       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
-       if (error) {
-               return error;
-       }
-       /*
-        * Find the next allocated block (end of free extent).
-        */
-       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-               &postblock);
-       if (error) {
-               return error;
-       }
-       /*
-        * Decrement the summary information corresponding to the entire
-        * (old) free extent.
-        */
-       error = xfs_rtmodify_summary(mp, tp,
-               XFS_RTBLOCKLOG(postblock + 1 - preblock),
-               XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
-       if (error) {
-               return error;
-       }
-       /*
-        * If there are blocks not being allocated at the front of the
-        * old extent, add summary data for them to be free.
-        */
-       if (preblock < start) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(start - preblock),
-                       XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * If there are blocks not being allocated at the end of the
-        * old extent, add summary data for them to be free.
-        */
-       if (postblock > end) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(postblock - end),
-                       XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * Modify the bitmap to mark this extent allocated.
-        */
-       error = xfs_rtmodify_range(mp, tp, start, len, 0);
-       return error;
-}
-
-/*
- * Return whether there are any free extents in the size range given
- * by low and high, for the bitmap block bbno.
- */
-STATIC int                             /* error */
-xfs_rtany_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             low,            /* low log2 extent size */
-       int             high,           /* high log2 extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       int             *stat)          /* out: any good extents here? */
-{
-       int             error;          /* error value */
-       int             log;            /* loop counter, log2 of ext. size */
-       xfs_suminfo_t   sum;            /* summary data */
-
-       /*
-        * Loop over logs of extent sizes.  Order is irrelevant.
-        */
-       for (log = low; log <= high; log++) {
-               /*
-                * Get one summary datum.
-                */
-               error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
-               if (error) {
-                       return error;
-               }
-               /*
-                * If there are any, return success.
-                */
-               if (sum) {
-                       *stat = 1;
-                       return 0;
-               }
-       }
-       /*
-        * Found nothing, return failure.
-        */
-       *stat = 0;
-       return 0;
-}
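
An illustrative aside (not from the patch itself): xfs_rtany_summary() above boils down to "is any per-size-class counter in [low, high] nonzero for this bitmap block?". A hedged sketch with hypothetical names:

/* Return 1 if any size class in [low, high] records a free extent. */
static int any_free_in_classes(const int *sum, int low, int high)
{
        int log;

        for (log = low; log <= high; log++)
                if (sum[log])
                        return 1;
        return 0;
}
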
-
-/*
- * Get a buffer for the bitmap or summary file block specified.
- * The buffer is returned read and locked.
- */
-STATIC int                             /* error */
-xfs_rtbuf_get(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   block,          /* block number in bitmap or summary */
-       int             issum,          /* is summary not bitmap */
-       xfs_buf_t       **bpp)          /* output: buffer for the block */
-{
-       xfs_buf_t       *bp;            /* block buffer, result */
-       xfs_inode_t     *ip;            /* bitmap or summary inode */
-       xfs_bmbt_irec_t map;
-       int             nmap = 1;
-       int             error;          /* error value */
-
-       ip = issum ? mp->m_rsumip : mp->m_rbmip;
-
-       error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
-       if (error)
-               return error;
-
-       ASSERT(map.br_startblock != NULLFSBLOCK);
-       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
-                                  XFS_FSB_TO_DADDR(mp, map.br_startblock),
-                                  mp->m_bsize, 0, &bp, NULL);
-       if (error)
-               return error;
-       ASSERT(!xfs_buf_geterror(bp));
-       *bpp = bp;
-       return 0;
-}
-
-#ifdef DEBUG
-/*
- * Check that the given extent (block range) is allocated already.
- */
-STATIC int                             /* error */
-xfs_rtcheck_alloc_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number of extent */
-       xfs_extlen_t    len,            /* length of extent */
-       int             *stat)          /* out: 1 for allocated, 0 for not */
-{
-       xfs_rtblock_t   new;            /* dummy for xfs_rtcheck_range */
-
-       return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat);
-}
-#endif
-
-/*
- * Check that the given range is either all allocated (val = 0) or
- * all free (val = 1).
- */
-STATIC int                             /* error */
-xfs_rtcheck_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block number of extent */
-       xfs_extlen_t    len,            /* length of extent */
-       int             val,            /* 1 for free, 0 for allocated */
-       xfs_rtblock_t   *new,           /* out: first block not matching */
-       int             *stat)          /* out: 1 for matches, 0 for not */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   lastbit;        /* last useful bit in word */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute starting bitmap block number
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       /*
-        * Read the bitmap block.
-        */
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
-       }
-       bufp = bp->b_addr;
-       /*
-        * Compute the starting word's address, and starting bit.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
-       /*
-        * 0 (allocated) => all zeroes; 1 (free) => all ones.
-        */
-       val = -val;
-       /*
-        * If not starting on a word boundary, deal with the first
-        * (partial) word.
-        */
-       if (bit) {
-               /*
-                * Compute first bit not examined.
-                */
-               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-               /*
-                * Mask of relevant bits.
-                */
-               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ val) & mask)) {
-                       /*
-                        * Different, compute first wrong bit and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i = XFS_RTLOBIT(wdiff) - bit;
-                       *new = start + i;
-                       *stat = 0;
-                       return 0;
-               }
-               i = lastbit - bit;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the next one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = *b ^ val)) {
-                       /*
-                        * Different, compute first wrong bit and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *new = start + i;
-                       *stat = 0;
-                       return 0;
-               }
-               i += XFS_NBWORD;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       }
-       /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if ((lastbit = len - i)) {
-               /*
-                * Mask of relevant bits.
-                */
-               mask = ((xfs_rtword_t)1 << lastbit) - 1;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ val) & mask)) {
-                       /*
-                        * Different, compute first wrong bit and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *new = start + i;
-                       *stat = 0;
-                       return 0;
-               } else
-                       i = len;
-       }
-       /*
-        * Successful, return.
-        */
-       xfs_trans_brelse(tp, bp);
-       *new = start + i;
-       *stat = 1;
-       return 0;
-}
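
Aside, for illustration only: the range check above works a word at a time. Each bitmap word is XORed against the wanted pattern (all ones for "free", all zeroes for "allocated"), and the lowest set bit of the difference locates the first block that does not match. A stand-alone sketch of the idea with hypothetical names, assuming 32-bit words; the partial first/last words are omitted here, the real code masks them as shown above:

#include <stdint.h>
#include <strings.h>

/* Return the index of the first bit differing from 'want', or -1 if the
 * whole run of words matches. */
static int first_mismatch(const uint32_t *words, int nwords, uint32_t want)
{
        int w;

        for (w = 0; w < nwords; w++) {
                uint32_t diff = words[w] ^ want;

                if (diff)
                        return w * 32 + (ffs(diff) - 1);
        }
        return -1;
}
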
-
-/*
- * Copy and transform the summary file, given the old and new
- * parameters in the mount structures.
- */
-STATIC int                             /* error */
-xfs_rtcopy_summary(
-       xfs_mount_t     *omp,           /* old file system mount point */
-       xfs_mount_t     *nmp,           /* new file system mount point */
-       xfs_trans_t     *tp)            /* transaction pointer */
-{
-       xfs_rtblock_t   bbno;           /* bitmap block number */
-       xfs_buf_t       *bp;            /* summary buffer */
-       int             error;          /* error return value */
-       int             log;            /* summary level number (log length) */
-       xfs_suminfo_t   sum;            /* summary data */
-       xfs_fsblock_t   sumbno;         /* summary block number */
-
-       bp = NULL;
-       for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
-               for (bbno = omp->m_sb.sb_rbmblocks - 1;
-                    (xfs_srtblock_t)bbno >= 0;
-                    bbno--) {
-                       error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
-                               &sumbno, &sum);
-                       if (error)
-                               return error;
-                       if (sum == 0)
-                               continue;
-                       error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
-                               &bp, &sumbno);
-                       if (error)
-                               return error;
-                       error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
-                               &bp, &sumbno);
-                       if (error)
-                               return error;
-                       ASSERT(sum > 0);
-               }
-       }
-       return 0;
-}
-
-/*
- * Searching backward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int                             /* error */
-xfs_rtfind_back(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to look at */
-       xfs_rtblock_t   limit,          /* last block to look at */
-       xfs_rtblock_t   *rtblock)       /* out: start block found */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   firstbit;       /* first useful bit in the word */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   len;            /* length of inspected area */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    want;           /* mask for "good" values */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute and read in starting bitmap block for starting block.
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
-       }
-       bufp = bp->b_addr;
-       /*
-        * Get the first word's index & point to it.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
-       len = start - limit + 1;
-       /*
-        * Compute match value, based on the bit at start: if 1 (free)
-        * then all-ones, else all-zeroes.
-        */
-       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
-       /*
-        * If the starting position is not word-aligned, deal with the
-        * partial word.
-        */
-       if (bit < XFS_NBWORD - 1) {
-               /*
-                * Calculate first (leftmost) bit number to look at,
-                * and mask for all the relevant bits in this word.
-                */
-               firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
-               mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
-                       firstbit;
-               /*
-                * Calculate the difference between the value there
-                * and what we're looking for.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different.  Mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i = bit - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
-                       return 0;
-               }
-               i = bit - firstbit + 1;
-               /*
-                * Go on to previous block if that's where the previous word is
-                * and we need the previous word.
-                */
-               if (--word == -1 && i < len) {
-                       /*
-                        * If done with this block, get the previous one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       bufp = bp->b_addr;
-                       word = XFS_BLOCKWMASK(mp);
-                       b = &bufp[word];
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b--;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the previous one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = *b ^ want)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
-                       return 0;
-               }
-               i += XFS_NBWORD;
-               /*
-                * Go on to previous block if that's where the previous word is
-                * and we need the previous word.
-                */
-               if (--word == -1 && i < len) {
-                       /*
-                        * If done with this block, get the previous one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       bufp = bp->b_addr;
-                       word = XFS_BLOCKWMASK(mp);
-                       b = &bufp[word];
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b--;
-               }
-       }
-       /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if (len - i) {
-               /*
-                * Calculate first (leftmost) bit number to look at,
-                * and mask for all the relevant bits in this word.
-                */
-               firstbit = XFS_NBWORD - (len - i);
-               mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
-                       return 0;
-               } else
-                       i = len;
-       }
-       /*
-        * No match, return that we scanned the whole area.
-        */
-       xfs_trans_brelse(tp, bp);
-       *rtblock = start - i + 1;
-       return 0;
-}
-
-/*
- * Searching forward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int                             /* error */
-xfs_rtfind_forw(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to look at */
-       xfs_rtblock_t   limit,          /* last block to look at */
-       xfs_rtblock_t   *rtblock)       /* out: start block found */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   lastbit;        /* last useful bit in the word */
-       xfs_rtblock_t   len;            /* length of inspected area */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    want;           /* mask for "good" values */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute and read in starting bitmap block for starting block.
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
-       }
-       bufp = bp->b_addr;
-       /*
-        * Get the first word's index & point to it.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
-       len = limit - start + 1;
-       /*
-        * Compute match value, based on the bit at start: if 1 (free)
-        * then all-ones, else all-zeroes.
-        */
-       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
-       /*
-        * If the starting position is not word-aligned, deal with the
-        * partial word.
-        */
-       if (bit) {
-               /*
-                * Calculate last (rightmost) bit number to look at,
-                * and mask for all the relevant bits in this word.
-                */
-               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-               /*
-                * Calculate the difference between the value there
-                * and what we're looking for.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different.  Mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i = XFS_RTLOBIT(wdiff) - bit;
-                       *rtblock = start + i - 1;
-                       return 0;
-               }
-               i = lastbit - bit;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the next one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = *b ^ want)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *rtblock = start + i - 1;
-                       return 0;
-               }
-               i += XFS_NBWORD;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       }
-       /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if ((lastbit = len - i)) {
-               /*
-                * Calculate mask for all the relevant bits in this word.
-                */
-               mask = ((xfs_rtword_t)1 << lastbit) - 1;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *rtblock = start + i - 1;
-                       return 0;
-               } else
-                       i = len;
-       }
-       /*
-        * No match, return that we scanned the whole area.
-        */
-       xfs_trans_brelse(tp, bp);
-       *rtblock = start + i - 1;
-       return 0;
-}
-
-/*
- * Mark an extent specified by start and len freed.
- * Updates all the summary information as well as the bitmap.
- */
-STATIC int                             /* error */
-xfs_rtfree_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to free */
-       xfs_extlen_t    len,            /* length to free */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
-{
-       xfs_rtblock_t   end;            /* end of the freed extent */
-       int             error;          /* error value */
-       xfs_rtblock_t   postblock;      /* first block freed > end */
-       xfs_rtblock_t   preblock;       /* first block freed < start */
-
-       end = start + len - 1;
-       /*
-        * Modify the bitmap to mark this extent freed.
-        */
-       error = xfs_rtmodify_range(mp, tp, start, len, 1);
-       if (error) {
-               return error;
-       }
-       /*
-        * Assume we're freeing out of the middle of an allocated extent.
-        * We need to find the beginning and end of the extent so we can
-        * properly update the summary.
-        */
-       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
-       if (error) {
-               return error;
-       }
-       /*
-        * Find the next allocated block (end of allocated extent).
-        */
-       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-               &postblock);
-       if (error)
-               return error;
-       /*
-        * If there are blocks not being freed at the front of the
-        * old extent, add summary data for them to be allocated.
-        */
-       if (preblock < start) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(start - preblock),
-                       XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * If there are blocks not being freed at the end of the
-        * old extent, add summary data for them to be allocated.
-        */
-       if (postblock > end) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(postblock - end),
-                       XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * Increment the summary information corresponding to the entire
-        * (new) free extent.
-        */
-       error = xfs_rtmodify_summary(mp, tp,
-               XFS_RTBLOCKLOG(postblock + 1 - preblock),
-               XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
-       return error;
-}
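
For illustration only (not part of the patch): freeing [start, end] merges it with any already-free neighbours found by the back/forward scans. The summary bookkeeping drops the counters for the old left and right free pieces and adds one counter for the merged extent [preblock, postblock], mirroring the xfs_rtmodify_summary() calls above. A stand-alone sketch with hypothetical names:

#include <stdint.h>

static int log2_floor(uint64_t len)        /* floor(log2(len)), len > 0 */
{
        int log = 0;

        while (len >>= 1)
                log++;
        return log;
}

static void account_free_merge(int *sum, uint64_t preblock, uint64_t start,
                               uint64_t end, uint64_t postblock)
{
        if (preblock < start)                   /* old free piece in front */
                sum[log2_floor(start - preblock)]--;
        if (postblock > end)                    /* old free piece behind */
                sum[log2_floor(postblock - end)]--;
        sum[log2_floor(postblock + 1 - preblock)]++;    /* merged extent */
}
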
-
-/*
- * Read and return the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
- */
-STATIC int                             /* error */
-xfs_rtget_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_suminfo_t   *sum)           /* out: summary info for this block */
-{
-       xfs_buf_t       *bp;            /* buffer for summary block */
-       int             error;          /* error value */
-       xfs_fsblock_t   sb;             /* summary fsblock */
-       int             so;             /* index into the summary file */
-       xfs_suminfo_t   *sp;            /* pointer to returned data */
-
-       /*
-        * Compute entry number in the summary file.
-        */
-       so = XFS_SUMOFFS(mp, log, bbno);
-       /*
-        * Compute the block number in the summary file.
-        */
-       sb = XFS_SUMOFFSTOBLOCK(mp, so);
-       /*
-        * If we have an old buffer, and the block number matches, use that.
-        */
-       if (rbpp && *rbpp && *rsb == sb)
-               bp = *rbpp;
-       /*
-        * Otherwise we have to get the buffer.
-        */
-       else {
-               /*
-                * If there was an old one, get rid of it first.
-                */
-               if (rbpp && *rbpp)
-                       xfs_trans_brelse(tp, *rbpp);
-               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-               if (error) {
-                       return error;
-               }
-               /*
-                * Remember this buffer and block for the next call.
-                */
-               if (rbpp) {
-                       *rbpp = bp;
-                       *rsb = sb;
-               }
-       }
-       /*
-        * Point to the summary information & copy it out.
-        */
-       sp = XFS_SUMPTR(mp, bp, so);
-       *sum = *sp;
-       /*
-        * Drop the buffer if we're not asked to remember it.
-        */
-       if (!rbpp)
-               xfs_trans_brelse(tp, bp);
-       return 0;
-}
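
Illustrative aside: the summary file behaves like a two-dimensional table indexed by (size class, bitmap block). The row-major index arithmetic below is what XFS_SUMOFFS() is assumed to compute; treat the exact layout as illustrative rather than authoritative:

/* Hypothetical helper: locate the counter for one size class and one
 * bitmap block in a row-major summary table of rbmblocks columns. */
static int summary_index(int log, int bbno, int rbmblocks)
{
        return log * rbmblocks + bbno;
}
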
-
-/*
- * Set the given range of bitmap bits to the given value.
- * Do whatever I/O and logging is required.
- */
-STATIC int                             /* error */
-xfs_rtmodify_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to modify */
-       xfs_extlen_t    len,            /* length of extent to modify */
-       int             val)            /* 1 for free, 0 for allocated */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtword_t    *first;         /* first used word in the buffer */
-       int             i;              /* current bit number rel. to start */
-       int             lastbit;        /* last useful bit in word */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute starting bitmap block number.
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       /*
-        * Read the bitmap block, and point to its data.
-        */
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
+                               return 0;
+                       }
+                       /*
+                        * If the "next block to try" returned from the
+                        * allocator is beyond the next bitmap block,
+                        * skip to that bitmap block.
+                        */
+                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
+                               i = XFS_BITTOBLOCK(mp, n) - 1;
+               }
        }
-       bufp = bp->b_addr;
-       /*
-        * Compute the starting word's address, and starting bit.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       first = b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
        /*
-        * 0 (allocated) => all zeroes; 1 (free) => all ones.
+        * Didn't find any maxlen blocks.  Try smaller ones, unless
+        * we're asking for a fixed size extent.
         */
-       val = -val;
+       if (minlen > --maxlen) {
+               *rtblock = NULLRTBLOCK;
+               return 0;
+       }
+       ASSERT(minlen != 0);
+       ASSERT(maxlen != 0);
+
        /*
-        * If not starting on a word boundary, deal with the first
-        * (partial) word.
+        * Loop over sizes, from maxlen down to minlen.
+        * This time, when we do the allocations, allow smaller ones
+        * to succeed.
         */
-       if (bit) {
-               /*
-                * Compute first bit not changed and mask of relevant bits.
-                */
-               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-               /*
-                * Set/clear the active bits.
-                */
-               if (val)
-                       *b |= mask;
-               else
-                       *b &= ~mask;
-               i = lastbit - bit;
+       for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
                /*
-                * Go on to the next block if that's where the next word is
-                * and we need the next word.
+                * Loop over all the bitmap blocks, try an allocation
+                * starting in that block.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
                        /*
-                        * Log the changed part of this block.
-                        * Get the next one.
+                        * Get the summary information for this level/block.
                         */
-                       xfs_trans_log_buf(tp, bp,
-                               (uint)((char *)first - (char *)bufp),
-                               (uint)((char *)b - (char *)bufp));
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
+                                                 &sum);
                        if (error) {
                                return error;
                        }
-                       first = b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
                        /*
-                        * Go on to the next word in the buffer
+                        * If nothing there, go on to next.
                         */
-                       b++;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the next one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Set the word value correctly.
-                */
-               *b = val;
-               i += XFS_NBWORD;
-               /*
-                * Go on to the next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       if (!sum)
+                               continue;
                        /*
-                        * Log the changed part of this block.
-                        * Get the next one.
+                        * Try the allocation.  Make sure the specified
+                        * minlen/maxlen are in the possible range for
+                        * this summary level.
                         */
-                       xfs_trans_log_buf(tp, bp,
-                               (uint)((char *)first - (char *)bufp),
-                               (uint)((char *)b - (char *)bufp));
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       error = xfs_rtallocate_extent_block(mp, tp, i,
+                                       XFS_RTMAX(minlen, 1 << l),
+                                       XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
+                                       len, &n, rbpp, rsb, prod, &r);
                        if (error) {
                                return error;
                        }
-                       first = b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
                        /*
-                        * Go on to the next word in the buffer
+                        * If it worked, return that extent.
+                        */
+                       if (r != NULLRTBLOCK) {
+                               *rtblock = r;
+                               return 0;
+                       }
+                       /*
+                        * If the "next block to try" returned from the
+                        * allocator is beyond the next bitmap block,
+                        * skip to that bitmap block.
                         */
-                       b++;
+                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
+                               i = XFS_BITTOBLOCK(mp, n) - 1;
                }
        }
        /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if ((lastbit = len - i)) {
-               /*
-                * Compute a mask of relevant bits.
-                */
-               bit = 0;
-               mask = ((xfs_rtword_t)1 << lastbit) - 1;
-               /*
-                * Set/clear the active bits.
-                */
-               if (val)
-                       *b |= mask;
-               else
-                       *b &= ~mask;
-               b++;
-       }
-       /*
-        * Log any remaining changed bytes.
+        * Got nothing, return failure.
         */
-       if (b > first)
-               xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
-                       (uint)((char *)b - (char *)bufp - 1));
+       *rtblock = NULLRTBLOCK;
        return 0;
 }
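
Aside, for illustration only: the size-based allocator above scans size classes from floor(log2(maxlen)) down to floor(log2(minlen)) and, within each class, skips bitmap blocks whose summary counter is zero. A hedged, stand-alone sketch of that two-level scan, with hypothetical names:

/* Find the first (size class, bitmap block) pair that records a free
 * extent, scanning classes from large to small.  'sum' is an
 * nclasses-by-nblocks table of free-extent counts. */
static int first_candidate(const int *sum, int nblocks,
                           int lo_class, int hi_class,
                           int *out_class, int *out_block)
{
        int l, b;

        for (l = hi_class; l >= lo_class; l--)
                for (b = 0; b < nblocks; b++)
                        if (sum[l * nblocks + b]) {
                                *out_class = l;
                                *out_block = b;
                                return 1;
                        }
        return 0;       /* nothing free in the requested size range */
}
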
 
 /*
- * Read and modify the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
+ * Allocate space to the bitmap or summary file, and zero it, for growfs.
  */
 STATIC int                             /* error */
-xfs_rtmodify_summary(
+xfs_growfs_rt_alloc(
        xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       int             delta,          /* change to make to summary info */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+       xfs_extlen_t    oblocks,        /* old count of blocks */
+       xfs_extlen_t    nblocks,        /* new count of blocks */
+       xfs_inode_t     *ip)            /* inode (bitmap/summary) */
 {
-       xfs_buf_t       *bp;            /* buffer for the summary block */
-       int             error;          /* error value */
-       xfs_fsblock_t   sb;             /* summary fsblock */
-       int             so;             /* index into the summary file */
-       xfs_suminfo_t   *sp;            /* pointer to returned data */
+       xfs_fileoff_t   bno;            /* block number in file */
+       xfs_buf_t       *bp;            /* temporary buffer for zeroing */
+       int             committed;      /* transaction committed flag */
+       xfs_daddr_t     d;              /* disk block address */
+       int             error;          /* error return value */
+       xfs_fsblock_t   firstblock;     /* first block allocated in xaction */
+       xfs_bmap_free_t flist;          /* list of freed blocks */
+       xfs_fsblock_t   fsbno;          /* filesystem block for bno */
+       xfs_bmbt_irec_t map;            /* block map output */
+       int             nmap;           /* number of block maps */
+       int             resblks;        /* space reservation */
 
        /*
-        * Compute entry number in the summary file.
-        */
-       so = XFS_SUMOFFS(mp, log, bbno);
-       /*
-        * Compute the block number in the summary file.
-        */
-       sb = XFS_SUMOFFSTOBLOCK(mp, so);
-       /*
-        * If we have an old buffer, and the block number matches, use that.
-        */
-       if (rbpp && *rbpp && *rsb == sb)
-               bp = *rbpp;
-       /*
-        * Otherwise we have to get the buffer.
+        * Allocate space to the file, as necessary.
         */
-       else {
+       while (oblocks < nblocks) {
+               int             cancelflags = 0;
+               xfs_trans_t     *tp;
+
+               tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
+               resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
                /*
-                * If there was an old one, get rid of it first.
+                * Reserve space & log for one extent added to the file.
                 */
-               if (rbpp && *rbpp)
-                       xfs_trans_brelse(tp, *rbpp);
-               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-               if (error) {
-                       return error;
-               }
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
+                                         resblks, 0);
+               if (error)
+                       goto error_cancel;
+               cancelflags = XFS_TRANS_RELEASE_LOG_RES;
                /*
-                * Remember this buffer and block for the next call.
+                * Lock the inode.
                 */
-               if (rbpp) {
-                       *rbpp = bp;
-                       *rsb = sb;
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+               xfs_bmap_init(&flist, &firstblock);
+               /*
+                * Allocate blocks to the bitmap file.
+                */
+               nmap = 1;
+               cancelflags |= XFS_TRANS_ABORT;
+               error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
+                                       XFS_BMAPI_METADATA, &firstblock,
+                                       resblks, &map, &nmap, &flist);
+               if (!error && nmap < 1)
+                       error = XFS_ERROR(ENOSPC);
+               if (error)
+                       goto error_cancel;
+               /*
+                * Free any blocks freed up in the transaction, then commit.
+                */
+               error = xfs_bmap_finish(&tp, &flist, &committed);
+               if (error)
+                       goto error_cancel;
+               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+               if (error)
+                       goto error;
+               /*
+                * Now we need to clear the allocated blocks.
+                * Do this one block per transaction, to keep it simple.
+                */
+               cancelflags = 0;
+               for (bno = map.br_startoff, fsbno = map.br_startblock;
+                    bno < map.br_startoff + map.br_blockcount;
+                    bno++, fsbno++) {
+                       tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
+                       /*
+                        * Reserve log for one block zeroing.
+                        */
+                       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
+                                                 0, 0);
+                       if (error)
+                               goto error_cancel;
+                       /*
+                        * Lock the bitmap inode.
+                        */
+                       xfs_ilock(ip, XFS_ILOCK_EXCL);
+                       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+                       /*
+                        * Get a buffer for the block.
+                        */
+                       d = XFS_FSB_TO_DADDR(mp, fsbno);
+                       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+                               mp->m_bsize, 0);
+                       if (bp == NULL) {
+                               error = XFS_ERROR(EIO);
+error_cancel:
+                               xfs_trans_cancel(tp, cancelflags);
+                               goto error;
+                       }
+                       memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+                       xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+                       /*
+                        * Commit the transaction.
+                        */
+                       error = xfs_trans_commit(tp, 0);
+                       if (error)
+                               goto error;
                }
+               /*
+                * Go on to the next extent, if any.
+                */
+               oblocks = map.br_startoff + map.br_blockcount;
        }
-       /*
-        * Point to the summary information, modify and log it.
-        */
-       sp = XFS_SUMPTR(mp, bp, so);
-       *sp += delta;
-       xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
-               (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
        return 0;
+
+error:
+       return error;
 }
 
 /*
@@ -2128,66 +1230,6 @@ xfs_rtallocate_extent(
        return 0;
 }
 
-/*
- * Free an extent in the realtime subvolume.  Length is expressed in
- * realtime extents, as is the block number.
- */
-int                                    /* error */
-xfs_rtfree_extent(
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number to free */
-       xfs_extlen_t    len)            /* length of extent freed */
-{
-       int             error;          /* error value */
-       xfs_mount_t     *mp;            /* file system mount structure */
-       xfs_fsblock_t   sb;             /* summary file block number */
-       xfs_buf_t       *sumbp;         /* summary file block buffer */
-
-       mp = tp->t_mountp;
-
-       ASSERT(mp->m_rbmip->i_itemp != NULL);
-       ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
-
-#ifdef DEBUG
-       /*
-        * Check to see that this whole range is currently allocated.
-        */
-       {
-               int     stat;           /* result from checking range */
-
-               error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat);
-               if (error) {
-                       return error;
-               }
-               ASSERT(stat);
-       }
-#endif
-       sumbp = NULL;
-       /*
-        * Free the range of realtime blocks.
-        */
-       error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
-       if (error) {
-               return error;
-       }
-       /*
-        * Mark more blocks free in the superblock.
-        */
-       xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
-       /*
-        * If we've now freed all the blocks, reset the file sequence
-        * number to 0.
-        */
-       if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
-           mp->m_sb.sb_rextents) {
-               if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
-                       mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
-               *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
-               xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
-       }
-       return 0;
-}
-
 /*
  * Initialize realtime fields in the mount structure.
  */
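
Aside (not part of this diff): xfs_rtfree_extent(), removed here and re-added in xfs_rtbitmap.c further down, asserts that the caller already holds the realtime bitmap inode locked and joined to the transaction. A rough sketch of what a hypothetical caller would do, using only functions visible in this diff; example_rt_free() is illustrative, not kernel code:

/*
 * Illustrative sketch only, not from this commit: free "len" realtime
 * extents starting at "bno" within an existing transaction.  The realtime
 * bitmap inode is locked and joined first, matching the ASSERTs in
 * xfs_rtfree_extent().
 */
static int
example_rt_free(struct xfs_trans *tp, struct xfs_mount *mp,
                xfs_rtblock_t bno, xfs_extlen_t len)
{
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        return xfs_rtfree_extent(tp, bno, len);
}
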
index b2a1a24c0e2f3d8037cdd03f2b8deffc298d38c9..752b63d103003288d48c463571cc59279f8c531d 100644 (file)
@@ -95,6 +95,30 @@ xfs_growfs_rt(
        struct xfs_mount        *mp,    /* file system mount structure */
        xfs_growfs_rt_t         *in);   /* user supplied growfs struct */
 
+/*
+ * From xfs_rtbitmap.c
+ */
+int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
+                 xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
+int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
+                     xfs_rtblock_t start, xfs_extlen_t len, int val,
+                     xfs_rtblock_t *new, int *stat);
+int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
+                   xfs_rtblock_t start, xfs_rtblock_t limit,
+                   xfs_rtblock_t *rtblock);
+int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
+                   xfs_rtblock_t start, xfs_rtblock_t limit,
+                   xfs_rtblock_t *rtblock);
+int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
+                      xfs_rtblock_t start, xfs_extlen_t len, int val);
+int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
+                        xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp,
+                        xfs_fsblock_t *rsb);
+int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
+                    xfs_rtblock_t start, xfs_extlen_t len,
+                    struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
+
+
 #else
 # define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb)  (ENOSYS)
 # define xfs_rtfree_extent(t,b,l)                       (ENOSYS)
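
Aside (not part of this diff): the prototypes added above expose the shared bitmap helpers to the rest of XFS. As a hedged illustration, a hypothetical caller could use xfs_rtcheck_range() to confirm a candidate range is entirely free (val == 1) before allocating it; example_check_free() and its error choice are assumptions for the example only:

/*
 * Illustrative sketch only, not from this commit: check that the range
 * [start, start + len) in the realtime bitmap is entirely free.
 */
static int
example_check_free(struct xfs_mount *mp, struct xfs_trans *tp,
                   xfs_rtblock_t start, xfs_extlen_t len)
{
        xfs_rtblock_t   first_mismatch; /* first block that is not free */
        int             is_free;        /* 1 if the whole range is free */
        int             error;

        error = xfs_rtcheck_range(mp, tp, start, len, 1, &first_mismatch,
                                  &is_free);
        if (error)
                return error;
        return is_free ? 0 : XFS_ERROR(EINVAL);
}
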
diff --git a/fs/xfs/xfs_rtbitmap.c b/fs/xfs/xfs_rtbitmap.c
new file mode 100644 (file)
index 0000000..e30efe8
--- /dev/null
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_buf.h"
+#include "xfs_icache.h"
+#include "xfs_dinode.h"
+
+
+/*
+ * Realtime allocator bitmap functions shared with userspace.
+ */
+
+/*
+ * Get a buffer for the bitmap or summary file block specified.
+ * The buffer is returned read and locked.
+ */
+int
+xfs_rtbuf_get(
+       xfs_mount_t     *mp,            /* file system mount structure */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   block,          /* block number in bitmap or summary */
+       int             issum,          /* is summary not bitmap */
+       xfs_buf_t       **bpp)          /* output: buffer for the block */
+{
+       xfs_buf_t       *bp;            /* block buffer, result */
+       xfs_inode_t     *ip;            /* bitmap or summary inode */
+       xfs_bmbt_irec_t map;
+       int             nmap = 1;
+       int             error;          /* error value */
+
+       ip = issum ? mp->m_rsumip : mp->m_rbmip;
+
+       error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
+       if (error)
+               return error;
+
+       ASSERT(map.br_startblock != NULLFSBLOCK);
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+                                  XFS_FSB_TO_DADDR(mp, map.br_startblock),
+                                  mp->m_bsize, 0, &bp, NULL);
+       if (error)
+               return error;
+       ASSERT(!xfs_buf_geterror(bp));
+       *bpp = bp;
+       return 0;
+}
+
+/*
+ * Searching backward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_back(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to look at */
+       xfs_rtblock_t   limit,          /* last block to look at */
+       xfs_rtblock_t   *rtblock)       /* out: start block found */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtblock_t   firstbit;       /* first useful bit in the word */
+       xfs_rtblock_t   i;              /* current bit number rel. to start */
+       xfs_rtblock_t   len;            /* length of inspected area */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       xfs_rtword_t    want;           /* mask for "good" values */
+       xfs_rtword_t    wdiff;          /* difference from wanted value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute and read in starting bitmap block for starting block.
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Get the first word's index & point to it.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       len = start - limit + 1;
+       /*
+        * Compute match value, based on the bit at start: if 1 (free)
+        * then all-ones, else all-zeroes.
+        */
+       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+       /*
+        * If the starting position is not word-aligned, deal with the
+        * partial word.
+        */
+       if (bit < XFS_NBWORD - 1) {
+               /*
+                * Calculate first (leftmost) bit number to look at,
+                * and mask for all the relevant bits in this word.
+                */
+               firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
+               mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
+                       firstbit;
+               /*
+                * Calculate the difference between the value there
+                * and what we're looking for.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different.  Mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i = bit - XFS_RTHIBIT(wdiff);
+                       *rtblock = start - i + 1;
+                       return 0;
+               }
+               i = bit - firstbit + 1;
+               /*
+                * Go on to previous block if that's where the previous word is
+                * and we need the previous word.
+                */
+               if (--word == -1 && i < len) {
+                       /*
+                        * If done with this block, get the previous one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       bufp = bp->b_addr;
+                       word = XFS_BLOCKWMASK(mp);
+                       b = &bufp[word];
+               } else {
+                       /*
+                        * Go on to the previous word in the buffer.
+                        */
+                       b--;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the previous one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = *b ^ want)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+                       *rtblock = start - i + 1;
+                       return 0;
+               }
+               i += XFS_NBWORD;
+               /*
+                * Go on to previous block if that's where the previous word is
+                * and we need the previous word.
+                */
+               if (--word == -1 && i < len) {
+                       /*
+                        * If done with this block, get the previous one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       bufp = bp->b_addr;
+                       word = XFS_BLOCKWMASK(mp);
+                       b = &bufp[word];
+               } else {
+                       /*
+                        * Go on to the previous word in the buffer.
+                        */
+                       b--;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if (len - i) {
+               /*
+                * Calculate first (leftmost) bit number to look at,
+                * and mask for all the relevant bits in this word.
+                */
+               firstbit = XFS_NBWORD - (len - i);
+               mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+                       *rtblock = start - i + 1;
+                       return 0;
+               } else
+                       i = len;
+       }
+       /*
+        * No match, return that we scanned the whole area.
+        */
+       xfs_trans_brelse(tp, bp);
+       *rtblock = start - i + 1;
+       return 0;
+}
+
+/*
+ * Searching forward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_forw(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to look at */
+       xfs_rtblock_t   limit,          /* last block to look at */
+       xfs_rtblock_t   *rtblock)       /* out: start block found */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtblock_t   i;              /* current bit number rel. to start */
+       xfs_rtblock_t   lastbit;        /* last useful bit in the word */
+       xfs_rtblock_t   len;            /* length of inspected area */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       xfs_rtword_t    want;           /* mask for "good" values */
+       xfs_rtword_t    wdiff;          /* difference from wanted value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute and read in starting bitmap block for starting block.
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Get the first word's index & point to it.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       len = limit - start + 1;
+       /*
+        * Compute match value, based on the bit at start: if 1 (free)
+        * then all-ones, else all-zeroes.
+        */
+       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+       /*
+        * If the starting position is not word-aligned, deal with the
+        * partial word.
+        */
+       if (bit) {
+               /*
+                * Calculate last (rightmost) bit number to look at,
+                * and mask for all the relevant bits in this word.
+                */
+               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+               /*
+                * Calculate the difference between the value there
+                * and what we're looking for.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different.  Mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i = XFS_RTLOBIT(wdiff) - bit;
+                       *rtblock = start + i - 1;
+                       return 0;
+               }
+               i = lastbit - bit;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the next one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = *b ^ want)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *rtblock = start + i - 1;
+                       return 0;
+               }
+               i += XFS_NBWORD;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if ((lastbit = len - i)) {
+               /*
+                * Calculate mask for all the relevant bits in this word.
+                */
+               mask = ((xfs_rtword_t)1 << lastbit) - 1;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *rtblock = start + i - 1;
+                       return 0;
+               } else
+                       i = len;
+       }
+       /*
+        * No match, return that we scanned the whole area.
+        */
+       xfs_trans_brelse(tp, bp);
+       *rtblock = start + i - 1;
+       return 0;
+}
+
+/*
+ * Read and modify the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+int
+xfs_rtmodify_summary(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       int             log,            /* log2 of extent size */
+       xfs_rtblock_t   bbno,           /* bitmap block number */
+       int             delta,          /* change to make to summary info */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+{
+       xfs_buf_t       *bp;            /* buffer for the summary block */
+       int             error;          /* error value */
+       xfs_fsblock_t   sb;             /* summary fsblock */
+       int             so;             /* index into the summary file */
+       xfs_suminfo_t   *sp;            /* pointer to returned data */
+
+       /*
+        * Compute entry number in the summary file.
+        */
+       so = XFS_SUMOFFS(mp, log, bbno);
+       /*
+        * Compute the block number in the summary file.
+        */
+       sb = XFS_SUMOFFSTOBLOCK(mp, so);
+       /*
+        * If we have an old buffer, and the block number matches, use that.
+        */
+       if (rbpp && *rbpp && *rsb == sb)
+               bp = *rbpp;
+       /*
+        * Otherwise we have to get the buffer.
+        */
+       else {
+               /*
+                * If there was an old one, get rid of it first.
+                */
+               if (rbpp && *rbpp)
+                       xfs_trans_brelse(tp, *rbpp);
+               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+               if (error) {
+                       return error;
+               }
+               /*
+                * Remember this buffer and block for the next call.
+                */
+               if (rbpp) {
+                       *rbpp = bp;
+                       *rsb = sb;
+               }
+       }
+       /*
+        * Point to the summary information, modify and log it.
+        */
+       sp = XFS_SUMPTR(mp, bp, so);
+       *sp += delta;
+       xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
+               (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
+       return 0;
+}
+
+/*
+ * Set the given range of bitmap bits to the given value.
+ * Do whatever I/O and logging is required.
+ */
+int
+xfs_rtmodify_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to modify */
+       xfs_extlen_t    len,            /* length of extent to modify */
+       int             val)            /* 1 for free, 0 for allocated */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtword_t    *first;         /* first used word in the buffer */
+       int             i;              /* current bit number rel. to start */
+       int             lastbit;        /* last useful bit in word */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute starting bitmap block number.
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       /*
+        * Read the bitmap block, and point to its data.
+        */
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Compute the starting word's address, and starting bit.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       first = b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       /*
+        * 0 (allocated) => all zeroes; 1 (free) => all ones.
+        */
+       val = -val;
+       /*
+        * If not starting on a word boundary, deal with the first
+        * (partial) word.
+        */
+       if (bit) {
+               /*
+                * Compute first bit not changed and mask of relevant bits.
+                */
+               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+               /*
+                * Set/clear the active bits.
+                */
+               if (val)
+                       *b |= mask;
+               else
+                       *b &= ~mask;
+               i = lastbit - bit;
+               /*
+                * Go on to the next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * Log the changed part of this block.
+                        * Get the next one.
+                        */
+                       xfs_trans_log_buf(tp, bp,
+                               (uint)((char *)first - (char *)bufp),
+                               (uint)((char *)b - (char *)bufp));
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       first = b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer
+                        */
+                       b++;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the next one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Set the word value correctly.
+                */
+               *b = val;
+               i += XFS_NBWORD;
+               /*
+                * Go on to the next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * Log the changed part of this block.
+                        * Get the next one.
+                        */
+                       xfs_trans_log_buf(tp, bp,
+                               (uint)((char *)first - (char *)bufp),
+                               (uint)((char *)b - (char *)bufp));
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       first = b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer
+                        */
+                       b++;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if ((lastbit = len - i)) {
+               /*
+                * Compute a mask of relevant bits.
+                */
+               bit = 0;
+               mask = ((xfs_rtword_t)1 << lastbit) - 1;
+               /*
+                * Set/clear the active bits.
+                */
+               if (val)
+                       *b |= mask;
+               else
+                       *b &= ~mask;
+               b++;
+       }
+       /*
+        * Log any remaining changed bytes.
+        */
+       if (b > first)
+               xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
+                       (uint)((char *)b - (char *)bufp - 1));
+       return 0;
+}
+
+/*
+ * Mark an extent specified by start and len freed.
+ * Updates all the summary information as well as the bitmap.
+ */
+int
+xfs_rtfree_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to free */
+       xfs_extlen_t    len,            /* length to free */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+{
+       xfs_rtblock_t   end;            /* end of the freed extent */
+       int             error;          /* error value */
+       xfs_rtblock_t   postblock;      /* first block freed > end */
+       xfs_rtblock_t   preblock;       /* first block freed < start */
+
+       end = start + len - 1;
+       /*
+        * Modify the bitmap to mark this extent freed.
+        */
+       error = xfs_rtmodify_range(mp, tp, start, len, 1);
+       if (error) {
+               return error;
+       }
+       /*
+        * Assume we're freeing out of the middle of an allocated extent.
+        * We need to find the beginning and end of the extent so we can
+        * properly update the summary.
+        */
+       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+       if (error) {
+               return error;
+       }
+       /*
+        * Find the next allocated block (end of allocated extent).
+        */
+       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+               &postblock);
+       if (error)
+               return error;
+       /*
+        * If there are blocks not being freed at the front of the
+        * old extent, add summary data for them to be allocated.
+        */
+       if (preblock < start) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(start - preblock),
+                       XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * If there are blocks not being freed at the end of the
+        * old extent, add summary data for them to be allocated.
+        */
+       if (postblock > end) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(postblock - end),
+                       XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * Increment the summary information corresponding to the entire
+        * (new) free extent.
+        */
+       error = xfs_rtmodify_summary(mp, tp,
+               XFS_RTBLOCKLOG(postblock + 1 - preblock),
+               XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+       return error;
+}
+
+/*
+ * Check that the given range is either all allocated (val = 0) or
+ * all free (val = 1).
+ */
+int
+xfs_rtcheck_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block number of extent */
+       xfs_extlen_t    len,            /* length of extent */
+       int             val,            /* 1 for free, 0 for allocated */
+       xfs_rtblock_t   *new,           /* out: first block not matching */
+       int             *stat)          /* out: 1 for matches, 0 for not */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtblock_t   i;              /* current bit number rel. to start */
+       xfs_rtblock_t   lastbit;        /* last useful bit in word */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       xfs_rtword_t    wdiff;          /* difference from wanted value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute starting bitmap block number
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       /*
+        * Read the bitmap block.
+        */
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Compute the starting word's address, and starting bit.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       /*
+        * 0 (allocated) => all zeroes; 1 (free) => all ones.
+        */
+       val = -val;
+       /*
+        * If not starting on a word boundary, deal with the first
+        * (partial) word.
+        */
+       if (bit) {
+               /*
+                * Compute first bit not examined.
+                */
+               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+               /*
+                * Mask of relevant bits.
+                */
+               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ val) & mask)) {
+                       /*
+                        * Different, compute first wrong bit and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i = XFS_RTLOBIT(wdiff) - bit;
+                       *new = start + i;
+                       *stat = 0;
+                       return 0;
+               }
+               i = lastbit - bit;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the next one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = *b ^ val)) {
+                       /*
+                        * Different, compute first wrong bit and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *new = start + i;
+                       *stat = 0;
+                       return 0;
+               }
+               i += XFS_NBWORD;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if ((lastbit = len - i)) {
+               /*
+                * Mask of relevant bits.
+                */
+               mask = ((xfs_rtword_t)1 << lastbit) - 1;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ val) & mask)) {
+                       /*
+                        * Different, compute first wrong bit and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *new = start + i;
+                       *stat = 0;
+                       return 0;
+               } else
+                       i = len;
+       }
+       /*
+        * Successful, return.
+        */
+       xfs_trans_brelse(tp, bp);
+       *new = start + i;
+       *stat = 1;
+       return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Check that the given extent (block range) is allocated already.
+ */
+STATIC int                             /* error */
+xfs_rtcheck_alloc_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   bno,            /* starting block number of extent */
+       xfs_extlen_t    len)            /* length of extent */
+{
+       xfs_rtblock_t   new;            /* dummy for xfs_rtcheck_range */
+       int             stat;
+       int             error;
+
+       error = xfs_rtcheck_range(mp, tp, bno, len, 0, &new, &stat);
+       if (error)
+               return error;
+       ASSERT(stat);
+       return 0;
+}
+#else
+#define xfs_rtcheck_alloc_range(m,t,b,l)       (0)
+#endif
+/*
+ * Free an extent in the realtime subvolume.  Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int                                    /* error */
+xfs_rtfree_extent(
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   bno,            /* starting block number to free */
+       xfs_extlen_t    len)            /* length of extent freed */
+{
+       int             error;          /* error value */
+       xfs_mount_t     *mp;            /* file system mount structure */
+       xfs_fsblock_t   sb;             /* summary file block number */
+       xfs_buf_t       *sumbp = NULL;  /* summary file block buffer */
+
+       mp = tp->t_mountp;
+
+       ASSERT(mp->m_rbmip->i_itemp != NULL);
+       ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+
+       error = xfs_rtcheck_alloc_range(mp, tp, bno, len);
+       if (error)
+               return error;
+
+       /*
+        * Free the range of realtime blocks.
+        */
+       error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
+       if (error) {
+               return error;
+       }
+       /*
+        * Mark more blocks free in the superblock.
+        */
+       xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
+       /*
+        * If we've now freed all the blocks, reset the file sequence
+        * number to 0.
+        */
+       if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
+           mp->m_sb.sb_rextents) {
+               if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+                       mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+               *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+               xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+       }
+       return 0;
+}
+
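
Aside (not part of this diff): every walker in the new file first converts a realtime block number into a bitmap block, a word index within that block, and a bit index within that word (the XFS_BITTOBLOCK, XFS_BITTOWORD and "start & (XFS_NBWORD - 1)" steps above). A standalone sketch of the same arithmetic, assuming 4096-byte bitmap blocks and 32-bit words purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define EX_BLOCKSIZE    4096                            /* assumed bitmap block size */
#define EX_NBWORD       32                              /* bits per bitmap word */
#define EX_BLOCKWSIZE   (EX_BLOCKSIZE / 4)              /* words per bitmap block */
#define EX_BLOCKBITS    (EX_BLOCKWSIZE * EX_NBWORD)     /* bits per bitmap block */

int main(void)
{
        uint64_t rtblock = 1000000;     /* realtime extent number to locate */

        uint64_t block = rtblock / EX_BLOCKBITS;                    /* roughly XFS_BITTOBLOCK */
        unsigned int word = (rtblock / EX_NBWORD) % EX_BLOCKWSIZE;  /* roughly XFS_BITTOWORD */
        unsigned int bit = rtblock % EX_NBWORD;                     /* bit within the word */

        printf("bitmap block %llu, word %u, bit %u\n",
               (unsigned long long)block, word, bit);
        return 0;
}
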
index a5b59d92eb7095cca4039dbfb2ba5c398ea8fbe2..05b5493d2baa0ab44d86e54b84f678a209801020 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_bmap.h"
 #include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_fsops.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_dinode.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
 
 /*
  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -249,13 +240,13 @@ xfs_mount_validate_sb(
        if (xfs_sb_version_has_pquotino(sbp)) {
                if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
                        xfs_notice(mp,
-                          "Version 5 of Super block has XFS_OQUOTA bits.\n");
+                          "Version 5 of Super block has XFS_OQUOTA bits.");
                        return XFS_ERROR(EFSCORRUPTED);
                }
        } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
                                XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
                        xfs_notice(mp,
-"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.\n");
+"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
                        return XFS_ERROR(EFSCORRUPTED);
        }
 
@@ -624,8 +615,9 @@ xfs_sb_read_verify(
 
 out_error:
        if (error) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-                                    mp, bp->b_addr);
+               if (error != EWRONGFS)
+                       XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+                                            mp, bp->b_addr);
                xfs_buf_ioerror(bp, error);
        }
 }
index 6835b44f850e58e780c2712e24530dbb3e57f5f4..35061d4b614c7ab9fabb80e1b93ffb6bc8b586d2 100644 (file)
@@ -699,7 +699,4 @@ extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
 extern void    xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
 extern void    xfs_sb_quota_from_disk(struct xfs_sb *sbp);
 
-extern const struct xfs_buf_ops xfs_sb_buf_ops;
-extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
-
 #endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/xfs_shared.h b/fs/xfs/xfs_shared.h
new file mode 100644 (file)
index 0000000..8c5035a
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_SHARED_H__
+#define __XFS_SHARED_H__
+
+/*
+ * Definitions shared between kernel and userspace that don't fit into any
+ * other shared header file.
+ */
+struct xfs_ifork;
+struct xfs_buf;
+struct xfs_buf_ops;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_inode;
+
+/*
+ * Buffer verifier operations are widely used, including userspace tools
+ */
+extern const struct xfs_buf_ops xfs_agf_buf_ops;
+extern const struct xfs_buf_ops xfs_agi_buf_ops;
+extern const struct xfs_buf_ops xfs_agfl_buf_ops;
+extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
+extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
+extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
+extern const struct xfs_buf_ops xfs_dquot_buf_ops;
+extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
+extern const struct xfs_buf_ops xfs_sb_buf_ops;
+extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+
+/*
+ * Transaction types.  Used to distinguish types of buffers. These never reach
+ * the log.
+ */
+#define XFS_TRANS_SETATTR_NOT_SIZE     1
+#define XFS_TRANS_SETATTR_SIZE         2
+#define XFS_TRANS_INACTIVE             3
+#define XFS_TRANS_CREATE               4
+#define XFS_TRANS_CREATE_TRUNC         5
+#define XFS_TRANS_TRUNCATE_FILE                6
+#define XFS_TRANS_REMOVE               7
+#define XFS_TRANS_LINK                 8
+#define XFS_TRANS_RENAME               9
+#define XFS_TRANS_MKDIR                        10
+#define XFS_TRANS_RMDIR                        11
+#define XFS_TRANS_SYMLINK              12
+#define XFS_TRANS_SET_DMATTRS          13
+#define XFS_TRANS_GROWFS               14
+#define XFS_TRANS_STRAT_WRITE          15
+#define XFS_TRANS_DIOSTRAT             16
+/* 17 was XFS_TRANS_WRITE_SYNC */
+#define        XFS_TRANS_WRITEID               18
+#define        XFS_TRANS_ADDAFORK              19
+#define        XFS_TRANS_ATTRINVAL             20
+#define        XFS_TRANS_ATRUNCATE             21
+#define        XFS_TRANS_ATTR_SET              22
+#define        XFS_TRANS_ATTR_RM               23
+#define        XFS_TRANS_ATTR_FLAG             24
+#define        XFS_TRANS_CLEAR_AGI_BUCKET      25
+#define XFS_TRANS_QM_SBCHANGE          26
+/*
+ * Dummy entries since we use the transaction type to index into the
+ * trans_type[] in xlog_recover_print_trans_head()
+ */
+#define XFS_TRANS_DUMMY1               27
+#define XFS_TRANS_DUMMY2               28
+#define XFS_TRANS_QM_QUOTAOFF          29
+#define XFS_TRANS_QM_DQALLOC           30
+#define XFS_TRANS_QM_SETQLIM           31
+#define XFS_TRANS_QM_DQCLUSTER         32
+#define XFS_TRANS_QM_QINOCREATE                33
+#define XFS_TRANS_QM_QUOTAOFF_END      34
+#define XFS_TRANS_SB_UNIT              35
+#define XFS_TRANS_FSYNC_TS             36
+#define        XFS_TRANS_GROWFSRT_ALLOC        37
+#define        XFS_TRANS_GROWFSRT_ZERO         38
+#define        XFS_TRANS_GROWFSRT_FREE         39
+#define        XFS_TRANS_SWAPEXT               40
+#define        XFS_TRANS_SB_COUNT              41
+#define        XFS_TRANS_CHECKPOINT            42
+#define        XFS_TRANS_ICREATE               43
+#define        XFS_TRANS_TYPE_MAX              43
+/* new transaction types need to be reflected in xfs_logprint(8) */
+
+#define XFS_TRANS_TYPES \
+       { XFS_TRANS_SETATTR_NOT_SIZE,   "SETATTR_NOT_SIZE" }, \
+       { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
+       { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
+       { XFS_TRANS_CREATE,             "CREATE" }, \
+       { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
+       { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
+       { XFS_TRANS_REMOVE,             "REMOVE" }, \
+       { XFS_TRANS_LINK,               "LINK" }, \
+       { XFS_TRANS_RENAME,             "RENAME" }, \
+       { XFS_TRANS_MKDIR,              "MKDIR" }, \
+       { XFS_TRANS_RMDIR,              "RMDIR" }, \
+       { XFS_TRANS_SYMLINK,            "SYMLINK" }, \
+       { XFS_TRANS_SET_DMATTRS,        "SET_DMATTRS" }, \
+       { XFS_TRANS_GROWFS,             "GROWFS" }, \
+       { XFS_TRANS_STRAT_WRITE,        "STRAT_WRITE" }, \
+       { XFS_TRANS_DIOSTRAT,           "DIOSTRAT" }, \
+       { XFS_TRANS_WRITEID,            "WRITEID" }, \
+       { XFS_TRANS_ADDAFORK,           "ADDAFORK" }, \
+       { XFS_TRANS_ATTRINVAL,          "ATTRINVAL" }, \
+       { XFS_TRANS_ATRUNCATE,          "ATRUNCATE" }, \
+       { XFS_TRANS_ATTR_SET,           "ATTR_SET" }, \
+       { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
+       { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
+       { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
+       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
+       { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
+       { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
+       { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
+       { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
+       { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
+       { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
+       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
+       { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
+       { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
+       { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
+       { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
+       { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
+       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
+       { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
+       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
+       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
+       { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
+
+/*
+ * This structure is used to track log items associated with
+ * a transaction.  It points to the log item and keeps some
+ * flags to track the state of the log item.  It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+struct xfs_log_item_desc {
+       struct xfs_log_item     *lid_item;
+       struct list_head        lid_trans;
+       unsigned char           lid_flags;
+};
+
+#define XFS_LID_DIRTY          0x1
+
+/* log size calculation functions */
+int    xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
+int    xfs_log_calc_minimum_size(struct xfs_mount *);
+
+
+/*
+ * Values for t_flags.
+ */
+#define        XFS_TRANS_DIRTY         0x01    /* something needs to be logged */
+#define        XFS_TRANS_SB_DIRTY      0x02    /* superblock is modified */
+#define        XFS_TRANS_PERM_LOG_RES  0x04    /* xact took a permanent log res */
+#define        XFS_TRANS_SYNC          0x08    /* make commit synchronous */
+#define XFS_TRANS_DQ_DIRTY     0x10    /* at least one dquot in trx dirty */
+#define XFS_TRANS_RESERVE      0x20    /* OK to use reserved data blocks */
+#define XFS_TRANS_FREEZE_PROT  0x40    /* Transaction has elevated writer
+                                          count in superblock */
+/*
+ * Values for call flags parameter.
+ */
+#define        XFS_TRANS_RELEASE_LOG_RES       0x4
+#define        XFS_TRANS_ABORT                 0x8
+
+/*
+ * Field values for xfs_trans_mod_sb.
+ */
+#define        XFS_TRANS_SB_ICOUNT             0x00000001
+#define        XFS_TRANS_SB_IFREE              0x00000002
+#define        XFS_TRANS_SB_FDBLOCKS           0x00000004
+#define        XFS_TRANS_SB_RES_FDBLOCKS       0x00000008
+#define        XFS_TRANS_SB_FREXTENTS          0x00000010
+#define        XFS_TRANS_SB_RES_FREXTENTS      0x00000020
+#define        XFS_TRANS_SB_DBLOCKS            0x00000040
+#define        XFS_TRANS_SB_AGCOUNT            0x00000080
+#define        XFS_TRANS_SB_IMAXPCT            0x00000100
+#define        XFS_TRANS_SB_REXTSIZE           0x00000200
+#define        XFS_TRANS_SB_RBMBLOCKS          0x00000400
+#define        XFS_TRANS_SB_RBLOCKS            0x00000800
+#define        XFS_TRANS_SB_REXTENTS           0x00001000
+#define        XFS_TRANS_SB_REXTSLOG           0x00002000
+
+/*
+ * Here we centralize the specification of XFS meta-data buffer reference count
+ * values.  This determines how hard the buffer cache tries to hold onto the
+ * buffer.
+ */
+#define        XFS_AGF_REF             4
+#define        XFS_AGI_REF             4
+#define        XFS_AGFL_REF            3
+#define        XFS_INO_BTREE_REF       3
+#define        XFS_ALLOC_BTREE_REF     2
+#define        XFS_BMAP_BTREE_REF      2
+#define        XFS_DIR_BTREE_REF       2
+#define        XFS_INO_REF             2
+#define        XFS_ATTR_BTREE_REF      1
+#define        XFS_DQUOT_REF           1
+
+/*
+ * Flags for xfs_trans_ichgtime().
+ */
+#define        XFS_ICHGTIME_MOD        0x1     /* data fork modification timestamp */
+#define        XFS_ICHGTIME_CHG        0x2     /* inode field change timestamp */
+#define        XFS_ICHGTIME_CREATE     0x4     /* inode create timestamp */
+
+
+/*
+ * Symlink decoding/encoding functions
+ */
+int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
+int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+                       uint32_t size, struct xfs_buf *bp);
+bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+                       uint32_t size, struct xfs_buf *bp);
+void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
+                                struct xfs_inode *ip, struct xfs_ifork *ifp);
+
+#endif /* __XFS_SHARED_H__ */
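
Aside (not part of this diff): XFS_TRANS_TYPES above pairs each transaction type with a printable name, in the flag-list form consumed by tracepoint helpers. A minimal user-space sketch of the same lookup idea, using a hypothetical subset of the table; ex_trans_type_name() is illustrative only:

#include <stdio.h>

struct ex_trans_name {
        int             type;
        const char      *name;
};

/* hypothetical subset of the XFS_TRANS_TYPES table above */
static const struct ex_trans_name ex_trans_names[] = {
        { 14, "GROWFS" },               /* XFS_TRANS_GROWFS */
        { 38, "GROWFSRT_ZERO" },        /* XFS_TRANS_GROWFSRT_ZERO */
        { 40, "SWAPEXT" },              /* XFS_TRANS_SWAPEXT */
};

static const char *ex_trans_type_name(int type)
{
        unsigned int i;

        for (i = 0; i < sizeof(ex_trans_names) / sizeof(ex_trans_names[0]); i++)
                if (ex_trans_names[i].type == type)
                        return ex_trans_names[i].name;
        return "UNKNOWN";
}

int main(void)
{
        printf("%s\n", ex_trans_type_name(38)); /* prints GROWFSRT_ZERO */
        return 0;
}
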
index 15188cc9944919e275e43e4978056efa91801343..4eb63ad87d7dbe88c59971222a7aa82697231a0d 100644 (file)
  */
 
 #include "xfs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
 #include "xfs_fsops.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_trans_priv.h"
-#include "xfs_filestream.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_extfree_item.h"
 #include "xfs_mru_cache.h"
@@ -52,6 +44,9 @@
 #include "xfs_icache.h"
 #include "xfs_trace.h"
 #include "xfs_icreate_item.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
+#include "xfs_quota.h"
 
 #include <linux/namei.h>
 #include <linux/init.h>
@@ -946,10 +941,6 @@ xfs_fs_destroy_inode(
 
        XFS_STATS_INC(vn_reclaim);
 
-       /* bad inode, get out here ASAP */
-       if (is_bad_inode(inode))
-               goto out_reclaim;
-
        ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
 
        /*
@@ -965,7 +956,6 @@ xfs_fs_destroy_inode(
         * this more efficiently than we can here, so simply let background
         * reclaim tear down all inodes.
         */
-out_reclaim:
        xfs_inode_set_reclaim_tag(ip);
 }
 
@@ -1246,7 +1236,7 @@ xfs_fs_remount(
                         */
 #if 0
                        xfs_info(mp,
-               "mount option \"%s\" not supported for remount\n", p);
+               "mount option \"%s\" not supported for remount", p);
                        return -EINVAL;
 #else
                        break;
@@ -1491,10 +1481,6 @@ xfs_fs_fill_super(
                error = ENOENT;
                goto out_unmount;
        }
-       if (is_bad_inode(root)) {
-               error = EINVAL;
-               goto out_unmount;
-       }
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                error = ENOMEM;
index f622a97a7e3383d287d85a3787c2feecc8a36b3f..14e58f2c96bd71708f1c608536b835b25f05e795 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_shared.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 /* ----- Kernel only functions below ----- */
 STATIC int
@@ -424,8 +424,7 @@ xfs_symlink(
  */
 STATIC int
 xfs_inactive_symlink_rmt(
-       xfs_inode_t     *ip,
-       xfs_trans_t     **tpp)
+       struct xfs_inode *ip)
 {
        xfs_buf_t       *bp;
        int             committed;
@@ -437,11 +436,9 @@ xfs_inactive_symlink_rmt(
        xfs_mount_t     *mp;
        xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
        int             nmaps;
-       xfs_trans_t     *ntp;
        int             size;
        xfs_trans_t     *tp;
 
-       tp = *tpp;
        mp = ip->i_mount;
        ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
        /*
@@ -453,6 +450,16 @@ xfs_inactive_symlink_rmt(
         */
        ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
 
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
        /*
         * Lock the inode, fix the size, and join it to the transaction.
         * Hold it so in the normal path, we still have it locked for
@@ -471,7 +478,7 @@ xfs_inactive_symlink_rmt(
        error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
                                mval, &nmaps, 0);
        if (error)
-               goto error0;
+               goto error_trans_cancel;
        /*
         * Invalidate the block(s). No validation is done.
         */
@@ -481,22 +488,24 @@ xfs_inactive_symlink_rmt(
                        XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
                if (!bp) {
                        error = ENOMEM;
-                       goto error1;
+                       goto error_bmap_cancel;
                }
                xfs_trans_binval(tp, bp);
        }
        /*
         * Unmap the dead block(s) to the free_list.
         */
-       if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
-                       &first_block, &free_list, &done)))
-               goto error1;
+       error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
+                           &first_block, &free_list, &done);
+       if (error)
+               goto error_bmap_cancel;
        ASSERT(done);
        /*
         * Commit the first transaction.  This logs the EFI and the inode.
         */
-       if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
-               goto error1;
+       error = xfs_bmap_finish(&tp, &free_list, &committed);
+       if (error)
+               goto error_bmap_cancel;
        /*
         * The transaction must have been committed, since there were
         * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
@@ -510,27 +519,14 @@ xfs_inactive_symlink_rmt(
         */
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       /*
-        * Get a new, empty transaction to return to our caller.
-        */
-       ntp = xfs_trans_dup(tp);
        /*
         * Commit the transaction containing extent freeing and EFDs.
-        * If we get an error on the commit here or on the reserve below,
-        * we need to unlock the inode since the new transaction doesn't
-        * have the inode attached.
         */
-       error = xfs_trans_commit(tp, 0);
-       tp = ntp;
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
        if (error) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               goto error0;
+               goto error_unlock;
        }
-       /*
-        * transaction commit worked ok so we can drop the extra ticket
-        * reference that we gained in xfs_trans_dup()
-        */
-       xfs_log_ticket_put(tp->t_ticket);
 
        /*
         * Remove the memory for extent descriptions (just bookkeeping).
@@ -538,23 +534,16 @@ xfs_inactive_symlink_rmt(
        if (ip->i_df.if_bytes)
                xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
        ASSERT(ip->i_df.if_bytes == 0);
-       /*
-        * Put an itruncate log reservation in the new transaction
-        * for our caller.
-        */
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
-       if (error) {
-               ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               goto error0;
-       }
 
-       xfs_trans_ijoin(tp, ip, 0);
-       *tpp = tp;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
 
- error1:
+error_bmap_cancel:
        xfs_bmap_cancel(&free_list);
- error0:
+error_trans_cancel:
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
 }
 
@@ -563,41 +552,46 @@ xfs_inactive_symlink_rmt(
  */
 int
 xfs_inactive_symlink(
-       struct xfs_inode        *ip,
-       struct xfs_trans        **tp)
+       struct xfs_inode        *ip)
 {
        struct xfs_mount        *mp = ip->i_mount;
        int                     pathlen;
 
        trace_xfs_inactive_symlink(ip);
 
-       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
 
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+
        /*
         * Zero length symlinks _can_ exist.
         */
        pathlen = (int)ip->i_d.di_size;
-       if (!pathlen)
+       if (!pathlen) {
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
                return 0;
+       }
 
        if (pathlen < 0 || pathlen > MAXPATHLEN) {
                xfs_alert(mp, "%s: inode (0x%llx) bad symlink length (%d)",
                         __func__, (unsigned long long)ip->i_ino, pathlen);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
                ASSERT(0);
                return XFS_ERROR(EFSCORRUPTED);
        }
 
        if (ip->i_df.if_flags & XFS_IFINLINE) {
-               if (ip->i_df.if_bytes > 0)
+               if (ip->i_df.if_bytes > 0) 
                        xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
                                          XFS_DATA_FORK);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
                ASSERT(ip->i_df.if_bytes == 0);
                return 0;
        }
 
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
        /* remove the remote symlink */
-       return xfs_inactive_symlink_rmt(ip, tp);
+       return xfs_inactive_symlink_rmt(ip);
 }
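
Illustration only, not part of the patch: with xfs_inactive_symlink() now taking just the inode, a caller such as xfs_inactive() no longer threads a transaction through the symlink teardown. The function name example_inactive and its body are hypothetical.

static void example_inactive(struct xfs_inode *ip)
{
        int     error;

        if (S_ISLNK(ip->i_d.di_mode)) {
                /* xfs_inactive_symlink() now locks the inode and allocates,
                 * reserves and commits its own transaction internally */
                error = xfs_inactive_symlink(ip);
                if (error)
                        return;
        }
        /* ... truncate/ifree handling continues as before ... */
}
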
index 99338ba666ac68c11350fb1b24906364c7a6de01..e75245d0911611aeca2e78dfeae633c03bd34c3e 100644 (file)
@@ -22,6 +22,6 @@
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
                const char *target_path, umode_t mode, struct xfs_inode **ipp);
 int xfs_readlink(struct xfs_inode *ip, char *link);
-int xfs_inactive_symlink(struct xfs_inode *ip, struct xfs_trans **tpp);
+int xfs_inactive_symlink(struct xfs_inode *ip);
 
 #endif /* __XFS_SYMLINK_H */
index 01c85e3f64703e6b7a5c170c9e21421fe36d4e4e..bf59a2b45f8c40c431de3e8f52f3131d80d68a1c 100644 (file)
@@ -19,8 +19,9 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
@@ -30,6 +31,7 @@
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 
 
index 5d7b3e40705ffe4a96c75493d09ac74d9ae265cd..dee3279c095e6a32dcf460ca58acda0aa924bb2d 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
@@ -37,6 +34,8 @@
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_buf_item.h"
 #include "xfs_quota.h"
@@ -46,6 +45,7 @@
 #include "xfs_dquot.h"
 #include "xfs_log_recover.h"
 #include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * We include this last to have the helpers above available for the trace
index 5411e01ab4527318b187846fa3e7a53ad6300eb3..c812c5c060de1caa7532f1cdcc63766a4a227207 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
 #include "xfs_quota.h"
-#include "xfs_qm.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
-#include "xfs_trans_space.h"
-#include "xfs_inode_item.h"
-#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
+#include "xfs_log.h"
 #include "xfs_trace.h"
+#include "xfs_error.h"
 
 kmem_zone_t    *xfs_trans_zone;
 kmem_zone_t    *xfs_log_item_desc_zone;
index 09cf40b89e8c1d85817cc649b22a12c3ba97aa78..9b96d35e483de4cb075170aef47db2a1f06a307e 100644 (file)
 #ifndef        __XFS_TRANS_H__
 #define        __XFS_TRANS_H__
 
-struct xfs_log_item;
-
-#include "xfs_trans_resv.h"
-
 /* kernel only transaction subsystem defines */
 
 struct xfs_buf;
@@ -77,6 +73,9 @@ struct xfs_item_ops {
        void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 };
 
+void   xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
+                         int type, const struct xfs_item_ops *ops);
+
 /*
  * Return values for the iop_push() routines.
  */
@@ -85,18 +84,12 @@ struct xfs_item_ops {
 #define XFS_ITEM_LOCKED                2
 #define XFS_ITEM_FLUSHING      3
 
-/*
- * This is the type of function which can be given to xfs_trans_callback()
- * to be called upon the transaction's commit to disk.
- */
-typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
 
 /*
  * This is the structure maintained for every active transaction.
  */
 typedef struct xfs_trans {
        unsigned int            t_magic;        /* magic number */
-       xfs_log_callback_t      t_logcb;        /* log callback struct */
        unsigned int            t_type;         /* transaction type */
        unsigned int            t_log_res;      /* amt of log space resvd */
        unsigned int            t_log_count;    /* count for perm log res */
@@ -132,7 +125,6 @@ typedef struct xfs_trans {
        int64_t                 t_rextents_delta;/* superblocks rextents chg */
        int64_t                 t_rextslog_delta;/* superblocks rextslog chg */
        struct list_head        t_items;        /* log item descriptors */
-       xfs_trans_header_t      t_header;       /* header for in-log trans */
        struct list_head        t_busy;         /* list of busy extents */
        unsigned long           t_pflags;       /* saved process flags state */
 } xfs_trans_t;
@@ -237,10 +229,16 @@ void              xfs_trans_log_efd_extent(xfs_trans_t *,
                                         xfs_fsblock_t,
                                         xfs_extlen_t);
 int            xfs_trans_commit(xfs_trans_t *, uint flags);
+int            xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void           xfs_trans_cancel(xfs_trans_t *, int);
 int            xfs_trans_ail_init(struct xfs_mount *);
 void           xfs_trans_ail_destroy(struct xfs_mount *);
 
+void           xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
+                                      enum xfs_blft);
+void           xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
+                                       struct xfs_buf *src_bp);
+
 extern kmem_zone_t     *xfs_trans_zone;
 extern kmem_zone_t     *xfs_log_item_desc_zone;
 
index 21c6d7ddbc06b474e102b30cb6a13c7690d1a2a5..4b47cfebd25b8ad34f353469281484d4a8df1295 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_trace.h"
 #include "xfs_error.h"
+#include "xfs_log.h"
 
 #ifdef DEBUG
 /*
index 8c75b8f672702419beede8e54363ec259616039a..c035d11b7734196c4fd689121d945e598a8d6d7a 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
index 54ee3c5dee76093b6a6136a5fde759a8be309ccd..cd2a10e15d3ac5520661ed2877f2e5c9cdac48d9 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
+#include "xfs_quota.h"
 #include "xfs_qm.h"
 
 STATIC void    xfs_trans_alloc_dqinfo(xfs_trans_t *);
index 8d71b16eccaeaca46a0a7e92a7bb69ac3d1a4b4e..47978ba89dae35307c5341d7b50bc3dc3728d140 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_extfree_item.h"
 
index 53dfe46f3680791a8eab2e72c1a92db3cb17c091..1bba7f60d94cab1fe153b073b8ca42f24fbd4bfc 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
index c52def0b441cd89f2cb207e1b7ba5a9f22cc915c..12e86af9d9b94327ea13384fe4a6c2018bf84e1d 100644 (file)
@@ -27,7 +27,6 @@ struct xfs_log_vec;
 
 
 void   xfs_trans_init(struct xfs_mount *);
-int    xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void   xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void   xfs_trans_del_item(struct xfs_log_item *);
 void   xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
index a65a3cc40610abe92b0b158a83329190d6cbdf91..d53d9f0627a779cacab8adaa5aea71be36f8e41e 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_ialloc.h"
-#include "xfs_alloc.h"
-#include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
-#include "xfs_bmap_util.h"
 #include "xfs_quota.h"
+#include "xfs_trans.h"
 #include "xfs_qm.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
index db14d0c08682b90949f031532069f715ca628804..3e8e797c6d110ce01b0964a2e3845d8a59defd0b 100644 (file)
@@ -24,14 +24,6 @@ struct file;
 struct xfs_inode;
 struct attrlist_cursor_kern;
 
-/*
- * Return values for xfs_inactive.  A return value of
- * VN_INACTIVE_NOCACHE implies that the file system behavior
- * has disassociated its state and bhv_desc_t from the vnode.
- */
-#define        VN_INACTIVE_CACHE       0
-#define        VN_INACTIVE_NOCACHE     1
-
 /*
  * Flags for read/write calls - same values as IRIX
  */
index e01f35ea76ba436310f11d82b9fdf78322d8f6fe..9d479073ba415d6b482fbecbf160b6b99e4e859c 100644 (file)
  */
 
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
index cf051e05a8fe682894251ec5944299a8ead8277f..4e280bd226ddaa107a3d19f08b07389279b53922 100644 (file)
@@ -125,8 +125,9 @@ struct acpi_exception_info {
 #define AE_NO_HANDLER                   EXCEP_ENV (0x001A)
 #define AE_OWNER_ID_LIMIT               EXCEP_ENV (0x001B)
 #define AE_NOT_CONFIGURED               EXCEP_ENV (0x001C)
+#define AE_ACCESS                       EXCEP_ENV (0x001D)
 
-#define AE_CODE_ENV_MAX                 0x001C
+#define AE_CODE_ENV_MAX                 0x001D
 
 /*
  * Programmer exceptions
@@ -227,7 +228,7 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
        EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"),
        EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"),
        EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"),
-       EXCEP_TXT("AE_NOT_FOUND", "The name was not found in the namespace"),
+       EXCEP_TXT("AE_NOT_FOUND", "A requested entity is not found"),
        EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"),
        EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"),
        EXCEP_TXT("AE_TYPE", "The object type is incorrect"),
@@ -259,7 +260,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
        EXCEP_TXT("AE_OWNER_ID_LIMIT",
                  "There are no more Owner IDs available for ACPI tables or control methods"),
        EXCEP_TXT("AE_NOT_CONFIGURED",
-                 "The interface is not part of the current subsystem configuration")
+                 "The interface is not part of the current subsystem configuration"),
+       EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
index 02e113bb8b7d5c777fdc435a5285208238ce66e9..15100f625e6550a94bc2afe721ccf3804a2360d5 100644 (file)
@@ -222,7 +222,8 @@ struct acpi_device_power_flags {
        u32 power_resources:1;  /* Power resources */
        u32 inrush_current:1;   /* Serialize Dx->D0 */
        u32 power_removed:1;    /* Optimize Dx->D0 */
-       u32 reserved:28;
+       u32 ignore_parent:1;    /* Power is independent of parent power state */
+       u32 reserved:27;
 };
 
 struct acpi_device_power_state {
@@ -311,7 +312,6 @@ struct acpi_device {
        unsigned int physical_node_count;
        struct list_head physical_node_list;
        struct mutex physical_node_lock;
-       struct list_head power_dependent;
        void (*remove)(struct acpi_device *);
 };
 
@@ -456,8 +456,6 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
 acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
                                    acpi_notify_handler handler);
 int acpi_pm_device_sleep_state(struct device *, int *, int);
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev);
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev);
 #else
 static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
                                               acpi_notify_handler handler,
@@ -478,10 +476,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
        return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
                m : ACPI_STATE_D0;
 }
-static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
-                                            struct device *depdev) {}
-static inline void acpi_dev_pm_remove_dependent(acpi_handle handle,
-                                               struct device *depdev) {}
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
index 85bfdbe178052bd2231dce137b1463d174b2bffe..c7b1475422b3f3ed140e852f9a3407719c8f68b2 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20130725
+#define ACPI_CA_VERSION                 0x20130823
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -280,9 +280,16 @@ acpi_status
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
-                               acpi_install_global_event_handler
-                               (acpi_gbl_event_handler handler, void *context))
-
+                               acpi_install_sci_handler(acpi_sci_handler
+                                                        address,
+                                                        void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                                acpi_remove_sci_handler(acpi_sci_handler
+                                                        address))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                                acpi_install_global_event_handler
+                                (acpi_gbl_event_handler handler,
+                                 void *context))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                 acpi_install_fixed_event_handler(u32
                                                                  acpi_event,
index b748aefce929983cf94d3729db782e62c2843768..f6abf23ad0a71d51df5c86806c1b504b678a4bc1 100644 (file)
@@ -474,6 +474,11 @@ typedef u64 acpi_integer;
 #define ACPI_MOVE_NAME(dest,src)        (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
 #endif
 
+/* Support for the special RSDP signature (8 characters) */
+
+#define ACPI_VALIDATE_RSDP_SIG(a)       (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest)        (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+
 /*******************************************************************************
  *
  * Miscellaneous constants
@@ -945,6 +950,9 @@ typedef void
 /*
  * Various handlers and callback procedures
  */
+typedef
+u32 (*acpi_sci_handler) (void *context);
+
 typedef
 void (*acpi_gbl_event_handler) (u32 event_type,
                               acpi_handle device,
index 68534ef86ec81a903e09ccd93169baf69b4bb73e..fda0f3e35c03fc40043cbfdba951cbe1852d9ca6 100644 (file)
@@ -87,7 +87,7 @@
 #define ACPI_FLUSH_CPU_CACHE()
 #define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
 
-#if defined(__ia64__) || defined(__x86_64__)
+#if defined(__ia64__) || defined(__x86_64__) || defined(__aarch64__)
 #define ACPI_MACHINE_WIDTH          64
 #define COMPILER_DEPENDENT_INT64    long
 #define COMPILER_DEPENDENT_UINT64   unsigned long
index 66096d06925e417d70d6b8974a00a035d05ef1d8..7816e45f5d5a0177b7aac0b4b40fdd9e125a97bb 100644 (file)
@@ -199,6 +199,7 @@ struct acpi_processor_flags {
 struct acpi_processor {
        acpi_handle handle;
        u32 acpi_id;
+       u32 apic_id;
        u32 id;
        u32 pblk;
        int performance_platform_limit;
@@ -314,6 +315,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 
 /* in processor_core.c */
 void acpi_processor_set_pdc(acpi_handle handle);
+int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int apic_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_throttling.c */
index d06079c774a081e5a5f467da8cb4680c9d23054e..99b490b4d05aa5cff749937f3a3a69a5c3406f92 100644 (file)
@@ -6,12 +6,12 @@ static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
        return mk_pte(page, pgprot);
 }
 
-static inline int huge_pte_write(pte_t pte)
+static inline unsigned long huge_pte_write(pte_t pte)
 {
        return pte_write(pte);
 }
 
-static inline int huge_pte_dirty(pte_t pte)
+static inline unsigned long huge_pte_dirty(pte_t pte)
 {
        return pte_dirty(pte);
 }
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..b1a49677fe2540e93aae3b40b1c235949ac756cf 100644 (file)
@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */
index 290734191f72d85a855022a2f0e24e7a6c895ffb..b46fb45f2cca4a5881b64d93577e6ae03a221670 100644 (file)
@@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data,
 extern int drm_rmctx(struct drm_device *dev, void *data,
                     struct drm_file *file_priv);
 
-extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-                                        struct drm_file *file_priv);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
 extern int drm_setsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
index edbd250809cb6d415e893a2d26a9d6db302925fd..bed35e36fd2748515ed75f47c9d642462223c852 100644 (file)
@@ -23,7 +23,7 @@
 #define PULL_UP                        (1 << 4)
 #define ALTELECTRICALSEL       (1 << 5)
 
-/* 34xx specific mux bit defines */
+/* omap3/4/5 specific mux bit defines */
 #define INPUT_EN               (1 << 8)
 #define OFF_EN                 (1 << 9)
 #define OFFOUT_EN              (1 << 10)
@@ -31,8 +31,6 @@
 #define OFF_PULL_EN            (1 << 12)
 #define OFF_PULL_UP            (1 << 13)
 #define WAKEUP_EN              (1 << 14)
-
-/* 44xx specific mux bit defines */
 #define WAKEUP_EVENT           (1 << 15)
 
 /* Active pin states */
index a5db4aeefa3642107e28b431c21a89c195569993..35e68358ad066e8f34b18e31e548228d4706db5f 100644 (file)
@@ -116,7 +116,7 @@ void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int *pcpu);
+int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
 int acpi_unmap_lsapic(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
@@ -294,58 +294,51 @@ void __init acpi_nvs_nosave_s3(void);
 #endif /* CONFIG_PM_SLEEP */
 
 struct acpi_osc_context {
-       char *uuid_str; /* uuid string */
+       char *uuid_str;                 /* UUID string */
        int rev;
-       struct acpi_buffer cap; /* arg2/arg3 */
-       struct acpi_buffer ret; /* free by caller if success */
+       struct acpi_buffer cap;         /* list of DWORD capabilities */
+       struct acpi_buffer ret;         /* free by caller if success */
 };
 
-#define OSC_QUERY_TYPE                 0
-#define OSC_SUPPORT_TYPE               1
-#define OSC_CONTROL_TYPE               2
-
-/* _OSC DW0 Definition */
-#define OSC_QUERY_ENABLE               1
-#define OSC_REQUEST_ERROR              2
-#define OSC_INVALID_UUID_ERROR         4
-#define OSC_INVALID_REVISION_ERROR     8
-#define OSC_CAPABILITIES_MASK_ERROR    16
-
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 
-/* platform-wide _OSC bits */
-#define OSC_SB_PAD_SUPPORT             1
-#define OSC_SB_PPC_OST_SUPPORT         2
-#define OSC_SB_PR3_SUPPORT             4
-#define OSC_SB_HOTPLUG_OST_SUPPORT     8
-#define OSC_SB_APEI_SUPPORT            16
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+#define OSC_QUERY_DWORD                                0       /* DWORD 1 */
+#define OSC_SUPPORT_DWORD                      1       /* DWORD 2 */
+#define OSC_CONTROL_DWORD                      2       /* DWORD 3 */
+
+/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
+#define OSC_QUERY_ENABLE                       0x00000001  /* input */
+#define OSC_REQUEST_ERROR                      0x00000002  /* return */
+#define OSC_INVALID_UUID_ERROR                 0x00000004  /* return */
+#define OSC_INVALID_REVISION_ERROR             0x00000008  /* return */
+#define OSC_CAPABILITIES_MASK_ERROR            0x00000010  /* return */
+
+/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_SB_PAD_SUPPORT                     0x00000001
+#define OSC_SB_PPC_OST_SUPPORT                 0x00000002
+#define OSC_SB_PR3_SUPPORT                     0x00000004
+#define OSC_SB_HOTPLUG_OST_SUPPORT             0x00000008
+#define OSC_SB_APEI_SUPPORT                    0x00000010
+#define OSC_SB_CPC_SUPPORT                     0x00000020
 
 extern bool osc_sb_apei_support_acked;
 
-/* PCI defined _OSC bits */
-/* _OSC DW1 Definition (OS Support Fields) */
-#define OSC_EXT_PCI_CONFIG_SUPPORT             1
-#define OSC_ACTIVE_STATE_PWR_SUPPORT           2
-#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT       4
-#define OSC_PCI_SEGMENT_GROUPS_SUPPORT         8
-#define OSC_MSI_SUPPORT                                16
-#define OSC_PCI_SUPPORT_MASKS                  0x1f
-
-/* _OSC DW1 Definition (OS Control Fields) */
-#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL      1
-#define OSC_SHPC_NATIVE_HP_CONTROL             2
-#define OSC_PCI_EXPRESS_PME_CONTROL            4
-#define OSC_PCI_EXPRESS_AER_CONTROL            8
-#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL  16
-
-#define OSC_PCI_CONTROL_MASKS  (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |    \
-                               OSC_SHPC_NATIVE_HP_CONTROL |            \
-                               OSC_PCI_EXPRESS_PME_CONTROL |           \
-                               OSC_PCI_EXPRESS_AER_CONTROL |           \
-                               OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |    \
-                               OSC_SHPC_NATIVE_HP_CONTROL)
+/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_PCI_EXT_CONFIG_SUPPORT             0x00000001
+#define OSC_PCI_ASPM_SUPPORT                   0x00000002
+#define OSC_PCI_CLOCK_PM_SUPPORT               0x00000004
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT         0x00000008
+#define OSC_PCI_MSI_SUPPORT                    0x00000010
+#define OSC_PCI_SUPPORT_MASKS                  0x0000001f
+
+/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL      0x00000001
+#define OSC_PCI_SHPC_NATIVE_HP_CONTROL         0x00000002
+#define OSC_PCI_EXPRESS_PME_CONTROL            0x00000004
+#define OSC_PCI_EXPRESS_AER_CONTROL            0x00000008
+#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL     0x00000010
+#define OSC_PCI_CONTROL_MASKS                  0x0000001f
 
 extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
                                             u32 *mask, u32 req);
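
A hedged usage sketch, not taken from the patch: with the renamed PCI Host Bridge _OSC control bits, a host bridge driver could request native PCIe hotplug and PME control roughly as below. The wrapper name example_request_pcie_control is hypothetical; the exact grant semantics live in acpi_pci_osc_control_set(), not in this header.

static acpi_status example_request_pcie_control(acpi_handle handle)
{
        u32 control = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
                      OSC_PCI_EXPRESS_PME_CONTROL;

        /* ask for these controls; *mask reports what was granted on return */
        return acpi_pci_osc_control_set(handle, &control, control);
}
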
index d9c92daa3944e43a13f285a7baaa1443868cce3d..f01e7e370691ee5ca840d4e011bb4a1545de1f12 100644 (file)
@@ -14,6 +14,14 @@ struct kiocb;
 
 #define KIOCB_KEY              0
 
+/*
+ * opcode values not exposed to user space
+ */
+enum {
+       IOCB_CMD_READ_ITER = 0x10000,
+       IOCB_CMD_WRITE_ITER = 0x10001,
+};
+
 /*
  * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
  * cancelled or completed (this makes a certain amount of sense because
@@ -31,13 +39,15 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
 
 struct kiocb {
        struct file             *ki_filp;
-       struct kioctx           *ki_ctx;        /* NULL for sync ops */
+       struct kioctx           *ki_ctx;        /* NULL for sync ops,
+                                                * -1 for kernel caller */
        kiocb_cancel_fn         *ki_cancel;
        void                    *private;
 
        union {
                void __user             *user;
                struct task_struct      *tsk;
+               void                    (*complete)(u64 user_data, long res);
        } ki_obj;
 
        __u64                   ki_user_data;   /* user's data for completion */
@@ -59,6 +69,11 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
        return kiocb->ki_ctx == NULL;
 }
 
+static inline bool is_kernel_kiocb(struct kiocb *kiocb)
+{
+       return kiocb->ki_ctx == (void *)-1;
+}
+
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
        *kiocb = (struct kiocb) {
@@ -77,6 +92,14 @@ extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
                         struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
+struct kiocb *aio_kernel_alloc(gfp_t gfp);
+void aio_kernel_free(struct kiocb *iocb);
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp, size_t nr,
+                       loff_t off);
+void aio_kernel_init_callback(struct kiocb *iocb,
+                             void (*complete)(u64 user_data, long res),
+                             u64 user_data);
+int aio_kernel_submit(struct kiocb *iocb, unsigned op, void *ptr);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
 static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
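
Speculative sketch built only from the declarations above (the submission path itself is not shown in this hunk): a kernel-internal caller might drive an asynchronous read roughly as follows. example_complete, example_kernel_read, the GFP flag and the iov_iter passed as the ptr argument are all assumptions.

static void example_complete(u64 user_data, long res)
{
        /* res is presumably the byte count or a negative errno */
}

static int example_kernel_read(struct file *filp, struct iov_iter *iter,
                               size_t len, loff_t off)
{
        struct kiocb *iocb = aio_kernel_alloc(GFP_KERNEL);

        if (!iocb)
                return -ENOMEM;
        aio_kernel_init_rw(iocb, filp, len, off);
        aio_kernel_init_callback(iocb, example_complete, 0);
        return aio_kernel_submit(iocb, IOCB_CMD_READ_ITER, iter);
}
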
index 43ec7e247a8086972ae7ef0e87efde66dd4ac0e7..682df0e1954a96718ae1e439db73441681101145 100644 (file)
@@ -30,7 +30,6 @@ struct amba_device {
        struct device           dev;
        struct resource         res;
        struct clk              *pclk;
-       u64                     dma_mask;
        unsigned int            periphid;
        unsigned int            irq[AMBA_NR_IRQS];
 };
@@ -131,7 +130,6 @@ struct amba_device name##_device = {                                \
 struct amba_device name##_device = {                           \
        .dev = __AMBA_DEV(busid, data, ~0ULL),                  \
        .res = DEFINE_RES_MEM(base, SZ_4K),                     \
-       .dma_mask = ~0ULL,                                      \
        .irq = irqs,                                            \
        .periphid = id,                                         \
 }
index 53b77949c79d4c84b6595614206857e2b6796b1f..5f9cd963213dd3a3150abd3e34f62a6925096053 100644 (file)
@@ -100,6 +100,9 @@ struct backlight_device {
        /* The framebuffer notifier block */
        struct notifier_block fb_notif;
 
+       /* list entry of all registered backlight devices */
+       struct list_head entry;
+
        struct device dev;
 };
 
@@ -123,6 +126,7 @@ extern void devm_backlight_device_unregister(struct device *dev,
                                        struct backlight_device *bd);
 extern void backlight_force_update(struct backlight_device *bd,
                                   enum backlight_update_reason reason);
+extern bool backlight_device_registered(enum backlight_type type);
 
 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
 
index f7f1d7169b11c332612ba09a1663141870e08849..089743ade734fc564e18bfa7be35608da68d6fde 100644 (file)
@@ -158,6 +158,26 @@ static inline bool balloon_page_movable(struct page *page)
        return false;
 }
 
+/*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ *                        compaction/migration page lists.
+ *
+ * After a compaction thread isolates a balloon page for migration, it raises
+ * the page refcount to prevent concurrent compaction threads from re-isolating
+ * the same page. For that reason putback_movable_pages(), or other routines
+ * that need to identify isolated balloon pages on private pagelists, cannot
+ * rely on balloon_page_movable() to accomplish the task.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+       /* Already isolated balloon pages, by default, have a raised refcount */
+       if (page_flags_cleared(page) && !page_mapped(page) &&
+           page_count(page) >= 2)
+               return __is_movable_balloon_page(page);
+
+       return false;
+}
+
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *                      the page->mapping assignment accordingly.
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
        return false;
 }
 
+static inline bool isolated_balloon_page(struct page *page)
+{
+       return false;
+}
+
 static inline bool balloon_page_isolate(struct page *page)
 {
        return false;
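
A hedged sketch of the putback-style loop the comment above alludes to; the function name and list handling are illustrative rather than copied from the patch.

static void example_putback_pages(struct list_head *pages)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, pages, lru) {
                list_del(&page->lru);
                if (unlikely(isolated_balloon_page(page)))
                        balloon_page_putback(page);
                else
                        putback_lru_page(page);
        }
}
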
index d66033f418c98bf1818ba2c5fcbdc7bcebe8021b..0333e605ea0d752c5aa306957a80aa72c5d4094f 100644 (file)
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
                                 struct bcma_device *core, bool enable);
 extern void bcma_core_pci_up(struct bcma_bus *bus);
 extern void bcma_core_pci_down(struct bcma_bus *bus);
+extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
index ec48bac5b039d25cd74829b1ba4c6a1506412dec..4fd52534259697abc49023fe28a7b30cbaf1cc23 100644 (file)
@@ -307,6 +307,14 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+static inline ssize_t bvec_length(const struct bio_vec *bvec, unsigned long nr)
+{
+       ssize_t bytes = 0;
+       while (nr--)
+               bytes += (bvec++)->bv_len;
+       return bytes;
+}
+
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_current(struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
index a3b6b82108b9ad239dd1dda1abe6c8e3f585d3b3..5a1c8b71ccd84616b0471b833ee909c9d794be5f 100644 (file)
@@ -4,8 +4,11 @@
 
 #ifdef __KERNEL__
 #define BIT(nr)                        (1UL << (nr))
+#define BIT_ULL(nr)            (1ULL << (nr))
 #define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)       (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)       ((nr) / BITS_PER_LONG_LONG)
 #define BITS_PER_BYTE          8
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
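
A small illustration, not from the patch, of why the ULL variants matter: on a 32-bit build BIT(35) overflows an unsigned long, while BIT_ULL(35) stays well defined. The register and bit name are made up.

#define EXAMPLE_FEATURE_EN      BIT_ULL(35)     /* bit 35 of a 64-bit register */

static inline bool example_feature_enabled(u64 reg)
{
        return (reg & EXAMPLE_FEATURE_EN) != 0;
}
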
index fa1abeb45b7602a4f0c1a4098f05f63d7a075281..1bea25f14796f8fabed2b310adde5316fe893af1 100644 (file)
@@ -176,7 +176,6 @@ enum rq_flag_bits {
        __REQ_FLUSH_SEQ,        /* request for flush sequence */
        __REQ_IO_STAT,          /* account I/O stat */
        __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
-       __REQ_KERNEL,           /* direct IO to kernel pages */
        __REQ_PM,               /* runtime pm request */
        __REQ_NR_BITS,          /* stops here */
 };
@@ -227,7 +226,6 @@ enum rq_flag_bits {
 #define REQ_IO_STAT            (1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE                (1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE             (1 << __REQ_SECURE)
-#define REQ_KERNEL             (1 << __REQ_KERNEL)
 #define REQ_PM                 (1 << __REQ_PM)
 
 #endif /* __LINUX_BLK_TYPES_H */
index 2fdb4a451b49bd626d9415b231c76b7ac927cf69..0e6f765aa1f5adf3acbf54e27cea6fbe495de1cb 100644 (file)
@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
        return blk_queue_get_max_sectors(q, rq->cmd_flags);
 }
 
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+       unsigned int nr_bios = 0;
+       struct bio *bio;
+
+       __rq_for_each_bio(bio, rq)
+               nr_bios++;
+
+       return nr_bios;
+}
+
 /*
  * Request issue related functions.
  */
index 842de225055fc5b8c769c59ff37e5afa81574f38..ded429966c1f447db9106359b174fb742fd3fe54 100644 (file)
 #define __visible __attribute__((externally_visible))
 #endif
 
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#if GCC_VERSION <= 40801
+# define asm_volatile_goto(x...)       do { asm goto(x); asm (""); } while (0)
+#else
+# define asm_volatile_goto(x...)       do { asm goto(x); } while (0)
+#endif
 
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
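
A hedged, x86-flavoured sketch of an asm goto user switching to the wrapper macro; the real users are the jump-label and exception-table macros, and this simplified helper is not part of the patch.

static __always_inline bool example_bit0_set(unsigned long val)
{
        /* the branch is decided inside the asm; asm goto takes no outputs */
        asm_volatile_goto("bt $0, %0\n\t"
                          "jc %l[l_yes]"
                          : : "r" (val) : "cc" : l_yes);
        return false;
l_yes:
        return true;
}
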
index 801ff9e73679a00d7f945d82253454a236319966..3434ef7de017de0d5a33e7dd5dbfb173678c58c0 100644 (file)
@@ -185,19 +185,6 @@ extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-extern void cpu_hotplug_driver_lock(void);
-extern void cpu_hotplug_driver_unlock(void);
-#else
-static inline void cpu_hotplug_driver_lock(void)
-{
-}
-
-static inline void cpu_hotplug_driver_unlock(void)
-{
-}
-#endif
-
 #else          /* CONFIG_HOTPLUG_CPU */
 
 static inline void cpu_hotplug_begin(void) {}
index fcabc42d66ab413a38730cb78d8eedc9bda91084..0aba2a6cadafcb9e73a42dd04b74a5c1d433ad36 100644 (file)
@@ -180,13 +180,6 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 struct cpufreq_driver {
        char                    name[CPUFREQ_NAME_LEN];
        u8                      flags;
-       /*
-        * This should be set by platforms having multiple clock-domains, i.e.
-        * supporting multiple policies. With this sysfs directories of governor
-        * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
-        * use the same governor with different tunables for different clusters.
-        */
-       bool                    have_governor_per_policy;
 
        /* needed by all drivers */
        int     (*init)         (struct cpufreq_policy *policy);
@@ -211,13 +204,22 @@ struct cpufreq_driver {
 };
 
 /* flags */
-#define CPUFREQ_STICKY         0x01    /* the driver isn't removed even if
-                                        * all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS    0x02    /* loops_per_jiffy or other kernel
-                                        * "constants" aren't affected by
-                                        * frequency transitions */
-#define CPUFREQ_PM_NO_WARN     0x04    /* don't warn on suspend/resume speed
-                                        * mismatches */
+#define CPUFREQ_STICKY         (1 << 0)        /* driver isn't removed even if
+                                                  all ->init() calls failed */
+#define CPUFREQ_CONST_LOOPS    (1 << 1)        /* loops_per_jiffy or other
+                                                  kernel "constants" aren't
+                                                  affected by frequency
+                                                  transitions */
+#define CPUFREQ_PM_NO_WARN     (1 << 2)        /* don't warn on suspend/resume
+                                                  speed mismatches */
+
+/*
+ * This should be set by platforms having multiple clock-domains, i.e.
+ * supporting multiple policies. With this sysfs directories of governor would
+ * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
+ * governor with different tunables for different clusters.
+ */
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
 
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -240,6 +242,13 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
        return;
 }
 
+static inline void
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+{
+       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+                       policy->cpuinfo.max_freq);
+}
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
@@ -392,6 +401,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
@@ -407,8 +417,20 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr *cpufreq_generic_attr[];
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
                                      unsigned int cpu);
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+                                     struct cpufreq_frequency_table *table);
+
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+               struct cpufreq_frequency_table *table,
+               unsigned int transition_latency);
+static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
+{
+       cpufreq_frequency_table_put_attr(policy->cpu);
+       return 0;
+}
 
 #endif /* _LINUX_CPUFREQ_H */
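
A sketch with hypothetical names (foo_*) of how a driver might combine the new per-policy-governor flag with the generic table helpers introduced above; the table contents and latency are placeholders, and .get/.target are omitted for brevity.

static struct cpufreq_frequency_table foo_freq_table[] = {
        { .frequency =  600000 },
        { .frequency = 1200000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        /* shared table, 300 us (300000 ns) transition latency */
        return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}

static struct cpufreq_driver foo_cpufreq_driver = {
        .name   = "foo-cpufreq",
        .flags  = CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
        .verify = cpufreq_generic_frequency_table_verify,
        .init   = foo_cpufreq_init,
        .exit   = cpufreq_generic_exit,
        .attr   = cpufreq_generic_attr,
};
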
index 59066e0b4ff134fde5679d582246e942df9f86fc..716c3760ee3970908577ccdb4ea32fa0275d7e33 100644 (file)
@@ -224,6 +224,7 @@ static inline int dname_external(const struct dentry *dentry)
 extern void d_instantiate(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
 extern void d_drop(struct dentry *dentry);
 extern void d_delete(struct dentry *);
index 5f1ab92107e63b1889da7e544189f8b73cc740fb..7a7cc74d7f27e9901e59d2a61b850c0ec6acbe9e 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/notifier.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 
 #define DEVFREQ_NAME_LEN 16
 
@@ -187,7 +187,7 @@ extern int devfreq_suspend_device(struct devfreq *devfreq);
 extern int devfreq_resume_device(struct devfreq *devfreq);
 
 /* Helper functions for devfreq user device driver with OPP. */
-extern struct opp *devfreq_recommended_opp(struct device *dev,
+extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
                                           unsigned long *freq, u32 flags);
 extern int devfreq_register_opp_notifier(struct device *dev,
                                         struct devfreq *devfreq);
@@ -238,7 +238,7 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
        return 0;
 }
 
-static inline struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
                                           unsigned long *freq, u32 flags)
 {
        return ERR_PTR(-EINVAL);
index 653073de09e379ef1c8b04c1a96d0ef2c948f5a3..ed419c62dde1876f4561179b6b4bc3052a50ed14 100644 (file)
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
 /*
  * Geometry functions.
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
 
-
 /*-----------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
  *---------------------------------------------------------------*/
index 3a8d0a2af6077b45acc0126f7f6c57a3661caf7f..fd4aee29ad10caa5bd8073a31c7827baa85d11ae 100644 (file)
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 #endif
 
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+       int rc = dma_set_mask(dev, mask);
+       if (rc == 0)
+               dma_set_coherent_mask(dev, mask);
+       return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+       dev->dma_mask = &dev->coherent_dma_mask;
+       return dma_set_mask_and_coherent(dev, mask);
+}
+
 extern u64 dma_get_required_mask(struct device *dev);
 
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
                return -EIO;
 }
 
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+       return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
 {
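
A hedged example of the intended call pattern for the combined helper: try 64-bit DMA first and fall back to 32-bit. The wrapper name is hypothetical; dev would typically be &pdev->dev in a probe routine.

static int example_setup_dma(struct device *dev)
{
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        return ret;
}
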
index 0bc727534108d5a2d5d527e75eaa8020a3ccd239..4b460a683968670f5eaf836fde0ff16e88e39d9a 100644 (file)
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-       DMA_SUCCESS,
+       DMA_COMPLETE,
        DMA_IN_PROGRESS,
        DMA_PAUSED,
        DMA_ERROR,
@@ -979,10 +979,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
        if (last_complete <= last_used) {
                if ((cookie <= last_complete) || (cookie > last_used))
-                       return DMA_SUCCESS;
+                       return DMA_COMPLETE;
        } else {
                if ((cookie <= last_complete) && (cookie > last_used))
-                       return DMA_SUCCESS;
+                       return DMA_COMPLETE;
        }
        return DMA_IN_PROGRESS;
 }
@@ -1013,11 +1013,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-       return DMA_SUCCESS;
+       return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-       return DMA_SUCCESS;
+       return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {
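
Illustrative only: after the rename, client drivers compare completion status against DMA_COMPLETE instead of DMA_SUCCESS. The wrapper below is hypothetical.

static bool example_transfer_done(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;

        return dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE;
}
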
index d8b512496e50c155e99712f5226e115159beabc1..fc4a9aa7dd82c7a26e69ac21cce27bdc399dc9b1 100644 (file)
 #include <asm/unaligned.h>
 
 #ifdef __KERNEL__
-extern __be16          eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
-                     unsigned short type,
-                     const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
-                                   const struct net_device *dev,
-                                   const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
-
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+              const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+                    __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+                            const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
+
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
                                            unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
index e460ef8319841dd83d6f6f1eb8653dfd5452d6e0..5009fa16b5d8f08369ccdcb6537bdeb426598d0a 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/if_fc.h>
 
 #ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_FCDEVICE_H */
index 155bafd9e886607f5ee6b9bd824c2a1fa8ec6c32..9a79f0106da1a66ea0f7f647468fc3fd5c351108 100644 (file)
 #include <linux/if_fddi.h>
 
 #ifdef __KERNEL__
-extern __be16  fddi_type_trans(struct sk_buff *skb,
-                               struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_FDDIDEVICE_H */
index a6ac84871d6d415eb0e671b45daeddacbc7ad4a3..ff4e40cd45b1dcb15c66f1e178b655df7c40c0eb 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/atomic.h>
 #include <linux/compat.h>
+#include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
 #ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
 {
        atomic_t                refcnt;
        unsigned int            len;    /* Number of filter blocks */
+       struct rcu_head         rcu;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct sock_filter *filter);
-       struct rcu_head         rcu;
-       struct sock_filter      insns[0];
+       union {
+               struct sock_filter      insns[0];
+               struct work_struct      work;
+       };
 };
 
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
 {
-       return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+       return max(sizeof(struct sk_filter),
+                  offsetof(struct sk_filter, insns[proglen]));
 }
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 }
 #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
 #else
+#include <linux/slab.h>
 static inline void bpf_jit_compile(struct sk_filter *fp)
 {
 }
 static inline void bpf_jit_free(struct sk_filter *fp)
 {
+       kfree(fp);
 }
 #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
 #endif
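A userspace sketch (with simplified stand-in types, not the real kernel structs) of why the new sk_filter_size() takes the maximum of the whole struct size and offsetof(..., insns[proglen]): the union overlays insns[] with the work item used for deferred freeing, so even a one-instruction program must be allocated large enough to hold the work_struct.

#include <stddef.h>
#include <stdio.h>

struct insn { unsigned short code; unsigned char jt, jf; unsigned int k; };
struct work { void *entry[2]; void (*func)(void *); };	/* stand-in for struct work_struct */

struct filter {
	unsigned int refcnt;
	unsigned int len;
	union {
		struct insn insns[0];	/* program body, variable length */
		struct work work;	/* reused for deferred freeing */
	};
};

static size_t filter_size(unsigned int proglen)
{
	size_t whole = sizeof(struct filter);
	size_t prog  = offsetof(struct filter, insns[proglen]);

	return whole > prog ? whole : prog;	/* mirrors max() in sk_filter_size() */
}

int main(void)
{
	printf("1 insn  -> %zu bytes\n", filter_size(1));	/* clamped to sizeof(struct filter) */
	printf("64 insn -> %zu bytes\n", filter_size(64));	/* grows with the program */
	return 0;
}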
index 3f40547ba1917cd038f085bcdb6c6e577a9d4538..4c743ed2e46e5bb771911fb6b28a9400b7c47d5b 100644 (file)
@@ -182,8 +182,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define READ                   0
 #define WRITE                  RW_MASK
 #define READA                  RWA_MASK
-#define KERNEL_READ            (READ|REQ_KERNEL)
-#define KERNEL_WRITE           (WRITE|REQ_KERNEL)
 
 #define READ_SYNC              (READ | REQ_SYNC)
 #define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -291,25 +289,108 @@ struct address_space;
 struct writeback_control;
 
 struct iov_iter {
-       const struct iovec *iov;
+       struct iov_iter_ops *ops;
+       unsigned long data;
        unsigned long nr_segs;
        size_t iov_offset;
        size_t count;
 };
 
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
+struct iov_iter_ops {
+       size_t (*ii_copy_to_user_atomic)(struct page *, struct iov_iter *,
+                                        unsigned long, size_t);
+       size_t (*ii_copy_to_user)(struct page *, struct iov_iter *,
+                                 unsigned long, size_t, int);
+       size_t (*ii_copy_from_user_atomic)(struct page *, struct iov_iter *,
+                                          unsigned long, size_t);
+       size_t (*ii_copy_from_user)(struct page *, struct iov_iter *,
+                                         unsigned long, size_t);
+       void (*ii_advance)(struct iov_iter *, size_t);
+       int (*ii_fault_in_readable)(struct iov_iter *, size_t);
+       size_t (*ii_single_seg_count)(const struct iov_iter *);
+       int (*ii_shorten)(struct iov_iter *, size_t);
+};
+
+static inline size_t iov_iter_copy_to_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user_atomic(page, i, offset, bytes);
+}
+static inline size_t __iov_iter_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user(page, i, offset, bytes, 0);
+}
+static inline size_t iov_iter_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user(page, i, offset, bytes, 1);
+}
+static inline size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_from_user_atomic(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_from_user(page, i, offset, bytes);
+}
+static inline void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+       return i->ops->ii_advance(i, bytes);
+}
+static inline int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       return i->ops->ii_fault_in_readable(i, bytes);
+}
+static inline size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+       return i->ops->ii_single_seg_count(i);
+}
+static inline int iov_iter_shorten(struct iov_iter *i, size_t count)
+{
+       return i->ops->ii_shorten(i, count);
+}
+
+#ifdef CONFIG_BLOCK
+extern struct iov_iter_ops ii_bvec_ops;
+
+struct bio_vec;
+static inline void iov_iter_init_bvec(struct iov_iter *i,
+                                     struct bio_vec *bvec,
+                                     unsigned long nr_segs,
+                                     size_t count, size_t written)
+{
+       i->ops = &ii_bvec_ops;
+       i->data = (unsigned long)bvec;
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count + written;
+
+       iov_iter_advance(i, written);
+}
+
+static inline int iov_iter_has_bvec(struct iov_iter *i)
+{
+       return i->ops == &ii_bvec_ops;
+}
+
+static inline struct bio_vec *iov_iter_bvec(struct iov_iter *i)
+{
+       BUG_ON(!iov_iter_has_bvec(i));
+       return (struct bio_vec *)i->data;
+}
+#endif
+
+extern struct iov_iter_ops ii_iovec_ops;
 
 static inline void iov_iter_init(struct iov_iter *i,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count, size_t written)
 {
-       i->iov = iov;
+       i->ops = &ii_iovec_ops;
+       i->data = (unsigned long)iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count + written;
@@ -317,6 +398,17 @@ static inline void iov_iter_init(struct iov_iter *i,
        iov_iter_advance(i, written);
 }
 
+static inline int iov_iter_has_iovec(struct iov_iter *i)
+{
+       return i->ops == &ii_iovec_ops;
+}
+
+static inline struct iovec *iov_iter_iovec(struct iov_iter *i)
+{
+       BUG_ON(!iov_iter_has_iovec(i));
+       return (struct iovec *)i->data;
+}
+
 static inline size_t iov_iter_count(struct iov_iter *i)
 {
        return i->count;
@@ -369,8 +461,8 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int,
                                                void **, unsigned long *);
        /*
@@ -1529,7 +1621,9 @@ struct file_operations {
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1554,6 +1648,18 @@ struct file_operations {
        int (*show_fdinfo)(struct seq_file *m, struct file *f);
 };
 
+static inline int file_readable(struct file *filp)
+{
+       return filp && (filp->f_op->read || filp->f_op->aio_read ||
+                       filp->f_op->read_iter);
+}
+
+static inline int file_writable(struct file *filp)
+{
+       return filp && (filp->f_op->write || filp->f_op->aio_write ||
+                       filp->f_op->write_iter);
+}
+
 struct inode_operations {
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
        void * (*follow_link) (struct dentry *, struct nameidata *);
@@ -2292,6 +2398,11 @@ static inline void allow_write_access(struct file *file)
        if (file)
                atomic_inc(&file_inode(file)->i_writecount);
 }
+static inline bool inode_is_open_for_write(const struct inode *inode)
+{
+       return atomic_read(&inode->i_writecount) > 0;
+}
+
 #ifdef CONFIG_IMA
 static inline void i_readcount_dec(struct inode *inode)
 {
@@ -2398,25 +2509,36 @@ extern int sb_min_blocksize(struct super_block *, int);
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
-               unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+                               unsigned long size, pgoff_t pgoff);
+extern int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+                               unsigned long offset, unsigned long size);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *,
+               loff_t);
 extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
                loff_t *);
+extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t *);
 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t);
 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
                unsigned long *, loff_t, loff_t *, size_t, size_t);
+extern ssize_t generic_file_direct_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t, loff_t *, size_t);
 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
                unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern ssize_t generic_file_buffered_write_iter(struct kiocb *,
+               struct iov_iter *, loff_t, loff_t *, size_t, ssize_t);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 extern int generic_segment_checks(const struct iovec *iov,
                unsigned long *nr_segs, size_t *count, int access_flags);
 
 /* fs/block_dev.c */
-extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos);
+extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                        int datasync);
 extern void block_sync_page(struct page *page);
@@ -2473,16 +2595,16 @@ enum {
 void dio_end_io(struct bio *bio, int error);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset,
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags);
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags);
 
 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-               struct inode *inode, const struct iovec *iov, loff_t offset,
-               unsigned long nr_segs, get_block_t get_block)
+               struct inode *inode, struct iov_iter *iter, loff_t offset,
+               get_block_t get_block)
 {
-       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-                                   offset, nr_segs, get_block, NULL, NULL,
+       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+                                   offset, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
 #endif
@@ -2502,6 +2624,7 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
                int nofs);
 extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
+extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
 extern int generic_readlink(struct dentry *, char __user *, int);
 extern void generic_fillattr(struct inode *, struct kstat *);
 extern int vfs_getattr(struct path *, struct kstat *);
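A hedged sketch of how a consumer of the reworked iov_iter above would be expected to look: with the ops table in place, code can no longer dereference i->iov directly and must first check what backs the iterator. The helper name is hypothetical, not part of this diff.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/bio.h>

/* Length of the first segment, whatever the iterator is backed by. */
static size_t demo_first_seg_len(struct iov_iter *i)
{
	if (iov_iter_has_iovec(i))
		return iov_iter_iovec(i)[0].iov_len;	/* user iovec backed */
#ifdef CONFIG_BLOCK
	if (iov_iter_has_bvec(i))
		return iov_iter_bvec(i)[0].bv_len;	/* page (bio_vec) backed */
#endif
	return 0;
}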
index 7823e9ef995e2beaaa2b8e7a26a3c9619ee2e370..771484993ca7c662e6dc07c115e421050459256a 100644 (file)
@@ -308,36 +308,6 @@ struct fscache_cache_ops {
        void (*dissociate_pages)(struct fscache_cache *cache);
 };
 
-/*
- * data file or index object cookie
- * - a file will only appear in one cache
- * - a request to cache a file may or may not be honoured, subject to
- *   constraints such as disk space
- * - indices are created on disk just-in-time
- */
-struct fscache_cookie {
-       atomic_t                        usage;          /* number of users of this cookie */
-       atomic_t                        n_children;     /* number of children of this cookie */
-       atomic_t                        n_active;       /* number of active users of netfs ptrs */
-       spinlock_t                      lock;
-       spinlock_t                      stores_lock;    /* lock on page store tree */
-       struct hlist_head               backing_objects; /* object(s) backing this file/index */
-       const struct fscache_cookie_def *def;           /* definition */
-       struct fscache_cookie           *parent;        /* parent of this entry */
-       void                            *netfs_data;    /* back pointer to netfs */
-       struct radix_tree_root          stores;         /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG     0               /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG     1               /* pages tag: writing to cache */
-
-       unsigned long                   flags;
-#define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET     1       /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE     2       /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING    3       /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED    4       /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_RETIRED         5       /* T if cookie was retired */
-};
-
 extern struct fscache_cookie fscache_fsdef_index;
 
 /*
@@ -400,6 +370,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_LIVE         3       /* T if object is not withdrawn or relinquished */
 #define FSCACHE_OBJECT_IS_LOOKED_UP    4       /* T if object has been looked up */
 #define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
+#define FSCACHE_OBJECT_RETIRED         6       /* T if object was retired on relinquishment */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
@@ -511,6 +482,11 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
        op->end_io_func(page, op->context, error);
 }
 
+static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
+{
+       atomic_inc(&cookie->n_active);
+}
+
 /**
  * fscache_use_cookie - Request usage of cookie attached to an object
  * @object: Object description
@@ -524,6 +500,16 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
        return atomic_inc_not_zero(&cookie->n_active) != 0;
 }
 
+static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+{
+       return atomic_dec_and_test(&cookie->n_active);
+}
+
+static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+{
+       wake_up_atomic_t(&cookie->n_active);
+}
+
 /**
  * fscache_unuse_cookie - Cease usage of cookie attached to an object
  * @object: Object description
@@ -534,8 +520,8 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
 static inline void fscache_unuse_cookie(struct fscache_object *object)
 {
        struct fscache_cookie *cookie = object->cookie;
-       if (atomic_dec_and_test(&cookie->n_active))
-               wake_up_atomic_t(&cookie->n_active);
+       if (__fscache_unuse_cookie(cookie))
+               __fscache_wake_unused_cookie(cookie);
 }
 
 /*
index 19b46458e4e88e3e25e55692a00ace4987897441..115bb81912ccc8759a54d206487cc8936e2a90c7 100644 (file)
@@ -166,6 +166,42 @@ struct fscache_netfs {
        struct list_head                link;           /* internal link */
 };
 
+/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ *   constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+       atomic_t                        usage;          /* number of users of this cookie */
+       atomic_t                        n_children;     /* number of children of this cookie */
+       atomic_t                        n_active;       /* number of active users of netfs ptrs */
+       spinlock_t                      lock;
+       spinlock_t                      stores_lock;    /* lock on page store tree */
+       struct hlist_head               backing_objects; /* object(s) backing this file/index */
+       const struct fscache_cookie_def *def;           /* definition */
+       struct fscache_cookie           *parent;        /* parent of this entry */
+       void                            *netfs_data;    /* back pointer to netfs */
+       struct radix_tree_root          stores;         /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG     0               /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG     1               /* pages tag: writing to cache */
+
+       unsigned long                   flags;
+#define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_NO_DATA_YET     1       /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE     2       /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING    3       /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED    4       /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_ENABLED         5       /* T if cookie is enabled */
+#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6       /* T if cookie is being en/disabled */
+};
+
+static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
+{
+       return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+}
+
 /*
  * slow-path functions for when there is actually caching available, and the
  * netfs does actually have a valid token
@@ -181,8 +217,8 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
 extern struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *,
        const struct fscache_cookie_def *,
-       void *);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+       void *, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
 extern int __fscache_check_consistency(struct fscache_cookie *);
 extern void __fscache_update_cookie(struct fscache_cookie *);
 extern int __fscache_attr_changed(struct fscache_cookie *);
@@ -211,6 +247,9 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
                                              struct inode *);
 extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
                                       struct list_head *pages);
+extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *,
+                                   bool (*)(void *), void *);
 
 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -289,6 +328,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
  * @def: A description of the cache object, including callback operations
  * @netfs_data: An arbitrary piece of data to be kept in the cookie to
  * represent the cache object to the netfs
+ * @enable: Whether or not to enable a data cookie immediately
  *
  * This function is used to inform FS-Cache about part of an index hierarchy
  * that can be used to locate files.  This is done by requesting a cookie for
@@ -301,10 +341,12 @@ static inline
 struct fscache_cookie *fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
-       void *netfs_data)
+       void *netfs_data,
+       bool enable)
 {
-       if (fscache_cookie_valid(parent))
-               return __fscache_acquire_cookie(parent, def, netfs_data);
+       if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
+               return __fscache_acquire_cookie(parent, def, netfs_data,
+                                               enable);
        else
                return NULL;
 }
@@ -322,7 +364,7 @@ struct fscache_cookie *fscache_acquire_cookie(
  * description.
  */
 static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 {
        if (fscache_cookie_valid(cookie))
                __fscache_relinquish_cookie(cookie, retire);
@@ -341,7 +383,7 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 static inline
 int fscache_check_consistency(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_check_consistency(cookie);
        else
                return 0;
@@ -360,7 +402,7 @@ int fscache_check_consistency(struct fscache_cookie *cookie)
 static inline
 void fscache_update_cookie(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                __fscache_update_cookie(cookie);
 }
 
@@ -407,7 +449,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie)
 static inline
 int fscache_attr_changed(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_attr_changed(cookie);
        else
                return -ENOBUFS;
@@ -429,7 +471,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
 static inline
 void fscache_invalidate(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                __fscache_invalidate(cookie);
 }
 
@@ -503,7 +545,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                               void *context,
                               gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_read_or_alloc_page(cookie, page, end_io_func,
                                                    context, gfp);
        else
@@ -554,7 +596,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                void *context,
                                gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_read_or_alloc_pages(cookie, mapping, pages,
                                                     nr_pages, end_io_func,
                                                     context, gfp);
@@ -585,7 +627,7 @@ int fscache_alloc_page(struct fscache_cookie *cookie,
                       struct page *page,
                       gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_alloc_page(cookie, page, gfp);
        else
                return -ENOBUFS;
@@ -634,7 +676,7 @@ int fscache_write_page(struct fscache_cookie *cookie,
                       struct page *page,
                       gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_write_page(cookie, page, gfp);
        else
                return -ENOBUFS;
@@ -744,4 +786,47 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                __fscache_uncache_all_inode_pages(cookie, inode);
 }
 
+/**
+ * fscache_disable_cookie - Disable a cookie
+ * @cookie: The cookie representing the cache object
+ * @invalidate: Invalidate the backing object
+ *
+ * Disable a cookie from accepting further alloc, read, write, invalidate,
+ * update or acquire operations.  Outstanding operations can still be waited
+ * upon and pages can still be uncached and the cookie relinquished.
+ *
+ * This will not return until all outstanding operations have completed.
+ *
+ * If @invalidate is set, then the backing object will be invalidated and
+ * detached, otherwise it will just be detached.
+ */
+static inline
+void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+{
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+               __fscache_disable_cookie(cookie, invalidate);
+}
+
+/**
+ * fscache_enable_cookie - Reenable a cookie
+ * @cookie: The cookie representing the cache object
+ * @can_enable: A function to permit enablement once lock is held
+ * @data: Data for can_enable()
+ *
+ * Reenable a previously disabled cookie, allowing it to accept further alloc,
+ * read, write, invalidate, update or acquire operations.  An attempt will be
+ * made to immediately reattach the cookie to a backing object.
+ *
+ * The can_enable() function is called (if not NULL) once the enablement lock
+ * is held to rule on whether enablement is still permitted to go ahead.
+ */
+static inline
+void fscache_enable_cookie(struct fscache_cookie *cookie,
+                          bool (*can_enable)(void *data),
+                          void *data)
+{
+       if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
+               __fscache_enable_cookie(cookie, can_enable, data);
+}
+
 #endif /* _LINUX_FSCACHE_H */
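A hedged sketch (hypothetical netfs glue, not taken from this merge) tying the new cookie API together: acquisition now carries an enable flag, relinquishment takes a bool, and a cookie can be quiesced and re-enabled in place. demo_can_enable(), demo_def and the surrounding arguments are assumptions for illustration only.

#include <linux/fscache.h>

static bool demo_can_enable(void *data)
{
	return data != NULL;		/* arbitrary per-netfs policy */
}

static void demo_cookie_cycle(struct fscache_cookie *parent,
			      const struct fscache_cookie_def *demo_def,
			      void *netfs_data)
{
	struct fscache_cookie *cookie;

	/* acquire now takes an "enable" flag */
	cookie = fscache_acquire_cookie(parent, demo_def, netfs_data, true);
	if (!cookie)
		return;

	/* quiesce: wait for outstanding ops, keep the backing object */
	fscache_disable_cookie(cookie, false);

	/* re-enable; demo_can_enable() is consulted under the enablement lock */
	fscache_enable_cookie(cookie, demo_can_enable, netfs_data);

	/* retire is now a bool as well */
	fscache_relinquish_cookie(cookie, false);
}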
index a9df51f5d54c7b01ff0fc8cac9aa0afe803e8727..519b6e2d769ede04d120f6ea3bc5f848e4978148 100644 (file)
@@ -173,6 +173,21 @@ static inline void hash_del_rcu(struct hlist_node *node)
        hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
                member)
 
+/**
+ * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
+ * to the same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ *
+ * This is the same as hash_for_each_possible_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
+       hlist_for_each_entry_rcu_notrace(obj, \
+               &name[hash_min(key, HASH_BITS(name))], member)
+
 /**
  * hash_for_each_possible_safe - iterate over all possible objects hashing to the
  * same bucket safe against removals
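A hedged usage sketch for the macro added above; the table, entry type, and lookup helper are hypothetical. The caller is assumed to hold rcu_read_lock(), exactly as with hash_for_each_possible_rcu().

#include <linux/hashtable.h>
#include <linux/rculist.h>

struct demo_entry {
	unsigned long key;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(demo_table, 6);		/* 2^6 buckets */

static struct demo_entry *demo_lookup(unsigned long key)
{
	struct demo_entry *e;

	hash_for_each_possible_rcu_notrace(demo_table, e, node, key)
		if (e->key == key)
			return e;
	return NULL;
}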
index f148e49084106fca84ae61b2880420dc0bc2798d..8ec23fb0b412290c767ab444b564029911ba7786 100644 (file)
@@ -31,11 +31,11 @@ struct hippi_cb {
        __u32   ifield;
 };
 
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_HIPPIDEVICE_H */
index f346e4d5381ca40ccf3bc19e1374c7fdf25fc77c..da0a680e2f6d759d44c3e6e770e60db1dd8b8bc6 100644 (file)
@@ -38,7 +38,7 @@ static inline int vid_to_reg(int val, u8 vrm)
                return ((val >= 1100) && (val <= 1850) ?
                        ((18499 - val * 10) / 25 + 5) / 10 : -1);
        default:
-               return -1;
+               return -EINVAL;
        }
 }
 
index b2514f70d591d799197cdb2c5050bd131641f925..09354f6c1d63de2b5cb9d5dd9547ffc03a145af2 100644 (file)
 #define _HWMON_H_
 
 struct device;
+struct attribute_group;
 
 struct device *hwmon_device_register(struct device *dev);
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                 void *drvdata,
+                                 const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                      void *drvdata,
+                                      const struct attribute_group **groups);
 
 void hwmon_device_unregister(struct device *dev);
+void devm_hwmon_device_unregister(struct device *dev);
 
 #endif
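A hedged sketch of a driver picking up the new registration helpers above; the name string, attribute groups, and registration wrapper are hypothetical.

#include <linux/hwmon.h>
#include <linux/device.h>
#include <linux/err.h>

static const struct attribute_group *demo_groups[] = {
	/* &demo_attr_group, */		/* sysfs attribute groups would go here */
	NULL
};

static int demo_register(struct device *dev, void *drvdata)
{
	struct device *hwmon_dev;

	/* devm_ variant: unregistered automatically when dev goes away */
	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "demo",
							    drvdata, demo_groups);
	return IS_ERR(hwmon_dev) ? PTR_ERR(hwmon_dev) : 0;
}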
index a3b8b2e2d24438129df020cc8d7af78562123cad..d98503bde7e9bc7ace0e2c3ddc5f2ae019f527ff 100644 (file)
 /*
  * Framework version for util services.
  */
+#define UTIL_FW_MINOR  0
+
+#define UTIL_WS2K8_FW_MAJOR  1
+#define UTIL_WS2K8_FW_VERSION     (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 #define UTIL_FW_MAJOR  3
-#define UTIL_FW_MINOR  0
-#define UTIL_FW_MAJOR_MINOR     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+#define UTIL_FW_VERSION     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 
 /*
index 2ab11dc38077c89f7c60d767ed1096b909ab6039..eff50e062be850189ed9e78a9b6058a5eea49b74 100644 (file)
@@ -205,7 +205,6 @@ struct i2c_driver {
  * @name: Indicates the type of the device, usually a chip name that's
  *     generic enough to hide second-sourcing and compatible revisions.
  * @adapter: manages the bus segment hosting this I2C device
- * @driver: device's driver, hence pointer to access routines
  * @dev: Driver model device node for the slave.
  * @irq: indicates the IRQ generated by this device (if any)
  * @detected: member of an i2c_driver.clients list or i2c-core's
@@ -222,7 +221,6 @@ struct i2c_client {
                                        /* _LOWER_ 7 bits               */
        char name[I2C_NAME_SIZE];
        struct i2c_adapter *adapter;    /* the adapter we sit on        */
-       struct i2c_driver *driver;      /* and our access routines      */
        struct device dev;              /* the device structure         */
        int irq;                        /* irq issued by device         */
        struct list_head detected;
index 79640e015a86c6eb4a8bab2c0d2ee78eaf241558..0d678aefe69df6155b683ce039a42e4b7c4a1cab 100644 (file)
@@ -147,25 +147,27 @@ struct in_ifaddr {
        unsigned long           ifa_tstamp; /* updated timestamp */
 };
 
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
 
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                       struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+                                struct ipv4_devconf *devconf);
 
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
 static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 {
        return __ip_dev_find(net, addr, true);
 }
 
-extern int             inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int             devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void            devinet_init(void);
-extern struct in_device        *inetdev_by_index(struct net *, int);
-extern __be32          inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32          inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+                        int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+                                   __be32 mask);
 
 static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
@@ -218,7 +220,7 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
        return rtnl_dereference(dev->ip_ptr);
 }
 
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
 
 static inline void in_dev_put(struct in_device *idev)
 {
index 78e2ada50cd5a95c7b7527777a870428afa5352c..d380c5e680086ec4627c745f86dc197e7110ea58 100644 (file)
@@ -55,7 +55,7 @@
 #define DMAR_IQT_REG   0x88    /* Invalidation queue tail register */
 #define DMAR_IQ_SHIFT  4       /* Invalidation queue head/tail shift */
 #define DMAR_IQA_REG   0x90    /* Invalidation queue addr register */
-#define DMAR_ICS_REG   0x98    /* Invalidation complete status register */
+#define DMAR_ICS_REG   0x9c    /* Invalidation complete status register */
 #define DMAR_IRTA_REG  0xb8    /* Interrupt remapping table addr register */
 
 #define OFFSET_STRIDE          (9)
index 28ea38439313226861c5c6df902b7b92c3cb5969..a80a63cfb70c51cb911ff54293ac447d551c45e5 100644 (file)
@@ -115,16 +115,8 @@ static inline int inet6_iif(const struct sk_buff *skb)
        return IP6CB(skb)->iif;
 }
 
-struct inet6_request_sock {
-       struct in6_addr         loc_addr;
-       struct in6_addr         rmt_addr;
-       struct sk_buff          *pktopts;
-       int                     iif;
-};
-
 struct tcp6_request_sock {
        struct tcp_request_sock   tcp6rsk_tcp;
-       struct inet6_request_sock tcp6rsk_inet6;
 };
 
 struct ipv6_mc_socklist;
@@ -141,8 +133,6 @@ struct ipv6_fl_socklist;
  */
 struct ipv6_pinfo {
        struct in6_addr         saddr;
-       struct in6_addr         rcv_saddr;
-       struct in6_addr         daddr;
        struct in6_pktinfo      sticky_pktinfo;
        const struct in6_addr           *daddr_cache;
 #ifdef CONFIG_IPV6_SUBTREES
@@ -256,48 +246,22 @@ struct tcp6_sock {
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-struct inet6_timewait_sock {
-       struct in6_addr tw_v6_daddr;
-       struct in6_addr tw_v6_rcv_saddr;
-};
-
 struct tcp6_timewait_sock {
        struct tcp_timewait_sock   tcp6tw_tcp;
-       struct inet6_timewait_sock tcp6tw_inet6;
 };
 
-static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
-{
-       return (struct inet6_timewait_sock *)(((u8 *)sk) +
-                                             inet_twsk(sk)->tw_ipv6_offset);
-}
-
 #if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
        return inet_sk(__sk)->pinet6;
 }
 
-static inline struct inet6_request_sock *
-                       inet6_rsk(const struct request_sock *rsk)
-{
-       return (struct inet6_request_sock *)(((u8 *)rsk) +
-                                            inet_rsk(rsk)->inet6_rsk_offset);
-}
-
-static inline u32 inet6_rsk_offset(struct request_sock *rsk)
-{
-       return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
-}
-
 static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
 {
        struct request_sock *req = reqsk_alloc(ops);
 
-       if (req != NULL) {
-               inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
-               inet6_rsk(req)->pktopts = NULL;
-       }
+       if (req)
+               inet_rsk(req)->pktopts = NULL;
 
        return req;
 }
@@ -321,21 +285,11 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 #define __ipv6_only_sock(sk)   (inet6_sk(sk)->ipv6only)
 #define ipv6_only_sock(sk)     ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
 
-static inline u16 inet6_tw_offset(const struct proto *prot)
+static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
 {
-       return prot->twsk_prot->twsk_obj_size -
-                       sizeof(struct inet6_timewait_sock);
-}
-
-static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
-{
-       return likely(sk->sk_state != TCP_TIME_WAIT) ?
-               &inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
-}
-
-static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
-{
-       return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL;
+       if (sk->sk_family == AF_INET6)
+               return &sk->sk_v6_rcv_saddr;
+       return NULL;
 }
 
 static inline int inet_v6_ipv6only(const struct sock *sk)
@@ -363,28 +317,18 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
        return NULL;
 }
 
-#define __inet6_rcv_saddr(__sk)        NULL
 #define inet6_rcv_saddr(__sk)  NULL
 #define tcp_twsk_ipv6only(__sk)                0
 #define inet_v6_ipv6only(__sk)         0
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)     \
-       ((inet_sk(__sk)->inet_portpair == (__ports))            &&      \
+       (((__sk)->sk_portpair == (__ports))                     &&      \
         ((__sk)->sk_family == AF_INET6)                        &&      \
-        ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr))     &&      \
-        ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) &&      \
+        ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))               &&      \
+        ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))   &&      \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
 
-#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)     \
-       ((inet_twsk(__sk)->tw_portpair == (__ports))                    && \
-        ((__sk)->sk_family == AF_INET6)                                && \
-        ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))     && \
-        ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
-        (!(__sk)->sk_bound_dev_if      ||                                 \
-         ((__sk)->sk_bound_dev_if == (__dif)))                         && \
-        net_eq(sock_net(__sk), (__net)))
-
 #endif /* _IPV6_H */
index 0e5d9ecdb2b672d901b47f184a4b720e604317e2..cac496b1e279293164066ea0204c87309169bd49 100644 (file)
@@ -31,6 +31,8 @@
 #define GIC_DIST_TARGET                        0x800
 #define GIC_DIST_CONFIG                        0xc00
 #define GIC_DIST_SOFTINT               0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR     0xf10
+#define GIC_DIST_SGI_PENDING_SET       0xf20
 
 #define GICH_HCR                       0x0
 #define GICH_VTR                       0x4
@@ -74,6 +76,11 @@ static inline void gic_init(unsigned int nr, int start,
        gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
 #endif /* __ASSEMBLY */
 
 #endif
index a5079072da663e6b1e426504c3dfa39438e42127..e96be7245717f723486b8ad1bf6d2795dd4bed73 100644 (file)
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/bug.h>
+
+extern bool static_key_initialized;
+
+#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized,                 \
+                                   "%s used before call to jump_label_init", \
+                                   __func__)
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -128,6 +135,7 @@ struct static_key {
 
 static __always_inline void jump_label_init(void)
 {
+       static_key_initialized = true;
 }
 
 static __always_inline bool static_key_false(struct static_key *key)
@@ -146,11 +154,13 @@ static __always_inline bool static_key_true(struct static_key *key)
 
 static inline void static_key_slow_inc(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        atomic_inc(&key->enabled);
 }
 
 static inline void static_key_slow_dec(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        atomic_dec(&key->enabled);
 }
 
index 113788389b3dc25c2de62c32268da828719c7d68..089f70f83e97c9a1adf1087f10d095a02bd3e152 100644 (file)
@@ -23,12 +23,14 @@ struct static_key_deferred {
 };
 static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
+       STATIC_KEY_CHECK_USE();
        static_key_slow_dec(&key->key);
 }
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
+       STATIC_KEY_CHECK_USE();
 }
 #endif /* HAVE_JUMP_LABEL */
 #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
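A hedged sketch of what STATIC_KEY_CHECK_USE() in the jump_label changes above is guarding: flipping a static key before jump_label_init() has run now produces a one-line WARN instead of going unnoticed. The key and the two helpers are hypothetical.

#include <linux/jump_label.h>

static struct static_key demo_key = STATIC_KEY_INIT_FALSE;

static void demo_enable_fast_path(void)
{
	static_key_slow_inc(&demo_key);		/* WARNs if called before jump_label_init() */
}

static bool demo_fast_path_enabled(void)
{
	return static_key_false(&demo_key);	/* patched no-op branch until enabled */
}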
index 482ad2d84a32dae333c74cccb2d1394b247d5ae8..672ddc4de4af0511b20a4d269bcae1c517c3d5c4 100644 (file)
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
        return buf;
 }
 
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x)    hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x)    hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+       *buf++ = hex_asc_upper_hi(byte);
+       *buf++ = hex_asc_upper_lo(byte);
+       return buf;
+}
+
 static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 {
        return hex_byte_pack(buf, byte);
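A hedged sketch using the new upper-case variant added above; the MAC-formatting helper is hypothetical and the caller is assumed to provide a buffer of at least 18 bytes.

#include <linux/kernel.h>
#include <linux/types.h>

static char *demo_mac_to_upper_hex(char *buf, const u8 mac[6])
{
	int i;

	for (i = 0; i < 6; i++) {
		buf = hex_byte_pack_upper(buf, mac[i]);	/* two upper-case hex digits */
		if (i != 5)
			*buf++ = ':';
	}
	*buf = '\0';
	return buf;		/* points at the terminating NUL */
}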
index f279ed9a91631cca7f236d20ef5a29d152332932..13dfd36a329474df228102dd49bafbdf1b5c687d 100644 (file)
@@ -36,4 +36,10 @@ extern int lockref_put_or_lock(struct lockref *);
 extern void lockref_mark_dead(struct lockref *);
 extern int lockref_get_not_dead(struct lockref *);
 
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+       return ((int)l->count < 0);
+}
+
 #endif /* __LINUX_LOCKREF_H */
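A hedged sketch of the intended calling convention for __lockref_is_dead(): it is meaningful only while the embedded spinlock is held, per the comment above. The wrapper is hypothetical.

#include <linux/lockref.h>
#include <linux/spinlock.h>

static bool demo_get_if_alive(struct lockref *ref)
{
	bool alive;

	spin_lock(&ref->lock);
	alive = !__lockref_is_dead(ref);
	if (alive)
		ref->count++;		/* safe: lock held and the lockref is not dead */
	spin_unlock(&ref->lock);
	return alive;
}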
index 60e95872da2983365300f1abb193b6e81974b5de..b3e7a667e03c24ca5c3d1c54c0db5c7b0bdde052 100644 (file)
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
        unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-       VISIT,          /* visit current node */
-       SKIP,           /* skip the current node and continue traversal */
-       SKIP_TREE,      /* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-                                  struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim,
-                                  mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-                                  struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim)
-{
-       return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+                                  struct mem_cgroup *,
+                                  struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -163,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
-/**
- * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
- * @new: true to enable, false to disable
- *
- * Toggle whether a failed memcg charge should invoke the OOM killer
- * or just return -ENOMEM.  Returns the previous toggle state.
- *
- * NOTE: Any path that enables the OOM killer before charging must
- *       call mem_cgroup_oom_synchronize() afterward to finalize the
- *       OOM handling and clean up.
- */
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
 {
-       bool old;
-
-       old = current->memcg_oom.may_oom;
-       current->memcg_oom.may_oom = new;
-
-       return old;
+       WARN_ON(current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 1;
 }
 
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
-       bool old = mem_cgroup_toggle_oom(true);
-
-       WARN_ON(old == true);
-}
-
-static inline void mem_cgroup_disable_oom(void)
-{
-       bool old = mem_cgroup_toggle_oom(false);
-
-       WARN_ON(old == false);
+       WARN_ON(!current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-       return p->memcg_oom.in_memcg_oom;
+       return p->memcg_oom.memcg;
 }
 
-bool mem_cgroup_oom_synchronize(void);
+bool mem_cgroup_oom_synchronize(bool wait);
 
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
@@ -260,9 +211,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
        mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                               gfp_t gfp_mask,
+                                               unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +327,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-               struct mem_cgroup *prev,
-               struct mem_cgroup_reclaim_cookie *reclaim,
-               mem_cgroup_iter_filter cond)
-{
-       /* first call must return non-NULL, second return NULL */
-       return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -437,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
 {
-       return false;
 }
 
-static inline void mem_cgroup_enable_oom(void)
-{
-}
-
-static inline void mem_cgroup_disable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
 }
 
@@ -455,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
        return false;
 }
 
-static inline bool mem_cgroup_oom_synchronize(void)
+static inline bool mem_cgroup_oom_synchronize(bool wait)
 {
        return false;
 }
@@ -471,11 +408,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
 {
-       return VISIT;
+       return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)
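A hedged, condensed sketch (hypothetical fault-path glue, not lifted from this merge) of how the renamed helpers above are meant to pair up: enable memcg OOM handling only across a user-triggered fault, disable it afterwards, and synchronize if an OOM was raised in between.

#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/sched.h>

static int demo_handle_user_fault(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, unsigned int flags)
{
	int fault;

	mem_cgroup_oom_enable();
	fault = handle_mm_fault(mm, vma, address, flags);
	mem_cgroup_oom_disable();

	/* if the charge path hit its limit, finish the memcg OOM handling now */
	if ((fault & VM_FAULT_OOM) && task_in_memcg_oom(current))
		mem_cgroup_oom_synchronize(true);

	return fault;
}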
index b6bdcd66c07d2e758ba677f9c9c1796ada542bc8..e00e9f362fd50641e3dbf79722e06cfa9a752fb5 100644 (file)
 
 #define IMX6Q_GPR5_L2_CLK_STOP                 BIT(8)
 
+#define IMX6Q_GPR8_TX_SWING_LOW                        (0x7f << 25)
+#define IMX6Q_GPR8_TX_SWING_FULL               (0x7f << 18)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB          (0x3f << 12)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB                (0x3f << 6)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN1              (0x3f << 0)
+
 #define IMX6Q_GPR9_TZASC2_BYP                  BIT(1)
 #define IMX6Q_GPR9_TZASC1_BYP                  BIT(0)
 
 #define IMX6Q_GPR12_ARMP_AHB_CLK_EN            BIT(26)
 #define IMX6Q_GPR12_ARMP_ATB_CLK_EN            BIT(25)
 #define IMX6Q_GPR12_ARMP_APB_CLK_EN            BIT(24)
+#define IMX6Q_GPR12_DEVICE_TYPE                        (0xf << 12)
 #define IMX6Q_GPR12_PCIE_CTL_2                 BIT(10)
+#define IMX6Q_GPR12_LOS_LEVEL                  (0x1f << 4)
 
 #define IMX6Q_GPR13_SDMA_STOP_REQ              BIT(30)
 #define IMX6Q_GPR13_CAN2_STOP_REQ              BIT(29)
index 09c2300ddb3723188636867fa9b5c21bddab47df..f7eaf2d60083a5c7d216456e631b1f2b56ea7892 100644 (file)
@@ -31,6 +31,7 @@
 #define I2O_MINOR              166
 #define MICROCODE_MINOR                184
 #define TUN_MINOR              200
+#define CUSE_MINOR             203
 #define MWAVE_MINOR            219     /* ACP/Mwave Modem */
 #define MPT_MINOR              220
 #define MPT2SAS_MINOR          221
@@ -45,6 +46,7 @@
 #define MAPPER_CTRL_MINOR      236
 #define LOOP_CTRL_MINOR                237
 #define VHOST_NET_MINOR                238
+#define UHID_MINOR             239
 #define MISC_DYNAMIC_MINOR     255
 
 struct device;
index cd1fdf75103b7bd26350490cffc7de8e054bc366..8df61bc5da00ff9d751727c42ee93b2c6ad7de9f 100644 (file)
@@ -154,10 +154,6 @@ enum {
        MLX4_CMD_QUERY_IF_STAT   = 0X54,
        MLX4_CMD_SET_IF_STAT     = 0X55,
 
-       /* set port opcode modifiers */
-       MLX4_SET_PORT_PRIO2TC = 0x8,
-       MLX4_SET_PORT_SCHEDULER  = 0x9,
-
        /* register/delete flow steering network rules */
        MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
        MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@@ -182,6 +178,8 @@ enum {
        MLX4_SET_PORT_VLAN_TABLE = 0x3,
        MLX4_SET_PORT_PRIO_MAP  = 0x4,
        MLX4_SET_PORT_GID_TABLE = 0x5,
+       MLX4_SET_PORT_PRIO2TC   = 0x8,
+       MLX4_SET_PORT_SCHEDULER = 0x9,
 };
 
 enum {
index 24ce6bdd540ef65f1a8560ae6f089a0df1e5aeb1..9ad0c18495ad059e57ad77bef5a9579bb581a066 100644 (file)
@@ -155,7 +155,7 @@ enum {
        MLX4_DEV_CAP_FLAG2_RSS_TOP              = 1LL <<  1,
        MLX4_DEV_CAP_FLAG2_RSS_XOR              = 1LL <<  2,
        MLX4_DEV_CAP_FLAG2_FS_EN                = 1LL <<  3,
-       MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN     = 1LL <<  4,
+       MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN      = 1LL <<  4,
        MLX4_DEV_CAP_FLAG2_TS                   = 1LL <<  5,
        MLX4_DEV_CAP_FLAG2_VLAN_CONTROL         = 1LL <<  6,
        MLX4_DEV_CAP_FLAG2_FSM                  = 1LL <<  7,
index 68029b30c3dc89e2d2620fd41101b2ab05069344..5eb4e31af22b8e05356dfef793c1b96497af8cb3 100644 (file)
@@ -181,7 +181,7 @@ enum {
        MLX5_DEV_CAP_FLAG_TLP_HINTS     = 1LL << 39,
        MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
        MLX5_DEV_CAP_FLAG_DCT           = 1LL << 41,
-       MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 1LL << 46,
+       MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 3LL << 46,
 };
 
 enum {
@@ -417,7 +417,7 @@ struct mlx5_init_seg {
        struct health_buffer    health;
        __be32                  rsvd2[884];
        __be32                  health_counter;
-       __be32                  rsvd3[1023];
+       __be32                  rsvd3[1019];
        __be64                  ieee1588_clk;
        __be32                  ieee1588_clk_type;
        __be32                  clr_intx;
index 8888381fc150b8f3f852077407ee8187a54cb7aa..6b8c496572c841d8ec8be70740557f1caf50b79a 100644 (file)
@@ -82,7 +82,7 @@ enum {
 };
 
 enum {
-       MLX5_MAX_EQ_NAME        = 20
+       MLX5_MAX_EQ_NAME        = 32
 };
 
 enum {
@@ -747,8 +747,7 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
 
 enum {
        MLX5_PROF_MASK_QP_SIZE          = (u64)1 << 0,
-       MLX5_PROF_MASK_CMDIF_CSUM       = (u64)1 << 1,
-       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 2,
+       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 1,
 };
 
 enum {
@@ -758,7 +757,6 @@ enum {
 struct mlx5_profile {
        u64     mask;
        u32     log_max_qp;
-       int     cmdif_csum;
        struct {
                int     size;
                int     limit;
index 8b6e55ee885576d3dc3613a23a990cd20da4bfed..1a0668e5a4eef0377b708b9aded0e11f8da67a80 100644 (file)
@@ -297,12 +297,26 @@ static inline int put_page_testzero(struct page *page)
 /*
  * Try to grab a ref unless the page has a refcount of zero, return false if
  * that is the case.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
  */
 static inline int get_page_unless_zero(struct page *page)
 {
        return atomic_inc_not_zero(&page->_count);
 }
 
+/*
+ * Try to drop a ref unless the page has a refcount of one, return false if
+ * that is the case.
+ * This is to make sure that the refcount won't become zero after this drop.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
+ */
+static inline int put_page_unless_one(struct page *page)
+{
+       return atomic_add_unless(&page->_count, -1, 1);
+}
+
 extern int page_is_ram(unsigned long pfn);
 
 /* Support for virtually mapped pages */
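A hedged sketch of the pairing described in the comments above; the two wrappers are hypothetical and exist only to show when each helper refuses to act.

#include <linux/mm.h>

/* Take a reference only if the page is still in use somewhere. */
static bool demo_pin_if_used(struct page *page)
{
	return get_page_unless_zero(page);	/* false: refcount already hit zero */
}

/* Drop a reference only if ours would not be the final one. */
static bool demo_unpin_keep_alive(struct page *page)
{
	return put_page_unless_one(page);	/* false: we hold the last reference */
}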
index ccd4260834c57da2ea30fc5b20714ef15afe076f..bab49da8a0f0b1bd2e01d516fdd0e39a07162326 100644 (file)
@@ -15,8 +15,8 @@
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
-
 #include <linux/atomic.h>
+#include <asm/processor.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax() cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
 #endif
 
 #endif
index 4f27575ce1d67ebe74d1bba4f40a70f6edbdea11..aca446b46754fe681dcb0fb99028ac62c90a68fe 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/fcntl.h>       /* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 #include <uapi/linux/net.h>
 
 struct poll_table_struct;
@@ -195,27 +196,23 @@ enum {
        SOCK_WAKE_URG,
 };
 
-extern int          sock_wake_async(struct socket *sk, int how, int band);
-extern int          sock_register(const struct net_proto_family *fam);
-extern void         sock_unregister(int family);
-extern int          __sock_create(struct net *net, int family, int type, int proto,
-                                struct socket **res, int kern);
-extern int          sock_create(int family, int type, int proto,
-                                struct socket **res);
-extern int          sock_create_kern(int family, int type, int proto,
-                                     struct socket **res);
-extern int          sock_create_lite(int family, int type, int proto,
-                                     struct socket **res); 
-extern void         sock_release(struct socket *sock);
-extern int          sock_sendmsg(struct socket *sock, struct msghdr *msg,
-                                 size_t len);
-extern int          sock_recvmsg(struct socket *sock, struct msghdr *msg,
-                                 size_t size, int flags);
-extern struct file  *sock_alloc_file(struct socket *sock, int flags, const char *dname);
-extern struct socket *sockfd_lookup(int fd, int *err);
-extern struct socket *sock_from_file(struct file *file, int *err);
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+                 struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
 #define                     sockfd_put(sock) fput(sock->file)
-extern int          net_ratelimit(void);
+int net_ratelimit(void);
 
 #define net_ratelimited_function(function, ...)                        \
 do {                                                           \
@@ -243,32 +240,53 @@ do {                                                              \
 #define net_random()           prandom_u32()
 #define net_srandom(seed)      prandom_seed((__force u32)(seed))
 
-extern int          kernel_sendmsg(struct socket *sock, struct msghdr *msg,
-                                   struct kvec *vec, size_t num, size_t len);
-extern int          kernel_recvmsg(struct socket *sock, struct msghdr *msg,
-                                   struct kvec *vec, size_t num,
-                                   size_t len, int flags);
-
-extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
-                      int addrlen);
-extern int kernel_listen(struct socket *sock, int backlog);
-extern int kernel_accept(struct socket *sock, struct socket **newsock,
-                        int flags);
-extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
-                         int addrlen, int flags);
-extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
-                             int *addrlen);
-extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
-                             int *addrlen);
-extern int kernel_getsockopt(struct socket *sock, int level, int optname,
-                            char *optval, int *optlen);
-extern int kernel_setsockopt(struct socket *sock, int level, int optname,
-                            char *optval, unsigned int optlen);
-extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
-                          size_t size, int flags);
-extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
-extern int kernel_sock_shutdown(struct socket *sock,
-                               enum sock_shutdown_cmd how);
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+                          struct static_key *done_key);
+
+#ifdef HAVE_JUMP_LABEL
+#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
+               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
+#else /* !HAVE_JUMP_LABEL */
+#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#endif /* HAVE_JUMP_LABEL */
+
+/* BE CAREFUL: this function is not interrupt safe */
+#define net_get_random_once(buf, nbytes)                               \
+       ({                                                              \
+               bool ___ret = false;                                    \
+               static bool ___done = false;                            \
+               static struct static_key ___done_key =                  \
+                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
+               if (!static_key_true(&___done_key))                     \
+                       ___ret = __net_get_random_once(buf,             \
+                                                      nbytes,          \
+                                                      &___done,        \
+                                                      &___done_key);   \
+               ___ret;                                                 \
+       })
+
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+                  size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+                  size_t num, size_t len, int flags);
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+                  int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+                      int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+                      int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+                     int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+                     unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+                   size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
 
 #define MODULE_ALIAS_NETPROTO(proto) \
        MODULE_ALIAS("net-pf-" __stringify(proto))
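net_get_random_once() lazily seeds a per-use secret exactly once system-wide; once seeded, the static key lets later callers skip the slow path. A minimal usage sketch, assuming process or BH context per the interrupt-safety warning above (example_hash() and its secret are hypothetical):

#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/net.h>

static u32 example_hash_secret __read_mostly;

static u32 example_hash(u32 key)
{
	/* The first caller fills the secret with random bytes; later calls
	 * are short-circuited by the static key. */
	net_get_random_once(&example_hash_secret, sizeof(example_hash_secret));

	return jhash_1word(key, example_hash_secret);
}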
index a2a89a5c7be55b15baec6271a8cc4329b6f642bd..b05a4b501ab50f54fbd5d30e54a8086fcb964392 100644 (file)
@@ -42,6 +42,8 @@ enum {
        NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
        NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
        NETIF_F_GSO_GRE_BIT,            /* ... GRE with TSO */
+       NETIF_F_GSO_IPIP_BIT,           /* ... IPIP tunnel with TSO */
+       NETIF_F_GSO_SIT_BIT,            /* ... SIT tunnel with TSO */
        NETIF_F_GSO_UDP_TUNNEL_BIT,     /* ... UDP TUNNEL with TSO */
        NETIF_F_GSO_MPLS_BIT,           /* ... MPLS segmentation */
        /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
@@ -107,6 +109,8 @@ enum {
 #define NETIF_F_RXFCS          __NETIF_F(RXFCS)
 #define NETIF_F_RXALL          __NETIF_F(RXALL)
 #define NETIF_F_GSO_GRE                __NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_IPIP       __NETIF_F(GSO_IPIP)
+#define NETIF_F_GSO_SIT                __NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_MPLS       __NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
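The two new feature bits let drivers advertise segmentation offload for IPIP and SIT tunnels. A hypothetical probe-time excerpt, illustrative only and not taken from any driver in this merge:

#include <linux/netdevice.h>

static void example_setup_tunnel_offloads(struct net_device *netdev)
{
	/* Advertise TSO over IPIP and SIT tunnels next to the usual SG/TSO. */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO |
			       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
	netdev->features |= netdev->hw_features;
}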
index 3de49aca451970a738b5ae55cfd74656248ac9ea..27f62f746621a8758b7beb60b8b530604aa0fb74 100644 (file)
@@ -60,8 +60,8 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )
 
-extern void netdev_set_default_ethtool_ops(struct net_device *dev,
-                                          const struct ethtool_ops *ops);
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                   const struct ethtool_ops *ops);
 
 /* hardware address assignment types */
 #define NET_ADDR_PERM          0       /* address is permanent (default) */
@@ -298,7 +298,7 @@ struct netdev_boot_setup {
 };
 #define NETDEV_BOOT_SETUP_MAX 8
 
-extern int __init netdev_boot_setup(char *str);
+int __init netdev_boot_setup(char *str);
 
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@ enum rx_handler_result {
 typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
-extern void __napi_schedule(struct napi_struct *n);
+void __napi_schedule(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -445,8 +445,8 @@ static inline bool napi_reschedule(struct napi_struct *napi)
  *
  * Mark NAPI processing as complete.
  */
-extern void __napi_complete(struct napi_struct *n);
-extern void napi_complete(struct napi_struct *n);
+void __napi_complete(struct napi_struct *n);
+void napi_complete(struct napi_struct *n);
 
 /**
  *     napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@ extern void napi_complete(struct napi_struct *n);
  * lookup @napi_id in napi_hash table
  * must be called under rcu_read_lock()
  */
-extern struct napi_struct *napi_by_id(unsigned int napi_id);
+struct napi_struct *napi_by_id(unsigned int napi_id);
 
 /**
  *     napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@ extern struct napi_struct *napi_by_id(unsigned int napi_id);
  *
  * generate a new napi_id and store a @napi under it in napi_hash
  */
-extern void napi_hash_add(struct napi_struct *napi);
+void napi_hash_add(struct napi_struct *napi);
 
 /**
  *     napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@ extern void napi_hash_add(struct napi_struct *napi);
  * Warning: caller must observe rcu grace period
  * before freeing memory containing @napi
  */
-extern void napi_hash_del(struct napi_struct *napi);
+void napi_hash_del(struct napi_struct *napi);
 
 /**
  *     napi_disable - prevent NAPI from scheduling
@@ -664,8 +664,8 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 #ifdef CONFIG_RFS_ACCEL
-extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
-                               u32 flow_id, u16 filter_id);
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+                        u16 filter_id);
 #endif
 
 /* This structure contains an instance of an RX queue. */
@@ -1143,8 +1143,19 @@ struct net_device {
        struct list_head        dev_list;
        struct list_head        napi_list;
        struct list_head        unreg_list;
-       struct list_head        upper_dev_list; /* List of upper devices */
-       struct list_head        lower_dev_list;
+       struct list_head        close_list;
+
+       /* directly linked devices, like slaves for bonding */
+       struct {
+               struct list_head upper;
+               struct list_head lower;
+       } adj_list;
+
+       /* all linked devices, *including* neighbours */
+       struct {
+               struct list_head upper;
+               struct list_head lower;
+       } all_adj_list;
 
 
        /* currently active device features */
@@ -1487,9 +1498,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
-extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                          struct sk_buff *skb);
-extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                   struct sk_buff *skb);
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -1673,8 +1684,8 @@ struct packet_offload {
 #define NETDEV_CHANGEUPPER     0x0015
 #define NETDEV_RESEND_IGMP     0x0016
 
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
 
 struct netdev_notifier_info {
        struct net_device *dev;
@@ -1697,9 +1708,9 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
        return info->dev;
 }
 
-extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
-                                        struct netdev_notifier_info *info);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+                                 struct netdev_notifier_info *info);
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 
 extern rwlock_t                                dev_base_lock;          /* Device list lock */
@@ -1754,54 +1765,52 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
-extern int                     netdev_boot_setup_check(struct net_device *dev);
-extern unsigned long           netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
-                                             const char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern void            dev_add_pack(struct packet_type *pt);
-extern void            dev_remove_pack(struct packet_type *pt);
-extern void            __dev_remove_pack(struct packet_type *pt);
-extern void            dev_add_offload(struct packet_offload *po);
-extern void            dev_remove_offload(struct packet_offload *po);
-extern void            __dev_remove_offload(struct packet_offload *po);
-
-extern struct net_device       *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
-                                                     unsigned short mask);
-extern struct net_device       *dev_get_by_name(struct net *net, const char *name);
-extern struct net_device       *dev_get_by_name_rcu(struct net *net, const char *name);
-extern struct net_device       *__dev_get_by_name(struct net *net, const char *name);
-extern int             dev_alloc_name(struct net_device *dev, const char *name);
-extern int             dev_open(struct net_device *dev);
-extern int             dev_close(struct net_device *dev);
-extern void            dev_disable_lro(struct net_device *dev);
-extern int             dev_loopback_xmit(struct sk_buff *newskb);
-extern int             dev_queue_xmit(struct sk_buff *skb);
-extern int             register_netdevice(struct net_device *dev);
-extern void            unregister_netdevice_queue(struct net_device *dev,
-                                                  struct list_head *head);
-extern void            unregister_netdevice_many(struct list_head *head);
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+                                      const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+void __dev_remove_offload(struct packet_offload *po);
+
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+                                       unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
 static inline void unregister_netdevice(struct net_device *dev)
 {
        unregister_netdevice_queue(dev, NULL);
 }
 
-extern int             netdev_refcnt_read(const struct net_device *dev);
-extern void            free_netdev(struct net_device *dev);
-extern void            synchronize_net(void);
-extern int             init_dummy_netdev(struct net_device *dev);
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
 
-extern struct net_device       *dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device       *__dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device       *dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int             netdev_get_name(struct net *net, char *name, int ifindex);
-extern int             dev_restart(struct net_device *dev);
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
-extern int             netpoll_trap(void);
+int netpoll_trap(void);
 #endif
-extern int            skb_gro_receive(struct sk_buff **head,
-                                      struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -1873,7 +1882,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
 }
 
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-extern int             register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
 {
        return register_gifconf(family, NULL);
@@ -1944,7 +1953,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
-extern void __netif_schedule(struct Qdisc *q);
+void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
@@ -2264,11 +2273,11 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 }
 
 #ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
-                              u16 index);
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+                       u16 index);
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
-                                     struct cpumask *mask,
+                                     const struct cpumask *mask,
                                      u16 index)
 {
        return 0;
@@ -2296,12 +2305,10 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
        return dev->num_tx_queues > 1;
 }
 
-extern int netif_set_real_num_tx_queues(struct net_device *dev,
-                                       unsigned int txq);
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 
 #ifdef CONFIG_RPS
-extern int netif_set_real_num_rx_queues(struct net_device *dev,
-                                       unsigned int rxq);
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                                unsigned int rxq)
@@ -2328,28 +2335,27 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 }
 
 #define DEFAULT_MAX_NUM_RSS_QUEUES     (8)
-extern int netif_get_num_default_rss_queues(void);
+int netif_get_num_default_rss_queues(void);
 
 /* Use this variant when it is known for sure that it
  * is executing from hardware interrupt context or with hardware interrupts
  * disabled.
  */
-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
  * from either hardware interrupt or other context, with hardware interrupts
  * either disabled or enabled.
  */
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+void dev_kfree_skb_any(struct sk_buff *skb);
 
-extern int             netif_rx(struct sk_buff *skb);
-extern int             netif_rx_ni(struct sk_buff *skb);
-extern int             netif_receive_skb(struct sk_buff *skb);
-extern gro_result_t    napi_gro_receive(struct napi_struct *napi,
-                                        struct sk_buff *skb);
-extern void            napi_gro_flush(struct napi_struct *napi, bool flush_old);
-extern struct sk_buff *        napi_get_frags(struct napi_struct *napi);
-extern gro_result_t    napi_gro_frags(struct napi_struct *napi);
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb(struct sk_buff *skb);
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -2357,40 +2363,36 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
-extern int netdev_rx_handler_register(struct net_device *dev,
-                                     rx_handler_func_t *rx_handler,
-                                     void *rx_handler_data);
-extern void netdev_rx_handler_unregister(struct net_device *dev);
-
-extern bool            dev_valid_name(const char *name);
-extern int             dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern int             dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned int    dev_get_flags(const struct net_device *);
-extern int             __dev_change_flags(struct net_device *, unsigned int flags);
-extern int             dev_change_flags(struct net_device *, unsigned int);
-extern void            __dev_notify_flags(struct net_device *, unsigned int old_flags);
-extern int             dev_change_name(struct net_device *, const char *);
-extern int             dev_set_alias(struct net_device *, const char *, size_t);
-extern int             dev_change_net_namespace(struct net_device *,
-                                                struct net *, const char *);
-extern int             dev_set_mtu(struct net_device *, int);
-extern void            dev_set_group(struct net_device *, int);
-extern int             dev_set_mac_address(struct net_device *,
-                                           struct sockaddr *);
-extern int             dev_change_carrier(struct net_device *,
-                                          bool new_carrier);
-extern int             dev_get_phys_port_id(struct net_device *dev,
-                                            struct netdev_phys_port_id *ppid);
-extern int             dev_hard_start_xmit(struct sk_buff *skb,
-                                           struct net_device *dev,
-                                           struct netdev_queue *txq);
-extern int             dev_forward_skb(struct net_device *dev,
-                                       struct sk_buff *skb);
+int netdev_rx_handler_register(struct net_device *dev,
+                              rx_handler_func_t *rx_handler,
+                              void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
+
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+                       unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+                        struct netdev_phys_port_id *ppid);
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+                       struct netdev_queue *txq);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int             netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
-extern void netdev_run_todo(void);
+void netdev_run_todo(void);
 
 /**
  *     dev_put - release reference to device
@@ -2423,9 +2425,9 @@ static inline void dev_hold(struct net_device *dev)
  * kind of lower layer not just hardware media.
  */
 
-extern void linkwatch_init_dev(struct net_device *dev);
-extern void linkwatch_fire_event(struct net_device *dev);
-extern void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  *     netif_carrier_ok - test if carrier present
@@ -2438,13 +2440,13 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 }
 
-extern unsigned long dev_trans_start(struct net_device *dev);
+unsigned long dev_trans_start(struct net_device *dev);
 
-extern void __netdev_watchdog_up(struct net_device *dev);
+void __netdev_watchdog_up(struct net_device *dev);
 
-extern void netif_carrier_on(struct net_device *dev);
+void netif_carrier_on(struct net_device *dev);
 
-extern void netif_carrier_off(struct net_device *dev);
+void netif_carrier_off(struct net_device *dev);
 
 /**
  *     netif_dormant_on - mark device as dormant.
@@ -2512,9 +2514,9 @@ static inline bool netif_device_present(struct net_device *dev)
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-extern void netif_device_detach(struct net_device *dev);
+void netif_device_detach(struct net_device *dev);
 
-extern void netif_device_attach(struct net_device *dev);
+void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -2723,119 +2725,138 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 
-extern void            ether_setup(struct net_device *dev);
+void ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
-                                      void (*setup)(struct net_device *),
-                                      unsigned int txqs, unsigned int rxqs);
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+                                   void (*setup)(struct net_device *),
+                                   unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
 
 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
 
-extern int             register_netdev(struct net_device *dev);
-extern void            unregister_netdev(struct net_device *dev);
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
 
 /* General hardware address lists handling functions */
-extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
-                                 struct netdev_hw_addr_list *from_list,
-                                 int addr_len, unsigned char addr_type);
-extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
-                                  struct netdev_hw_addr_list *from_list,
-                                  int addr_len, unsigned char addr_type);
-extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
-                         struct netdev_hw_addr_list *from_list,
-                         int addr_len);
-extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
-                            struct netdev_hw_addr_list *from_list,
-                            int addr_len);
-extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
-extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+                          struct netdev_hw_addr_list *from_list,
+                          int addr_len, unsigned char addr_type);
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+                           struct netdev_hw_addr_list *from_list,
+                           int addr_len, unsigned char addr_type);
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+                  struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+                     struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_flush(struct netdev_hw_addr_list *list);
+void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
-                       unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
-                       unsigned char addr_type);
-extern int dev_addr_add_multiple(struct net_device *to_dev,
-                                struct net_device *from_dev,
-                                unsigned char addr_type);
-extern int dev_addr_del_multiple(struct net_device *to_dev,
-                                struct net_device *from_dev,
-                                unsigned char addr_type);
-extern void dev_addr_flush(struct net_device *dev);
-extern int dev_addr_init(struct net_device *dev);
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+                unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+                unsigned char addr_type);
+int dev_addr_add_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev, unsigned char addr_type);
+int dev_addr_del_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev, unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_sync(struct net_device *to, struct net_device *from);
-extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_uc_flush(struct net_device *dev);
-extern void dev_uc_init(struct net_device *dev);
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
 
 /* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_flush(struct net_device *dev);
-extern void dev_mc_init(struct net_device *dev);
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
-extern void            dev_set_rx_mode(struct net_device *dev);
-extern void            __dev_set_rx_mode(struct net_device *dev);
-extern int             dev_set_promiscuity(struct net_device *dev, int inc);
-extern int             dev_set_allmulti(struct net_device *dev, int inc);
-extern void            netdev_state_change(struct net_device *dev);
-extern void            netdev_notify_peers(struct net_device *dev);
-extern void            netdev_features_change(struct net_device *dev);
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
-extern void            dev_load(struct net *net, const char *name);
-extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
-                                              struct rtnl_link_stats64 *storage);
-extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-                                   const struct net_device_stats *netdev_stats);
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+                                       struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+                            const struct net_device_stats *netdev_stats);
 
 extern int             netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
 extern int             bpf_jit_enable;
 
-extern bool netdev_has_upper_dev(struct net_device *dev,
-                                struct net_device *upper_dev);
-extern bool netdev_has_any_upper_dev(struct net_device *dev);
-extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-                                                       struct list_head **iter);
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 
 /* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
-       for (iter = &(dev)->upper_dev_list, \
-            upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
-            upper; \
-            upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-
-extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
-extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-extern int netdev_upper_dev_link(struct net_device *dev,
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->all_adj_list.upper, \
+            updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+
+void *netdev_lower_get_next_private(struct net_device *dev,
+                                   struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+                                       struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+       for (iter = (dev)->adj_list.lower.next, \
+            priv = netdev_lower_get_next_private(dev, &(iter)); \
+            priv; \
+            priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+            priv; \
+            priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev);
-extern int netdev_master_upper_dev_link(struct net_device *dev,
-                                       struct net_device *upper_dev);
-extern void netdev_upper_dev_unlink(struct net_device *dev,
-                                   struct net_device *upper_dev);
-extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
-       netdev_features_t features, bool tx_path);
-extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
-                                         netdev_features_t features);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+                            struct net_device *upper_dev);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+                                      struct net_device *lower_dev);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+                                  struct net_device *lower_dev);
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+                                 netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+                                   netdev_features_t features);
 
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2857,30 +2878,30 @@ static inline bool can_checksum_protocol(netdev_features_t features,
 }
 
 #ifdef CONFIG_BUG
-extern void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev);
 #else
 static inline void netdev_rx_csum_fault(struct net_device *dev)
 {
 }
 #endif
 /* rx skb timestamps */
-extern void            net_enable_timestamp(void);
-extern void            net_disable_timestamp(void);
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
 
 #ifdef CONFIG_PROC_FS
-extern int __init dev_proc_init(void);
+int __init dev_proc_init(void);
 #else
 #define dev_proc_init() 0
 #endif
 
-extern int netdev_class_create_file(struct class_attribute *class_attr);
-extern void netdev_class_remove_file(struct class_attribute *class_attr);
+int netdev_class_create_file(struct class_attribute *class_attr);
+void netdev_class_remove_file(struct class_attribute *class_attr);
 
 extern struct kobj_ns_type_operations net_ns_type_operations;
 
-extern const char *netdev_drivername(const struct net_device *dev);
+const char *netdev_drivername(const struct net_device *dev);
 
-extern void linkwatch_run_queue(void);
+void linkwatch_run_queue(void);
 
 static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
@@ -2972,22 +2993,22 @@ static inline const char *netdev_name(const struct net_device *dev)
        return dev->name;
 }
 
-extern __printf(3, 4)
+__printf(3, 4)
 int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_emerg(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_alert(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_crit(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_err(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_warn(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_notice(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_info(const struct net_device *dev, const char *format, ...);
 
 #define MODULE_ALIAS_NETDEV(device) \
@@ -3028,7 +3049,7 @@ do {                                                              \
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)                      \
-       WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+       WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
 
index 708fe72ab913ba71e5421b080d0c9a8c90c7a3d9..2077489f98873bcbe4ca083cd1f0eb1b99eeab5e 100644 (file)
@@ -35,14 +35,15 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
        result->all[3] = a1->all[3] & mask->all[3];
 }
 
-extern int netfilter_init(void);
+int netfilter_init(void);
 
 /* Largest hook number + 1 */
 #define NF_MAX_HOOKS 8
 
 struct sk_buff;
 
-typedef unsigned int nf_hookfn(unsigned int hooknum,
+struct nf_hook_ops;
+typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
                               struct sk_buff *skb,
                               const struct net_device *in,
                               const struct net_device *out,
@@ -52,12 +53,13 @@ struct nf_hook_ops {
        struct list_head list;
 
        /* User fills in from here down. */
-       nf_hookfn *hook;
-       struct module *owner;
-       u_int8_t pf;
-       unsigned int hooknum;
+       nf_hookfn       *hook;
+       struct module   *owner;
+       void            *priv;
+       u_int8_t        pf;
+       unsigned int    hooknum;
        /* Hooks are ordered in ascending priority. */
-       int priority;
+       int             priority;
 };
 
 struct nf_sockopt_ops {
@@ -208,7 +210,7 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
 /* Call this before modifying an existing packet: ensures it is
    modifiable and linear to the point you care about (writable_len).
    Returns true or false. */
-extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
 
 struct flowi;
 struct nf_queue_entry;
@@ -269,8 +271,8 @@ nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 }
 
-extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
-extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
 
 #include <net/flow.h>
 extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +317,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 
 struct nf_conn;
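nf_hookfn now receives the registered nf_hook_ops itself, and the ops structure gains a priv pointer, so per-hook state no longer needs a file-scope global. A sketch under those assumptions (the example_* names are hypothetical); the ops are registered with nf_register_hook() exactly as before:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

struct example_hook_ctx {
	unsigned long hits;
};

static struct example_hook_ctx example_ctx;

static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* Per-hook state travels in ops->priv instead of a global. */
	struct example_hook_ctx *ctx = ops->priv;

	ctx->hits++;
	return NF_ACCEPT;
}

static struct nf_hook_ops example_hook_ops = {
	.hook		= example_hook,
	.owner		= THIS_MODULE,
	.priv		= &example_ctx,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FIRST,
};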
index 9ac9fbde7b61a38795d631ab545b9bd3140fe6f6..7967516adc0d1ba2335111e863cc8c1899050bc3 100644 (file)
@@ -49,31 +49,68 @@ enum ip_set_feature {
 
 /* Set extensions */
 enum ip_set_extension {
-       IPSET_EXT_NONE = 0,
-       IPSET_EXT_BIT_TIMEOUT = 1,
+       IPSET_EXT_BIT_TIMEOUT = 0,
        IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
-       IPSET_EXT_BIT_COUNTER = 2,
+       IPSET_EXT_BIT_COUNTER = 1,
        IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
-};
-
-/* Extension offsets */
-enum ip_set_offset {
-       IPSET_OFFSET_TIMEOUT = 0,
-       IPSET_OFFSET_COUNTER,
-       IPSET_OFFSET_MAX,
+       IPSET_EXT_BIT_COMMENT = 2,
+       IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+       /* Mark set with an extension which needs to call destroy */
+       IPSET_EXT_BIT_DESTROY = 7,
+       IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
 };
 
 #define SET_WITH_TIMEOUT(s)    ((s)->extensions & IPSET_EXT_TIMEOUT)
 #define SET_WITH_COUNTER(s)    ((s)->extensions & IPSET_EXT_COUNTER)
+#define SET_WITH_COMMENT(s)    ((s)->extensions & IPSET_EXT_COMMENT)
+
+/* Extension id, in size order */
+enum ip_set_ext_id {
+       IPSET_EXT_ID_COUNTER = 0,
+       IPSET_EXT_ID_TIMEOUT,
+       IPSET_EXT_ID_COMMENT,
+       IPSET_EXT_ID_MAX,
+};
+
+/* Extension type */
+struct ip_set_ext_type {
+       /* Destroy extension private data (can be NULL) */
+       void (*destroy)(void *ext);
+       enum ip_set_extension type;
+       enum ipset_cadt_flags flag;
+       /* Size and minimal alignment */
+       u8 len;
+       u8 align;
+};
+
+extern const struct ip_set_ext_type ip_set_extensions[];
 
 struct ip_set_ext {
-       unsigned long timeout;
        u64 packets;
        u64 bytes;
+       u32 timeout;
+       char *comment;
+};
+
+struct ip_set_counter {
+       atomic64_t bytes;
+       atomic64_t packets;
+};
+
+struct ip_set_comment {
+       char *str;
 };
 
 struct ip_set;
 
+#define ext_timeout(e, s)      \
+(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+#define ext_counter(e, s)      \
+(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+#define ext_comment(e, s)      \
+(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+
+
 typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
                           const struct ip_set_ext *ext,
                           struct ip_set_ext *mext, u32 cmdflags);
@@ -147,7 +184,8 @@ struct ip_set_type {
        u8 revision_min, revision_max;
 
        /* Create set */
-       int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+       int (*create)(struct net *net, struct ip_set *set,
+                     struct nlattr *tb[], u32 flags);
 
        /* Attribute policies */
        const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
@@ -179,14 +217,45 @@ struct ip_set {
        u8 revision;
        /* Extensions */
        u8 extensions;
+       /* Default timeout value, if enabled */
+       u32 timeout;
+       /* Element data size */
+       size_t dsize;
+       /* Offsets to extensions in elements */
+       size_t offset[IPSET_EXT_ID_MAX];
        /* The type specific data */
        void *data;
 };
 
-struct ip_set_counter {
-       atomic64_t bytes;
-       atomic64_t packets;
-};
+static inline void
+ip_set_ext_destroy(struct ip_set *set, void *data)
+{
+       /* Check that the extension is enabled for the set and
+        * call its destroy function for its extension part in data.
+        */
+       if (SET_WITH_COMMENT(set))
+               ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
+                       ext_comment(data, set));
+}
+
+static inline int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+       u32 cadt_flags = 0;
+
+       if (SET_WITH_TIMEOUT(set))
+               if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                                          htonl(set->timeout))))
+                       return -EMSGSIZE;
+       if (SET_WITH_COUNTER(set))
+               cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+       if (SET_WITH_COMMENT(set))
+               cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+
+       if (!cadt_flags)
+               return 0;
+       return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
 
 static inline void
 ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
@@ -248,12 +317,13 @@ ip_set_init_counter(struct ip_set_counter *counter,
 }
 
 /* register and unregister set references */
-extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
-extern void ip_set_put_byindex(ip_set_id_t index);
-extern const char *ip_set_name_byindex(ip_set_id_t index);
-extern ip_set_id_t ip_set_nfnl_get(const char *name);
-extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
-extern void ip_set_nfnl_put(ip_set_id_t index);
+extern ip_set_id_t ip_set_get_byname(struct net *net,
+                                    const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
 
 /* API for iptables set match, and SET target */
 
@@ -272,6 +342,8 @@ extern void *ip_set_alloc(size_t size);
 extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
+                             size_t len);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
                                 struct ip_set_ext *ext);
 
@@ -389,13 +461,40 @@ bitmap_bytes(u32 a, u32 b)
 }
 
 #include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_comment.h>
 
-#define IP_SET_INIT_KEXT(skb, opt, map)                        \
+static inline int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+                     const void *e, bool active)
+{
+       if (SET_WITH_TIMEOUT(set)) {
+               unsigned long *timeout = ext_timeout(e, set);
+
+               if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                       htonl(active ? ip_set_timeout_get(timeout)
+                               : *timeout)))
+                       return -EMSGSIZE;
+       }
+       if (SET_WITH_COUNTER(set) &&
+           ip_set_put_counter(skb, ext_counter(e, set)))
+               return -EMSGSIZE;
+       if (SET_WITH_COMMENT(set) &&
+           ip_set_put_comment(skb, ext_comment(e, set)))
+               return -EMSGSIZE;
+       return 0;
+}
+
+#define IP_SET_INIT_KEXT(skb, opt, set)                        \
        { .bytes = (skb)->len, .packets = 1,            \
-         .timeout = ip_set_adt_opt_timeout(opt, map) }
+         .timeout = ip_set_adt_opt_timeout(opt, set) }
 
-#define IP_SET_INIT_UEXT(map)                          \
+#define IP_SET_INIT_UEXT(set)                          \
        { .bytes = ULLONG_MAX, .packets = ULLONG_MAX,   \
-         .timeout = (map)->timeout }
+         .timeout = (set)->timeout }
+
+#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
+
+#define IPSET_CONCAT(a, b)             a##b
+#define IPSET_TOKEN(a, b)              IPSET_CONCAT(a, b)
 
 #endif /*_IP_SET_H */
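With dsize and the per-extension offset[] array now stored in struct ip_set, extension data (counters, timeouts, comments) is laid out inside each element and reached through the ext_*() accessors above. A minimal sketch of that access pattern (example_account_element() is hypothetical; real set types go through the ip_set_* helper wrappers):

#include <linux/netfilter/ipset/ip_set.h>

static void example_account_element(struct ip_set *set, void *elem,
				    unsigned int pkt_len)
{
	if (SET_WITH_COUNTER(set)) {
		struct ip_set_counter *counter = ext_counter(elem, set);

		atomic64_inc(&counter->packets);
		atomic64_add(pkt_len, &counter->bytes);
	}
	if (SET_WITH_TIMEOUT(set)) {
		unsigned long *timeout = ext_timeout(elem, set);

		/* Refresh *timeout here; the exact policy is set-type specific. */
		(void)timeout;
	}
}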
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644 (file)
index 0000000..21217ea
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef _IP_SET_COMMENT_H
+#define _IP_SET_COMMENT_H
+
+/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline char*
+ip_set_comment_uget(struct nlattr *tb)
+{
+       return nla_data(tb);
+}
+
+static inline void
+ip_set_init_comment(struct ip_set_comment *comment,
+                   const struct ip_set_ext *ext)
+{
+       size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+       if (unlikely(comment->str)) {
+               kfree(comment->str);
+               comment->str = NULL;
+       }
+       if (!len)
+               return;
+       if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+               len = IPSET_MAX_COMMENT_SIZE;
+       comment->str = kzalloc(len + 1, GFP_ATOMIC);
+       if (unlikely(!comment->str))
+               return;
+       strlcpy(comment->str, ext->comment, len + 1);
+}
+
+static inline int
+ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+{
+       if (!comment->str)
+               return 0;
+       return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+}
+
+static inline void
+ip_set_comment_free(struct ip_set_comment *comment)
+{
+       if (unlikely(!comment->str))
+               return;
+       kfree(comment->str);
+       comment->str = NULL;
+}
+
+#endif
+#endif
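The new header implements the comment extension: the string is duplicated from the userspace attribute on add, emitted as IPSET_ATTR_COMMENT when the set is listed, and released through the extension's destroy callback. A hypothetical round trip for illustration (real set types wire these calls through ip_set_extensions[] and their own add/list/destroy paths):

#include <linux/netfilter/ipset/ip_set.h>

static int example_comment_roundtrip(struct ip_set *set, void *elem,
				     const struct ip_set_ext *ext,
				     struct sk_buff *skb)
{
	struct ip_set_comment *comment = ext_comment(elem, set);
	int ret;

	ip_set_init_comment(comment, ext);	/* duplicate ext->comment, if any */
	ret = ip_set_put_comment(skb, comment);	/* 0, or -EMSGSIZE while listing */
	ip_set_comment_free(comment);		/* normally done when the element dies */

	return ret;
}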
index 3aac04167ca70699a60d63c50486eff9d0cc910a..83c2f9e0886cc537f57bf1db5cbfc035fbe93596 100644 (file)
@@ -23,8 +23,8 @@
 /* Set is defined with timeout support: timeout value may be 0 */
 #define IPSET_NO_TIMEOUT       UINT_MAX
 
-#define ip_set_adt_opt_timeout(opt, map)       \
-((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout)
+#define ip_set_adt_opt_timeout(opt, set)       \
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
 
 static inline unsigned int
 ip_set_timeout_uget(struct nlattr *tb)
index 127d0b90604fa08486a88437fbd9a8e09b315148..275505792664ae4e835401a0503d5d3d9fa66808 100644 (file)
@@ -23,6 +23,6 @@ struct ip_conntrack_stat {
 };
 
 /* call to create an explicit dependency on nf_conntrack. */
-extern void need_conntrack(void);
+void need_conntrack(void);
 
 #endif /* _NF_CONNTRACK_COMMON_H */
index f381020eee92835fa68ae19858e1a4ca0437138c..858d9b214053ff141e5e856ce98edbf50412bc00 100644 (file)
@@ -29,13 +29,13 @@ struct nf_ct_h323_master {
 
 struct nf_conn;
 
-extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
-                        TransportAddress *taddr,
-                        union nf_inet_addr *addr, __be16 *port);
-extern void nf_conntrack_h245_expect(struct nf_conn *new,
-                                    struct nf_conntrack_expect *this);
-extern void nf_conntrack_q931_expect(struct nf_conn *new,
-                                    struct nf_conntrack_expect *this);
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+                 TransportAddress *taddr, union nf_inet_addr *addr,
+                 __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+                             struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+                             struct nf_conntrack_expect *this);
 extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  H245_TransportAddress *taddr,
index 6a0664c0c45197a7abece81ecb35fae6e638d8ae..ec2ffaf418c8e8d31e1cca95fea02a85aa48089f 100644 (file)
@@ -87,8 +87,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
 /* delete keymap entries */
 void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
 
-extern void nf_ct_gre_keymap_flush(struct net *net);
-extern void nf_nat_need_gre(void);
+void nf_ct_gre_keymap_flush(struct net *net);
+void nf_nat_need_gre(void);
 
 #endif /* __KERNEL__ */
 #endif /* _CONNTRACK_PROTO_GRE_H */
index ba7f571a2b1cba1f2381ee50c7030946543f31ac..d5af3c27fb7de0385b11396ac241991bacb3684e 100644 (file)
@@ -107,85 +107,93 @@ enum sdp_header_types {
        SDP_HDR_MEDIA,
 };
 
-extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
-                                      unsigned int protoff,
-                                      unsigned int dataoff,
-                                      const char **dptr,
-                                      unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
-                                         unsigned int protoff, s16 off);
-extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
-                                             unsigned int protoff,
-                                             unsigned int dataoff,
-                                             const char **dptr,
-                                             unsigned int *datalen,
-                                             struct nf_conntrack_expect *exp,
-                                             unsigned int matchoff,
-                                             unsigned int matchlen);
-extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
-                                           unsigned int protoff,
-                                           unsigned int dataoff,
-                                           const char **dptr,
-                                           unsigned int *datalen,
-                                           unsigned int sdpoff,
-                                           enum sdp_header_types type,
-                                           enum sdp_header_types term,
-                                           const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
-                                           unsigned int protoff,
-                                           unsigned int dataoff,
-                                           const char **dptr,
-                                           unsigned int *datalen,
-                                           unsigned int matchoff,
-                                           unsigned int matchlen,
-                                           u_int16_t port);
-extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
-                                              unsigned int protoff,
-                                              unsigned int dataoff,
-                                              const char **dptr,
-                                              unsigned int *datalen,
-                                              unsigned int sdpoff,
-                                              const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
-                                            unsigned int protoff,
-                                            unsigned int dataoff,
-                                            const char **dptr,
-                                            unsigned int *datalen,
-                                            struct nf_conntrack_expect *rtp_exp,
-                                            struct nf_conntrack_expect *rtcp_exp,
-                                            unsigned int mediaoff,
-                                            unsigned int medialen,
-                                            union nf_inet_addr *rtp_addr);
-
-extern int ct_sip_parse_request(const struct nf_conn *ct,
-                               const char *dptr, unsigned int datalen,
-                               unsigned int *matchoff, unsigned int *matchlen,
-                               union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
-                            unsigned int dataoff, unsigned int datalen,
-                            enum sip_header_types type,
-                            unsigned int *matchoff, unsigned int *matchlen);
-extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
-                                  unsigned int *dataoff, unsigned int datalen,
-                                  enum sip_header_types type, int *in_header,
-                                  unsigned int *matchoff, unsigned int *matchlen,
-                                  union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
-                                     unsigned int dataoff, unsigned int datalen,
-                                     const char *name,
-                                     unsigned int *matchoff, unsigned int *matchlen,
-                                     union nf_inet_addr *addr, bool delim);
-extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
-                                       unsigned int off, unsigned int datalen,
-                                       const char *name,
-                                       unsigned int *matchoff, unsigned int *matchen,
-                                       unsigned int *val);
-
-extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
-                                unsigned int dataoff, unsigned int datalen,
+struct nf_nat_sip_hooks {
+       unsigned int (*msg)(struct sk_buff *skb,
+                           unsigned int protoff,
+                           unsigned int dataoff,
+                           const char **dptr,
+                           unsigned int *datalen);
+
+       void (*seq_adjust)(struct sk_buff *skb,
+                          unsigned int protoff, s16 off);
+
+       unsigned int (*expect)(struct sk_buff *skb,
+                              unsigned int protoff,
+                              unsigned int dataoff,
+                              const char **dptr,
+                              unsigned int *datalen,
+                              struct nf_conntrack_expect *exp,
+                              unsigned int matchoff,
+                              unsigned int matchlen);
+
+       unsigned int (*sdp_addr)(struct sk_buff *skb,
+                                unsigned int protoff,
+                                unsigned int dataoff,
+                                const char **dptr,
+                                unsigned int *datalen,
+                                unsigned int sdpoff,
                                 enum sdp_header_types type,
                                 enum sdp_header_types term,
-                                unsigned int *matchoff, unsigned int *matchlen);
+                                const union nf_inet_addr *addr);
+
+       unsigned int (*sdp_port)(struct sk_buff *skb,
+                                unsigned int protoff,
+                                unsigned int dataoff,
+                                const char **dptr,
+                                unsigned int *datalen,
+                                unsigned int matchoff,
+                                unsigned int matchlen,
+                                u_int16_t port);
+
+       unsigned int (*sdp_session)(struct sk_buff *skb,
+                                   unsigned int protoff,
+                                   unsigned int dataoff,
+                                   const char **dptr,
+                                   unsigned int *datalen,
+                                   unsigned int sdpoff,
+                                   const union nf_inet_addr *addr);
+
+       unsigned int (*sdp_media)(struct sk_buff *skb,
+                                 unsigned int protoff,
+                                 unsigned int dataoff,
+                                 const char **dptr,
+                                 unsigned int *datalen,
+                                 struct nf_conntrack_expect *rtp_exp,
+                                 struct nf_conntrack_expect *rtcp_exp,
+                                 unsigned int mediaoff,
+                                 unsigned int medialen,
+                                 union nf_inet_addr *rtp_addr);
+};
+extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+                        unsigned int datalen, unsigned int *matchoff,
+                        unsigned int *matchlen, union nf_inet_addr *addr,
+                        __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+                     unsigned int dataoff, unsigned int datalen,
+                     enum sip_header_types type, unsigned int *matchoff,
+                     unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+                           unsigned int *dataoff, unsigned int datalen,
+                           enum sip_header_types type, int *in_header,
+                           unsigned int *matchoff, unsigned int *matchlen,
+                           union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+                              unsigned int dataoff, unsigned int datalen,
+                              const char *name, unsigned int *matchoff,
+                              unsigned int *matchlen, union nf_inet_addr *addr,
+                              bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+                                unsigned int off, unsigned int datalen,
+                                const char *name, unsigned int *matchoff,
+                                unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+                         unsigned int dataoff, unsigned int datalen,
+                         enum sdp_header_types type,
+                         enum sdp_header_types term,
+                         unsigned int *matchoff, unsigned int *matchlen);
 
 #endif /* __KERNEL__ */
 #endif /* __NF_CONNTRACK_SIP_H__ */
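
Instead of seven separately exported hook pointers, the NAT side now publishes a single const struct through the nf_nat_sip_hooks pointer, and callers fetch the whole vector at once. A sketch of both sides, assuming RCU is used for publication as with other netfilter hook pointers (the registration code is not part of this hunk, so treat the names and bodies as assumptions):

#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack_sip.h>

static unsigned int my_nat_sip_msg(struct sk_buff *skb, unsigned int protoff,
                                   unsigned int dataoff, const char **dptr,
                                   unsigned int *datalen)
{
        return NF_ACCEPT;       /* real code would mangle the SIP message */
}

static const struct nf_nat_sip_hooks sip_hooks = {
        .msg = my_nat_sip_msg,
        /* .seq_adjust, .expect and the .sdp_* members filled in likewise */
};

static int __init my_nat_sip_init(void)
{
        rcu_assign_pointer(nf_nat_sip_hooks, &sip_hooks);
        return 0;
}

/* Consumer side, e.g. in the conntrack helper: */
static void use_hooks(struct sk_buff *skb, unsigned int protoff,
                      unsigned int dataoff, const char **dptr,
                      unsigned int *datalen)
{
        const struct nf_nat_sip_hooks *hooks;

        rcu_read_lock();
        hooks = rcu_dereference(nf_nat_sip_hooks);
        if (hooks)
                hooks->msg(skb, protoff, dataoff, dptr, datalen);
        rcu_read_unlock();
}
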
index cadb7402d7a713fdcedbe4b3cb77c6095854f3ff..28c74367e900ac679aa5feba98b7629ea6338af3 100644 (file)
@@ -14,6 +14,9 @@ struct nfnl_callback {
        int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 
                    const struct nlmsghdr *nlh,
                    const struct nlattr * const cda[]);
+       int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+                         const struct nlmsghdr *nlh,
+                         const struct nlattr * const cda[]);
        const struct nla_policy *policy;        /* netlink attribute policy */
        const u_int16_t attr_count;             /* number of nlattr's */
 };
@@ -23,22 +26,24 @@ struct nfnetlink_subsystem {
        __u8 subsys_id;                 /* nfnetlink subsystem ID */
        __u8 cb_count;                  /* number of callbacks */
        const struct nfnl_callback *cb; /* callback for individual types */
+       int (*commit)(struct sk_buff *skb);
+       int (*abort)(struct sk_buff *skb);
 };
 
-extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
 
-extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
-                                          u32 dst_portid, gfp_t gfp_mask);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
-                         unsigned int group, int echo, gfp_t flags);
-extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net,
-                            u32 portid, int flags);
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+                                   u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+                  unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+                     int flags);
 
-extern void nfnl_lock(__u8 subsys_id);
-extern void nfnl_unlock(__u8 subsys_id);
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
 
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
        MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
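
The new call_batch callback, together with the per-subsystem commit and abort hooks, lets a subsystem stage every message of a netlink batch and then apply or discard the whole set transactionally. A rough sketch of a subsystem opting in (the subsystem id, message layout and callback bodies are hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netfilter/nfnetlink.h>

#define MY_SUBSYS_ID 12         /* hypothetical subsystem id */

static int my_new_obj(struct sock *nl, struct sk_buff *skb,
                      const struct nlmsghdr *nlh,
                      const struct nlattr * const cda[])
{
        /* queue the change; it only takes effect in ->commit() */
        return 0;
}

static const struct nfnl_callback my_cbs[] = {
        [0] = { .call_batch = my_new_obj, .attr_count = 0 },
};

static int my_commit(struct sk_buff *skb)
{
        return 0;               /* apply all queued changes atomically */
}

static int my_abort(struct sk_buff *skb)
{
        return 0;               /* throw away queued changes on error */
}

static const struct nfnetlink_subsystem my_subsys = {
        .name           = "my-subsys",
        .subsys_id      = MY_SUBSYS_ID,
        .cb_count       = ARRAY_SIZE(my_cbs),
        .cb             = my_cbs,
        .commit         = my_commit,
        .abort          = my_abort,
};

static int __init my_init(void)
{
        return nfnetlink_subsys_register(&my_subsys);
}
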
index bb4bbc9b7a18c4834724eabe574b76670806c8c4..b2e85e59f76085cb0e56294c78b63d3a04a0762a 100644 (file)
@@ -6,8 +6,8 @@
 
 struct nf_acct;
 
-extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
-extern void nfnl_acct_put(struct nf_acct *acct);
-extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 
 #endif /* _NFNL_ACCT_H */
index dd49566315c616f1d4db4cba47ee1dc9ecaefa26..a3e215bb0241d47379bce4ff6e81ba2b4995d3c3 100644 (file)
@@ -229,50 +229,48 @@ struct xt_table_info {
 
 #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
                          + nr_cpu_ids * sizeof(char *))
-extern int xt_register_target(struct xt_target *target);
-extern void xt_unregister_target(struct xt_target *target);
-extern int xt_register_targets(struct xt_target *target, unsigned int n);
-extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
-
-extern int xt_register_match(struct xt_match *target);
-extern void xt_unregister_match(struct xt_match *target);
-extern int xt_register_matches(struct xt_match *match, unsigned int n);
-extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
-
-extern int xt_check_match(struct xt_mtchk_param *,
-                         unsigned int size, u_int8_t proto, bool inv_proto);
-extern int xt_check_target(struct xt_tgchk_param *,
-                          unsigned int size, u_int8_t proto, bool inv_proto);
-
-extern struct xt_table *xt_register_table(struct net *net,
-                                         const struct xt_table *table,
-                                         struct xt_table_info *bootstrap,
-                                         struct xt_table_info *newinfo);
-extern void *xt_unregister_table(struct xt_table *table);
-
-extern struct xt_table_info *xt_replace_table(struct xt_table *table,
-                                             unsigned int num_counters,
-                                             struct xt_table_info *newinfo,
-                                             int *error);
-
-extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
-extern struct xt_match *xt_request_find_match(u8 af, const char *name,
-                                             u8 revision);
-extern struct xt_target *xt_request_find_target(u8 af, const char *name,
-                                               u8 revision);
-extern int xt_find_revision(u8 af, const char *name, u8 revision,
-                           int target, int *err);
-
-extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
-                                          const char *name);
-extern void xt_table_unlock(struct xt_table *t);
-
-extern int xt_proto_init(struct net *net, u_int8_t af);
-extern void xt_proto_fini(struct net *net, u_int8_t af);
-
-extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
-extern void xt_free_table_info(struct xt_table_info *info);
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
+
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+                  bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+                   bool inv_proto);
+
+struct xt_table *xt_register_table(struct net *net,
+                                  const struct xt_table *table,
+                                  struct xt_table_info *bootstrap,
+                                  struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
+
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+                                      unsigned int num_counters,
+                                      struct xt_table_info *newinfo,
+                                      int *error);
+
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+                    int *err);
+
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+                                   const char *name);
+void xt_table_unlock(struct xt_table *t);
+
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
 
 /**
  * xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
        return ret;
 }
 
-extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
 
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
@@ -414,25 +412,25 @@ struct _compat_xt_align {
 
 #define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
 
-extern void xt_compat_lock(u_int8_t af);
-extern void xt_compat_unlock(u_int8_t af);
-
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
-extern void xt_compat_flush_offsets(u_int8_t af);
-extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
-extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
-
-extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
-                                    void **dstptr, unsigned int *size);
-extern int xt_compat_match_to_user(const struct xt_entry_match *m,
-                                  void __user **dstptr, unsigned int *size);
-
-extern int xt_compat_target_offset(const struct xt_target *target);
-extern void xt_compat_target_from_user(struct xt_entry_target *t,
-                                      void **dstptr, unsigned int *size);
-extern int xt_compat_target_to_user(const struct xt_entry_target *t,
-                                   void __user **dstptr, unsigned int *size);
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
+
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+                             unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+                           void __user **dstptr, unsigned int *size);
+
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+                               unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+                            void __user **dstptr, unsigned int *size);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
index dfb4d9e52bcb3a1775eebd627d1d758aaaed6c4b..8ab1c278b66da77229647e08ef8e0d948d4e6122 100644 (file)
@@ -25,7 +25,7 @@ enum nf_br_hook_priorities {
 #define BRNF_PPPoE                     0x20
 
 /* Only used in br_forward.c */
-extern int nf_bridge_copy_header(struct sk_buff *skb);
+int nf_bridge_copy_header(struct sk_buff *skb);
 static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
 {
        if (skb->nf_bridge &&
@@ -53,7 +53,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
        return 0;
 }
 
-extern int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sk_buff *skb);
 /* Only used in br_device.c */
 static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 {
index dfaf116b3e8125a3f36511bfe35b64fc81d463e3..6e4591bb54d495d2f4ee3f82058305be7539d025 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
-extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
-                                  unsigned int dataoff, u_int8_t protocol);
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+                      unsigned int dataoff, u_int8_t protocol);
 #endif /*__LINUX_IP_NETFILTER_H*/
index 2d4df6ce043efab2f9017bc5359b90846f085fb7..64dad1cc1a4bc86d391e8e9cbffea5091b9bfa53 100644 (file)
 
 
 #ifdef CONFIG_NETFILTER
-extern int ip6_route_me_harder(struct sk_buff *skb);
-extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
-                                   unsigned int dataoff, u_int8_t protocol);
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+                       unsigned int dataoff, u_int8_t protocol);
 
-extern int ipv6_netfilter_init(void);
-extern void ipv6_netfilter_fini(void);
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
 
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
index 3ea4cde8701ce1e97d6a39a4a1264452468a5254..96235b53a3fddfa840d56461e1c48ec0fd491548 100644 (file)
@@ -269,9 +269,13 @@ static inline int NFS_STALE(const struct inode *inode)
        return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
 }
 
-static inline int NFS_FSCACHE(const struct inode *inode)
+static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
 {
-       return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+#ifdef CONFIG_NFS_FSCACHE
+       return NFS_I(inode)->fscache;
+#else
+       return NULL;
+#endif
 }
 
 static inline __u64 NFS_FILEID(const struct inode *inode)
@@ -457,14 +461,11 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-                       unsigned long);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       loff_t pos, bool uio);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       loff_t pos, bool uio);
+extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos);
+extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos);
 
 /*
  * linux/fs/nfs/dir.c
index b8cedced50c9c70dd00c680d42a424bcdb90a8a9..f9c0a6cb41e945e148db3d4c927f84e25e76c33b 100644 (file)
@@ -41,6 +41,7 @@ struct nfs_client {
 #define NFS_CS_DISCRTRY                1               /* - disconnect on RPC retry */
 #define NFS_CS_MIGRATION       2               /* - transparent state migr */
 #define NFS_CS_INFINITE_SLOTS  3               /* - don't limit TCP slots */
+#define NFS_CS_NO_RETRANS_TIMEOUT      4       /* - Disable retransmit timeouts */
        struct sockaddr_storage cl_addr;        /* server identifier */
        size_t                  cl_addrlen;
        char *                  cl_hostname;    /* hostname of server */
index 01fd84b566f773505122f235ceb3f70ec158bb04..49f52c8f4422ffd8ce6f0e17d33e960e92361076 100644 (file)
@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
        struct inode * (*open_context) (struct inode *dir,
                                struct nfs_open_context *ctx,
                                int open_flags,
-                               struct iattr *iattr);
+                               struct iattr *iattr,
+                               int *);
        int (*have_delegation)(struct inode *, fmode_t);
        int (*return_delegation)(struct inode *);
        struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
index 535cecf1e02f7823a1ec3f416c3b06382ea6d8fa..fcd63baee5f28b3361625cc1877b9a275d221204 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __OF_IRQ_H
 #define __OF_IRQ_H
 
-#if defined(CONFIG_OF)
-struct of_irq;
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
 #include <linux/ioport.h>
 #include <linux/of.h>
 
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently.  However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
 /**
  * of_irq - container for device_node/irq_specifier pair for an irq controller
  * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
                struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 extern void of_irq_init(const struct of_device_id *matches);
 
-#endif /* CONFIG_OF_IRQ */
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently.  However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
deleted file mode 100644 (file)
index c841282..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __OF_RESERVED_MEM_H
-#define __OF_RESERVED_MEM_H
-
-#ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
-void of_reserved_mem_device_release(struct device *dev);
-void early_init_dt_scan_reserved_mem(void);
-#else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
-static inline void of_reserved_mem_device_release(struct device *dev) { }
-static inline void early_init_dt_scan_reserved_mem(void) { }
-#endif
-
-#endif /* __OF_RESERVED_MEM_H */
diff --git a/include/linux/opp.h b/include/linux/opp.h
deleted file mode 100644 (file)
index 3aca2b8..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- *     Nishanth Menon
- *     Romit Dasgupta
- *     Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_OPP_H__
-#define __LINUX_OPP_H__
-
-#include <linux/err.h>
-#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-
-struct opp;
-struct device;
-
-enum opp_event {
-       OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
-};
-
-#if defined(CONFIG_PM_OPP)
-
-unsigned long opp_get_voltage(struct opp *opp);
-
-unsigned long opp_get_freq(struct opp *opp);
-
-int opp_get_opp_count(struct device *dev);
-
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
-                               bool available);
-
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
-
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
-
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
-
-int opp_enable(struct device *dev, unsigned long freq);
-
-int opp_disable(struct device *dev, unsigned long freq);
-
-struct srcu_notifier_head *opp_get_notifier(struct device *dev);
-#else
-static inline unsigned long opp_get_voltage(struct opp *opp)
-{
-       return 0;
-}
-
-static inline unsigned long opp_get_freq(struct opp *opp)
-{
-       return 0;
-}
-
-static inline int opp_get_opp_count(struct device *dev)
-{
-       return 0;
-}
-
-static inline struct opp *opp_find_freq_exact(struct device *dev,
-                                       unsigned long freq, bool available)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_floor(struct device *dev,
-                                       unsigned long *freq)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_ceil(struct device *dev,
-                                       unsigned long *freq)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static inline int opp_add(struct device *dev, unsigned long freq,
-                                       unsigned long u_volt)
-{
-       return -EINVAL;
-}
-
-static inline int opp_enable(struct device *dev, unsigned long freq)
-{
-       return 0;
-}
-
-static inline int opp_disable(struct device *dev, unsigned long freq)
-{
-       return 0;
-}
-
-static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif         /* CONFIG_PM_OPP */
-
-#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
-       return -EINVAL;
-}
-#endif
-
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int opp_init_cpufreq_table(struct device *dev,
-                           struct cpufreq_frequency_table **table);
-void opp_free_cpufreq_table(struct device *dev,
-                               struct cpufreq_frequency_table **table);
-#else
-static inline int opp_init_cpufreq_table(struct device *dev,
-                           struct cpufreq_frequency_table **table)
-{
-       return -EINVAL;
-}
-
-static inline
-void opp_free_cpufreq_table(struct device *dev,
-                               struct cpufreq_frequency_table **table)
-{
-}
-#endif         /* CONFIG_CPU_FREQ */
-
-#endif         /* __LINUX_OPP_H__ */
index 6d53675c2b54691225b12f3f23c914aca86c35ac..98ada58f9942855b90583de9c5ed324993d39101 100644 (file)
@@ -329,7 +329,9 @@ static inline void set_page_writeback(struct page *page)
  * System with lots of page flags available. This allows separate
  * flags for PageHead() and PageTail() checks of compound pages so that bit
  * tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths.
+ * generally not used in hot code paths except arch/powerpc/mm/init_64.c
+ * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
+ * and avoid handling those in real mode.
  */
 __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
 __PAGEFLAG(Tail, tail)
index da172f956ad6f0a6ff883d125ca70e9f97510612..d3a888ae4b2e3ad030bdb5f89354eea2f2567d0b 100644 (file)
@@ -330,8 +330,6 @@ struct pci_dev {
        unsigned int    msix_enabled:1;
        unsigned int    ari_enabled:1;  /* ARI forwarding */
        unsigned int    is_managed:1;
-       unsigned int    is_pcie:1;      /* Obsolete. Will be removed.
-                                          Use pci_is_pcie() instead */
        unsigned int    needs_freset:1; /* Dev requires fundamental reset */
        unsigned int    state_saved:1;
        unsigned int    is_physfn:1;
@@ -472,6 +470,10 @@ struct pci_bus {
 /*
  * Returns true if the pci bus is root (behind host-pci bridge),
  * false otherwise
+ *
+ * Some code assumes that "bus->self == NULL" means that bus is a root bus.
+ * This is incorrect because "virtual" buses added for SR-IOV (via
+ * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
  */
 static inline bool pci_is_root_bus(struct pci_bus *pbus)
 {
@@ -1749,11 +1751,11 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
  * pci_is_pcie - check if the PCI device is PCI Express capable
  * @dev: PCI device
  *
- * Retrun true if the PCI device is PCI Express capable, false otherwise.
+ * Returns: true if the PCI device is PCI Express capable, false otherwise.
  */
 static inline bool pci_is_pcie(struct pci_dev *dev)
 {
-       return !!pci_pcie_cap(dev);
+       return pci_pcie_cap(dev);
 }
 
 /**
index 866e85c5eb94517fc87ca1d49bc9acbde74dcc74..c8ba627c1d608733b8480bb929d9e81d97e57fa0 100644 (file)
@@ -294,9 +294,31 @@ struct ring_buffer;
  */
 struct perf_event {
 #ifdef CONFIG_PERF_EVENTS
-       struct list_head                group_entry;
+       /*
+        * entry onto perf_event_context::event_list;
+        *   modifications require ctx->lock
+        *   RCU safe iterations.
+        */
        struct list_head                event_entry;
+
+       /*
+        * XXX: group_entry and sibling_list should be mutually exclusive;
+        * either you're a sibling on a group, or you're the group leader.
+        * Rework the code to always use the same list element.
+        *
+        * Locked for modification by both ctx->mutex and ctx->lock; holding
+        * either suffices for read.
+        */
+       struct list_head                group_entry;
        struct list_head                sibling_list;
+
+       /*
+        * We need storage to track the entries in perf_pmu_migrate_context; we
+        * cannot use the event_entry because of RCU and we want to keep the
+        * group intact which avoids us using the other two entries.
+        */
+       struct list_head                migrate_entry;
+
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
similarity index 97%
rename from include/linux/i2c/at24.h
rename to include/linux/platform_data/at24.h
index 285025a9cdc9a0533e2fb61abbdc71b1808224e8..c42aa89d34eeb46ff0ec5f4e68229ad1fa0fad2a 100644 (file)
@@ -28,7 +28,7 @@
  *
  * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
  * {
- *     u8 *mac_addr = ethernet_pdata->mac_addr;
+ *     u8 *mac_addr = ethernet_pdata->mac_addr;
  *     off_t offset = context;
  *
  *     // Read MAC addr from EEPROM
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644 (file)
index 0000000..5151b00
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *     Nishanth Menon
+ *     Romit Dasgupta
+ *     Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+
+struct dev_pm_opp;
+struct device;
+
+enum dev_pm_opp_event {
+       OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+
+int dev_pm_opp_get_opp_count(struct device *dev);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                             unsigned long freq,
+                                             bool available);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+                                             unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+                                            unsigned long *freq);
+
+int dev_pm_opp_add(struct device *dev, unsigned long freq,
+                  unsigned long u_volt);
+
+int dev_pm_opp_enable(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_disable(struct device *dev, unsigned long freq);
+
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+#else
+static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
+{
+       return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+       return 0;
+}
+
+static inline int dev_pm_opp_get_opp_count(struct device *dev)
+{
+       return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                       unsigned long freq, bool available)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+                                       unsigned long *freq)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+                                       unsigned long *freq)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+                                       unsigned long u_volt)
+{
+       return -EINVAL;
+}
+
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+{
+       return 0;
+}
+
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+{
+       return 0;
+}
+
+static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
+                                                       struct device *dev)
+{
+       return ERR_PTR(-EINVAL);
+}
+#endif         /* CONFIG_PM_OPP */
+
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+       return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+                           struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+                               struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
+                           struct cpufreq_frequency_table **table)
+{
+       return -EINVAL;
+}
+
+static inline
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+                               struct cpufreq_frequency_table **table)
+{
+}
+#endif         /* CONFIG_CPU_FREQ */
+
+#endif         /* __LINUX_OPP_H__ */
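
The new header is a straight rename of the old opp.h interface, with a dev_pm_opp_ prefix on the types and functions. A consumer-side sketch, for example from a cpufreq or devfreq driver picking the nearest operating point for a target frequency (the rcu_read_lock() around the lookup is an assumption about how the lookups were protected at the time; it is not visible in this header):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int my_set_target(struct device *dev, unsigned long *freq,
                         unsigned long *volt)
{
        struct dev_pm_opp *opp;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq);     /* rounds *freq up */
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        *volt = dev_pm_opp_get_voltage(opp);
        *freq = dev_pm_opp_get_freq(opp);
        rcu_read_unlock();

        /* program the regulator to *volt, then the clock to *freq */
        return 0;
}
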
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644 (file)
index 0000000..4e25041
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+ * powercap.h: Data types and headers for sysfs power capping interface
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#ifndef __POWERCAP_H__
+#define __POWERCAP_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+/*
+ * A power cap class device can contain multiple powercap control_types.
+ * Each control_type can have multiple power zones, which can be independently
+ * controlled. Each power zone can have one or more constraints.
+ */
+
+struct powercap_control_type;
+struct powercap_zone;
+struct powercap_zone_constraint;
+
+/**
+ * struct powercap_control_type_ops - Define control type callbacks
+ * @set_enable:                Enable/Disable whole control type.
+ *                     Default is enabled. But this callback allows all zones
+ *                     to be in disable state and remove any applied power
+ *                     limits. If disabled power zone can only be monitored
+ *                     not controlled.
+ * @get_enable:                get Enable/Disable status.
+ * @release:           Callback to inform that last reference to this
+ *                     control type is closed. So it is safe to free data
+ *                     structure associated with this control type.
+ *                     This callback is mandatory if the client own memory
+ *                     for the control type.
+ *
+ * This structure defines control type callbacks to be implemented by client
+ * drivers
+ */
+struct powercap_control_type_ops {
+       int (*set_enable) (struct powercap_control_type *, bool mode);
+       int (*get_enable) (struct powercap_control_type *, bool *mode);
+       int (*release) (struct powercap_control_type *);
+};
+
+/**
+ * struct powercap_control_type- Defines a powercap control_type
+ * @name:              name of control_type
+ * @dev:               device for this control_type
+ * @idr:               idr to have unique id for its child
+ * @root_node:         Root holding power zones for this control_type
+ * @ops:               Pointer to callback struct
+ * @node_lock:         mutex for control type
+ * @allocated:         This is possible that client owns the memory
+ *                     used by this structure. In this case
+ *                     this flag is set to false by framework to
+ *                     prevent deallocation during release process.
+ *                     Otherwise this flag is set to true.
+ * @ctrl_inst:         link to the control_type list
+ *
+ * Defines powercap control_type. This acts as a container for power
+ * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
+ * All fields are private and should not be used by client drivers.
+ */
+struct powercap_control_type {
+       struct device dev;
+       struct idr idr;
+       int nr_zones;
+       const struct powercap_control_type_ops *ops;
+       struct mutex lock;
+       bool allocated;
+       struct list_head node;
+};
+
+/**
+ * struct powercap_zone_ops - Define power zone callbacks
+ * @get_max_energy_range_uj:   Get maximum range of energy counter in
+ *                             micro-joules.
+ * @get_energy_uj:             Get current energy counter in micro-joules.
+ * @reset_energy_uj:           Reset micro-joules energy counter.
+ * @get_max_power_range_uw:    Get maximum range of power counter in
+ *                             micro-watts.
+ * @get_power_uw:              Get current power counter in micro-watts.
+ * @set_enable:                        Enable/Disable power zone controls.
+ *                             Default is enabled.
+ * @get_enable:                        get Enable/Disable status.
+ * @release:                   Callback to inform that last reference to this
+ *                             control type is closed. So it is safe to free
+ *                             data structure associated with this
+ *                             control type. Mandatory, if client driver owns
+ *                             the power_zone memory.
+ *
+ * This structure defines zone callbacks to be implemented by client drivers.
+ * Client drives can define both energy and power related callbacks. But at
+ * the least one type (either power or energy) is mandatory. Client drivers
+ * should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_ops {
+       int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
+       int (*get_energy_uj) (struct powercap_zone *, u64 *);
+       int (*reset_energy_uj) (struct powercap_zone *);
+       int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
+       int (*get_power_uw) (struct powercap_zone *, u64 *);
+       int (*set_enable) (struct powercap_zone *, bool mode);
+       int (*get_enable) (struct powercap_zone *, bool *mode);
+       int (*release) (struct powercap_zone *);
+};
+
+#define        POWERCAP_ZONE_MAX_ATTRS         6
+#define        POWERCAP_CONSTRAINTS_ATTRS      8
+#define MAX_CONSTRAINTS_PER_ZONE       10
+/**
+ * struct powercap_zone- Defines instance of a power cap zone
+ * @id:                        Unique id
+ * @name:              Power zone name.
+ * @control_type_inst: Control type instance for this zone.
+ * @ops:               Pointer to the zone operation structure.
+ * @dev:               Instance of a device.
+ * @const_id_cnt:      Number of constraint defined.
+ * @idr:               Instance to an idr entry for children zones.
+ * @parent_idr:                To remove reference from the parent idr.
+ * @private_data:      Private data pointer if any for this zone.
+ * @zone_dev_attrs:    Attributes associated with this device.
+ * @zone_attr_count:   Attribute count.
+ * @dev_zone_attr_group: Attribute group for attributes.
+ * @dev_attr_groups:   Attribute group store to register with device.
+ * @allocated:         This is possible that client owns the memory
+ *                     used by this structure. In this case
+ *                     this flag is set to false by framework to
+ *                     prevent deallocation during release process.
+ *                     Otherwise this flag is set to true.
+ * @constraint_ptr:    List of constraints for this zone.
+ *
+ * This defines a power zone instance. The fields of this structure are
+ * private, and should not be used by client drivers.
+ */
+struct powercap_zone {
+       int id;
+       char *name;
+       void *control_type_inst;
+       const struct powercap_zone_ops *ops;
+       struct device dev;
+       int const_id_cnt;
+       struct idr idr;
+       struct idr *parent_idr;
+       void *private_data;
+       struct attribute **zone_dev_attrs;
+       int zone_attr_count;
+       struct attribute_group dev_zone_attr_group;
+       const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
+       bool allocated;
+       struct powercap_zone_constraint *constraints;
+};
+
+/**
+ * struct powercap_zone_constraint_ops - Define constraint callbacks
+ * @set_power_limit_uw:                Set power limit in micro-watts.
+ * @get_power_limit_uw:                Get power limit in micro-watts.
+ * @set_time_window_us:                Set time window in micro-seconds.
+ * @get_time_window_us:                Get time window in micro-seconds.
+ * @get_max_power_uw:          Get max power allowed in micro-watts.
+ * @get_min_power_uw:          Get min power allowed in micro-watts.
+ * @get_max_time_window_us:    Get max time window allowed in micro-seconds.
+ * @get_min_time_window_us:    Get min time window allowed in micro-seconds.
+ * @get_name:                  Get the name of constraint
+ *
+ * This structure is used to define the constraint callbacks for the client
+ * drivers. The following callbacks are mandatory and can't be NULL:
+ *  set_power_limit_uw
+ *  get_power_limit_uw
+ *  set_time_window_us
+ *  get_time_window_us
+ *  get_name
+ *  Client drivers should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_constraint_ops {
+       int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
+       int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
+       int (*set_time_window_us) (struct powercap_zone *, int, u64);
+       int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
+       int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
+       int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
+       int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
+       int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
+       const char *(*get_name) (struct powercap_zone *, int);
+};
+
+/**
+ * struct powercap_zone_constraint- Defines instance of a constraint
+ * @id:                        Instance Id of this constraint.
+ * @power_zone:                Pointer to the power zone for this constraint.
+ * @ops:               Pointer to the constraint callbacks.
+ *
+ * This defines a constraint instance.
+ */
+struct powercap_zone_constraint {
+       int id;
+       struct powercap_zone *power_zone;
+       struct powercap_zone_constraint_ops *ops;
+};
+
+
+/* For clients to get their device pointer, may be used for dev_dbgs */
+#define POWERCAP_GET_DEV(power_zone)   (&power_zone->dev)
+
+/**
+* powercap_set_zone_data() - Set private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+* @pdata:      A pointer to the user private data.
+*
+* Allows client drivers to associate some private data to zone instance.
+*/
+static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
+                                               void *pdata)
+{
+       if (power_zone)
+               power_zone->private_data = pdata;
+}
+
+/**
+* powercap_get_zone_data() - Get private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+*
+* Allows client drivers to get private data associate with a zone,
+* using call to powercap_set_zone_data.
+*/
+static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
+{
+       if (power_zone)
+               return power_zone->private_data;
+       return NULL;
+}
+
+/**
+* powercap_register_control_type() - Register a control_type with framework
+* @control_type:       Pointer to client allocated memory for the control type
+*                      structure storage. If this is NULL, powercap framework
+*                      will allocate memory and own it.
+*                      Advantage of this parameter is that client can embed
+*                      this data in its data structures and allocate in a
+*                      single call, preventing multiple allocations.
+* @control_type_name:  The Name of this control_type, which will be shown
+*                      in the sysfs Interface.
+* @ops:                        Callbacks for control type. This parameter is optional.
+*
+* Used to create a control_type with the power capping class. Here control_type
+* can represent a type of technology, which can control a range of power zones.
+* For example a control_type can be RAPL (Running Average Power Limit)
+* Intel® 64 and IA-32 Processor Architectures. The name can be any string
+* which must be unique, otherwise this function returns NULL.
+* A pointer to the control_type instance is returned on success.
+*/
+struct powercap_control_type *powercap_register_control_type(
+                               struct powercap_control_type *control_type,
+                               const char *name,
+                               const struct powercap_control_type_ops *ops);
+
+/**
+* powercap_unregister_control_type() - Unregister a control_type from framework
+* @instance:   A pointer to the valid control_type instance.
+*
+* Used to unregister a control_type with the power capping class.
+* All power zones registered under this control type have to be unregistered
+* before calling this function, or it will fail with an error code.
+*/
+int powercap_unregister_control_type(struct powercap_control_type *instance);
+
+/* Zone register/unregister API */
+
+/**
+* powercap_register_zone() - Register a power zone
+* @power_zone: Pointer to client allocated memory for the power zone structure
+*              storage. If this is NULL, powercap framework will allocate
+*              memory and own it. Advantage of this parameter is that client
+*              can embed this data in its data structures and allocate in a
+*              single call, preventing multiple allocations.
+* @control_type: A control_type instance under which this zone operates.
+* @name:       A name for this zone.
+* @parent:     A pointer to the parent power zone instance if any or NULL
+* @ops:                Pointer to zone operation callback structure.
+* @no_constraints: Number of constraints for this zone
+* @const_ops:  Pointer to constraint callback structure
+*
+* Register a power zone under a given control type. A power zone must register
+* a pointer to a structure representing zone callbacks.
+* A power zone can be located under a parent power zone, in which case @parent
+* should point to it.  Otherwise, if @parent is NULL, the new power zone will
+* be located directly under the given control type
+* For each power zone there may be a number of constraints that appear in the
+* sysfs under that zone as attributes with unique numeric IDs.
+* Returns pointer to the power_zone on success.
+*/
+struct powercap_zone *powercap_register_zone(
+                       struct powercap_zone *power_zone,
+                       struct powercap_control_type *control_type,
+                       const char *name,
+                       struct powercap_zone *parent,
+                       const struct powercap_zone_ops *ops,
+                       int nr_constraints,
+                       struct powercap_zone_constraint_ops *const_ops);
+
+/**
+* powercap_unregister_zone() - Unregister a zone device
+* @control_type:       A pointer to the valid instance of a control_type.
+* @power_zone: A pointer to the valid zone instance for a control_type
+*
+* Used to unregister a zone device for a control_type.  Caller should
+* make sure that children for this zone are unregistered first.
+*/
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+                               struct powercap_zone *power_zone);
+
+#endif
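
A minimal client sketch against this interface: register one control type and one power zone with a power-monitoring callback, letting the framework allocate and own both structures by passing NULL storage pointers as the kernel-doc above describes. The my_rapl name, the reported value and the error handling are illustrative only:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/powercap.h>

static int my_get_power_uw(struct powercap_zone *zone, u64 *power_uw)
{
        *power_uw = 1000000;    /* report 1 W; real code reads hardware */
        return 0;
}

static const struct powercap_zone_ops my_zone_ops = {
        .get_power_uw = my_get_power_uw,
};

static struct powercap_control_type *my_ct;
static struct powercap_zone *my_zone;

static int __init my_powercap_init(void)
{
        my_ct = powercap_register_control_type(NULL, "my_rapl", NULL);
        if (IS_ERR_OR_NULL(my_ct))
                return -ENODEV;

        my_zone = powercap_register_zone(NULL, my_ct, "package-0", NULL,
                                         &my_zone_ops, 0, NULL);
        if (IS_ERR_OR_NULL(my_zone)) {
                powercap_unregister_control_type(my_ct);
                return -ENODEV;
        }
        return 0;
}

static void __exit my_powercap_exit(void)
{
        powercap_unregister_zone(my_ct, my_zone);
        powercap_unregister_control_type(my_ct);
}
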
index 3b9377d6b7a5fd63b13d02fc238d7da99fbef026..6312dd9ba449b4d65f5bb6bcdc01d606fc2fdfa8 100644 (file)
@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
+extern int random_int_secret_init(void);
 
 #ifndef MODULE
 extern const struct file_operations random_fops, urandom_fops;
index 67e13aa5a4781d2c8fdfa8fc902d2bbc90f2a3a0..9bdad43ad228a5501c2dc8b0e6cc77a6da8c7b30 100644 (file)
@@ -40,6 +40,8 @@ enum regulator_status {
 };
 
 /**
+ * struct regulator_linear_range - specify linear voltage ranges
+ *
  * Specify a range of voltages for regulator_map_linear_range() and
  * regulator_list_linear_range().
  *
index 6682da36b293cfad598a0c5803e9e32038f72aa3..e27baeeda3f470ed99ae899d8de22da062856bf8 100644 (file)
@@ -1394,11 +1394,10 @@ struct task_struct {
        } memcg_batch;
        unsigned int memcg_kmem_skip_account;
        struct memcg_oom_info {
+               struct mem_cgroup *memcg;
+               gfp_t gfp_mask;
+               int order;
                unsigned int may_oom:1;
-               unsigned int in_memcg_oom:1;
-               unsigned int oom_locked:1;
-               int wakeups;
-               struct mem_cgroup *wait_on_memcg;
        } memcg_oom;
 #endif
 #ifdef CONFIG_UPROBES
index d34049712a4d7cee24958816840ca1c843e0541f..3dbdf7e53dcc0942153be0a59cdeebd73caa0973 100644 (file)
@@ -5,18 +5,22 @@
 #include <linux/sh_dma.h>
 
 /*
- * Generic header for SuperH (H)SCI(F) (used by sh/sh64/h8300 and related parts)
+ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
  */
 
 #define SCIx_NOT_SUPPORTED     (-1)
 
 enum {
+       SCBRR_ALGO_INVALID,
+
        SCBRR_ALGO_1,           /* ((clk + 16 * bps) / (16 * bps) - 1) */
        SCBRR_ALGO_2,           /* ((clk + 16 * bps) / (32 * bps) - 1) */
        SCBRR_ALGO_3,           /* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */
        SCBRR_ALGO_4,           /* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */
        SCBRR_ALGO_5,           /* (((clk * 1000 / 32) / bps) - 1) */
        SCBRR_ALGO_6,           /* HSCIF variable sample rate algorithm */
+
+       SCBRR_NR_ALGOS,
 };
 
 #define SCSCR_TIE      (1 << 7)
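As a worked example of the algorithms listed above (the clock and bit rate are picked purely for illustration, not taken from any board file): with a 14.7456 MHz peripheral clock and 115200 bps, SCBRR_ALGO_2 evaluates to ((14745600 + 16 * 115200) / (32 * 115200)) - 1 = (16588800 / 3686400) - 1 = 4 - 1 = 3 under integer division, so a value of 3 would be programmed into the bit-rate register.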
index 2ddb48d9312c260e1e41a87ac25a19314f2aa379..2c154976394b8a50baec6b4f17486dfb32fdac28 100644 (file)
@@ -318,9 +318,13 @@ enum {
 
        SKB_GSO_GRE = 1 << 6,
 
-       SKB_GSO_UDP_TUNNEL = 1 << 7,
+       SKB_GSO_IPIP = 1 << 7,
 
-       SKB_GSO_MPLS = 1 << 8,
+       SKB_GSO_SIT = 1 << 8,
+
+       SKB_GSO_UDP_TUNNEL = 1 << 9,
+
+       SKB_GSO_MPLS = 1 << 10,
 };
 
 #if BITS_PER_LONG > 32
@@ -498,7 +502,7 @@ struct sk_buff {
         * headers if needed
         */
        __u8                    encapsulation:1;
-       /* 7/9 bit hole (depending on ndisc_nodetype presence) */
+       /* 6/8 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -585,8 +589,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
        skb->_skb_refdst = (unsigned long)dst;
 }
 
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
-                               bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+                        bool force);
 
 /**
  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +638,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
        return (struct rtable *)skb_dst(skb);
 }
 
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void           __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void  __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
-                            bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+                     bool *fragstolen, int *delta_truesize);
 
-extern struct sk_buff *__alloc_skb(unsigned int size,
-                                  gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+                           int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
@@ -660,41 +664,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
        return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 {
        return __alloc_skb_head(priority, -1);
 }
 
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
-                                gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-                               gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
-                                int headroom, gfp_t gfp_mask);
-
-extern int            pskb_expand_head(struct sk_buff *skb,
-                                       int nhead, int ntail,
-                                       gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-                                           unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-                                      int newheadroom, int newtailroom,
-                                      gfp_t priority);
-extern int            skb_to_sgvec(struct sk_buff *skb,
-                                   struct scatterlist *sg, int offset,
-                                   int len);
-extern int            skb_cow_data(struct sk_buff *skb, int tailbits,
-                                   struct sk_buff **trailer);
-extern int            skb_pad(struct sk_buff *skb, int pad);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+                                    unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+                               int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+                int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)       consume_skb(a)
 
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                       int getfrag(void *from, char *to, int offset,
-                       int len,int odd, struct sk_buff *skb),
-                       void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length);
 
 struct skb_seq_state {
        __u32           lower_offset;
@@ -706,18 +702,17 @@ struct skb_seq_state {
        __u8            *frag_data;
 };
 
-extern void          skb_prepare_seq_read(struct sk_buff *skb,
-                                          unsigned int from, unsigned int to,
-                                          struct skb_seq_state *st);
-extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
-                                  struct skb_seq_state *st);
-extern void          skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+                         unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+                         struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
 
-extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
-                                   unsigned int to, struct ts_config *config,
-                                   struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+                          unsigned int to, struct ts_config *config,
+                          struct ts_state *state);
 
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
        if (!skb->l4_rxhash)
@@ -1095,7 +1090,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
  *     The "__skb_xxxx()" functions are the non-atomic ones that
  *     can only be called with interrupts disabled.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+               struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
@@ -1201,8 +1197,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
        __skb_insert(newsk, prev, prev->next, list);
 }
 
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
-                      struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+               struct sk_buff_head *list);
 
 static inline void __skb_queue_before(struct sk_buff_head *list,
                                      struct sk_buff *next,
@@ -1221,7 +1217,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
  *
  *     A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
 {
@@ -1238,7 +1234,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
  *
  *     A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
                                   struct sk_buff *newsk)
 {
@@ -1249,7 +1245,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void       skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
        struct sk_buff *next, *prev;
@@ -1270,7 +1266,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
  *     so must be used with appropriate locks held only. The head item is
  *     returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
        struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1283,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
  *     so must be used with appropriate locks held only. The tail item is
  *     returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
        struct sk_buff *skb = skb_peek_tail(list);
@@ -1373,8 +1369,8 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
        skb_shinfo(skb)->nr_frags = i + 1;
 }
 
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-                           int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+                    int size, unsigned int truesize);
 
 #define SKB_PAGE_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1414,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
  *     Add data to an sk_buff
  */
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
        unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1424,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
        return tmp;
 }
 
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
        skb->data -= len;
@@ -1436,7 +1432,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
        return skb->data;
 }
 
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
        skb->len -= len;
@@ -1449,7 +1445,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 }
 
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
@@ -1753,7 +1749,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 #define NET_SKB_PAD    max(32, L1_CACHE_BYTES)
 #endif
 
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1765,7 +1761,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
        skb_set_tail_pointer(skb, len);
 }
 
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1838,7 +1834,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
  *     the list and one reference dropped. This function does not take the
  *     list lock and the caller must hold the relevant locks to use it.
  */
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
        struct sk_buff *skb;
@@ -1850,11 +1846,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 #define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
 #define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
 
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
 
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-                                         unsigned int length,
-                                         gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+                                  gfp_t gfp_mask);
 
 /**
  *     netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2066,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
        __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
 }
 
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
 /**
  * skb_frag_dma_map - maps a paged fragment via the DMA API
  * @dev: the device to map the fragment to
@@ -2342,60 +2339,42 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
 #define skb_walk_frags(skb, iter)      \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-                                          int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-                                        int noblock, int *err);
-extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
-                                    struct poll_table_struct *wait);
-extern int            skb_copy_datagram_iovec(const struct sk_buff *from,
-                                              int offset, struct iovec *to,
-                                              int size);
-extern int            skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-                                                       int hlen,
-                                                       struct iovec *iov);
-extern int            skb_copy_datagram_from_iovec(struct sk_buff *skb,
-                                                   int offset,
-                                                   const struct iovec *from,
-                                                   int from_offset,
-                                                   int len);
-extern int            zerocopy_sg_from_iovec(struct sk_buff *skb,
-                                             const struct iovec *frm,
-                                             int offset,
-                                             size_t count);
-extern int            skb_copy_datagram_const_iovec(const struct sk_buff *from,
-                                                    int offset,
-                                                    const struct iovec *to,
-                                                    int to_offset,
-                                                    int size);
-extern void           skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void           skb_free_datagram_locked(struct sock *sk,
-                                               struct sk_buff *skb);
-extern int            skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-                                        unsigned int flags);
-extern __wsum         skb_checksum(const struct sk_buff *skb, int offset,
-                                   int len, __wsum csum);
-extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
-                                    void *to, int len);
-extern int            skb_store_bits(struct sk_buff *skb, int offset,
-                                     const void *from, int len);
-extern __wsum         skb_copy_and_csum_bits(const struct sk_buff *skb,
-                                             int offset, u8 *to, int len,
-                                             __wsum csum);
-extern int             skb_splice_bits(struct sk_buff *skb,
-                                               unsigned int offset,
-                                               struct pipe_inode_info *pipe,
-                                               unsigned int len,
-                                               unsigned int flags);
-extern void           skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void           skb_split(struct sk_buff *skb,
-                                struct sk_buff *skb1, const u32 len);
-extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-                                int shiftlen);
-extern void           skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
-                                  netdev_features_t features);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+                                   int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+                                 int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+                           struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+                                    struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+                                const struct iovec *from, int from_offset,
+                                int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+                          int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+                                 const struct iovec *to, int to_offset,
+                                 int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+                   __wsum csum);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+                             int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int len,
+                   unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
@@ -2440,7 +2419,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
        memcpy(skb->data + offset, from, len);
 }
 
-extern void skb_init(void);
+void skb_init(void);
 
 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
 {
@@ -2483,12 +2462,12 @@ static inline ktime_t net_invalid_timestamp(void)
        return ktime_set(0, 0);
 }
 
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
 
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
 
 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 
@@ -2529,8 +2508,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
  * generates a software time stamp (otherwise), then queues the clone
  * to the error queue of the socket.  Errors are silently ignored.
  */
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
-                       struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+                  struct skb_shared_hwtstamps *hwtstamps);
 
 static inline void sw_tx_timestamp(struct sk_buff *skb)
 {
@@ -2562,8 +2541,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
  */
 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
 
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
@@ -2593,7 +2572,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 }
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
        if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2732,28 +2711,27 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-extern u16 __skb_tx_hash(const struct net_device *dev,
-                        const struct sk_buff *skb,
-                        unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+                 unsigned int num_tx_queues);
 
-#ifdef CONFIG_XFRM
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
        return skb->sp;
-}
 #else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
        return NULL;
-}
 #endif
+}
 
 /* Keeps track of mac header offset relative to skb->head.
  * It is useful for TSO of Tunneling protocol. e.g. GRE.
  * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * It also keeps track of the level of encapsulation of network headers.
+ */
 struct skb_gso_cb {
-       int mac_offset;
+       int     mac_offset;
+       int     encap_level;
 };
 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
 
@@ -2783,12 +2761,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_size;
 }
 
+/* Note: Should be called only if skb_is_gso(skb) is true */
 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 {
        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 
 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
index cfb7ca094b384522d2378c2a952bbe788812f1c6..731f5237d5f4e0f87dd817e15983f509d3b4d483 100644 (file)
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) {  }
 
+static inline void __smp_call_function_single(int cpuid,
+               struct call_single_data *data, int wait)
+{
+       on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
 
 /*
index 86a12b0cb239850d903e53b52091ad53fd179115..0688472500bbabb1274a55c1ab0720d39b10922b 100644 (file)
@@ -108,6 +108,16 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
        return 0;
 }
 
+/* Get the device phy address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+       struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+       if (!dev)
+               return -ENODEV;
+
+       return dev->dev->bus->sprom.et0phyaddr;
+}
+
 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
                                          struct pci_dev *pdev);
 extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
 {
        return -ENODEV;
 }
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+       return -ENODEV;
+}
 
 #endif /* CONFIG_SSB_DRIVER_GIGE */
 #endif /* LINUX_SSB_DRIVER_GIGE_H_ */
index 6740801aa71ab519c59bd21a7e50ee5793cc242a..943ee895f2d1d02b9cda88875f3765af30c917a6 100644 (file)
@@ -49,6 +49,7 @@ struct rpc_clnt {
 
        unsigned int            cl_softrtry : 1,/* soft timeouts */
                                cl_discrtry : 1,/* disconnect before retry */
+                               cl_noretranstimeo: 1,/* No retransmit timeouts */
                                cl_autobind : 1,/* use getport() */
                                cl_chatty   : 1;/* be verbose */
 
@@ -126,6 +127,7 @@ struct rpc_create_args {
 #define RPC_CLNT_CREATE_QUIET          (1UL << 6)
 #define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
 #define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT        (1UL << 8)
+#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT     (1UL << 9)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
 struct rpc_clnt        *rpc_bind_new_program(struct rpc_clnt *,
index 096ee58be11a83f2fc05107639a2273cc0c677e6..3a847de83fabd5757ddcee00910a6ab991f79a98 100644 (file)
@@ -122,6 +122,7 @@ struct rpc_task_setup {
 #define RPC_TASK_SENT          0x0800          /* message was sent */
 #define RPC_TASK_TIMEOUT       0x1000          /* fail with ETIMEDOUT on timeout */
 #define RPC_TASK_NOCONNECT     0x2000          /* return ENOTCONN if not connected */
+#define RPC_TASK_NO_RETRANS_TIMEOUT    0x4000          /* wait forever for a reply */
 
 #define RPC_IS_ASYNC(t)                ((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)      ((t)->tk_flags & RPC_TASK_SWAPPER)
index cec7b9b5e1bfb6591ad10997485b05ddc7421120..8097b9df677326717517f256b66dbaa17e27df93 100644 (file)
@@ -288,7 +288,7 @@ int                 xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
-int                    xprt_prepare_transmit(struct rpc_task *task);
+bool                   xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_transmit(struct rpc_task *task);
 void                   xprt_end_transmit(struct rpc_task *task);
 int                    xprt_adjust_timeout(struct rpc_rqst *req);
index dd3edd7dfc94dc73de9389e55ed7f35bdcadfcbd..9d3f1a5b6178a9dd1aa3914b35051b96f5ceb279 100644 (file)
 
 #include <asm/timex.h>
 
+#ifndef random_get_entropy
+/*
+ * The random_get_entropy() function is used by the /dev/random driver
+ * in order to extract entropy via the relative unpredictability of
+ * when an interrupt takes place versus a high-speed, fine-grained
+ * timing source or cycle counter.  Since it is called on every
+ * single interrupt, it must have a very low cost/overhead.
+ *
+ * By default we use get_cycles() for this purpose, but individual
+ * architectures may override this in their asm/timex.h header file.
+ */
+#define random_get_entropy()   get_cycles()
+#endif
+
 /*
  * SHIFT_PLL is used as a dampening factor to define how much we
  * adjust the frequency correction for a given offset in PLL mode.
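Regarding the random_get_entropy() fallback added above: an architecture whose get_cycles() is not a suitable entropy source can pre-empt this default simply by defining the macro in its own asm/timex.h, which linux/timex.h includes first. A minimal, purely hypothetical sketch (my_soc_free_running_counter_read() is an invented placeholder for whatever cheap, fast-moving hardware counter the port actually has):

/* Hypothetical <asm/timex.h> fragment -- illustrative only */
#define random_get_entropy()   ((cycles_t)my_soc_free_running_counter_read())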
index f9a7e7bc925be61322a24304076503bd918a520a..11d85b9c1b081af6a9f4f30ac3c1458766ed4238 100644 (file)
@@ -12,7 +12,7 @@ struct usb_phy_gen_xceiv_platform_data {
        unsigned int needs_reset:1;
 };
 
-#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
+#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
 /* sometimes transceivers are accessed only through e.g. ULPI */
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
index 9cb2fe8ca944d5095043dd7b7ce71116a3fc21eb..e303eef94dd5cea92d16d7ae63aa35f5c3f1d4bb 100644 (file)
@@ -42,6 +42,7 @@ struct usbnet {
        struct usb_host_endpoint *status;
        unsigned                maxpacket;
        struct timer_list       delay;
+       const char              *padding_pkt;
 
        /* protocol/interface state */
        struct net_device       *net;
index bf99cd01be206ebeb170d826e176c3ac22e1493e..630356866030d88a355390a932105ff9c86b98bb 100644 (file)
@@ -66,7 +66,9 @@
        US_FLAG(INITIAL_READ10, 0x00100000)                     \
                /* Initial READ(10) (and others) must be retried */     \
        US_FLAG(WRITE_CACHE,    0x00200000)                     \
-               /* Write Cache status is not available */
+               /* Write Cache status is not available */       \
+       US_FLAG(NEEDS_CAP16,    0x00400000)
+               /* cannot handle READ_CAPACITY_10 */
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index 80cf8173a65b1491bd7e1da05978024acd7889b1..2c02f3a8d2ba3f4ba079a51f537be25742b17162 100644 (file)
@@ -65,15 +65,8 @@ struct pci_dev;
 *     out of the arbitration process (and it is then safe to take
 *     interrupts at any time).
  */
-#if defined(CONFIG_VGA_ARB)
 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
                                    unsigned int decodes);
-#else
-static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
-                                          unsigned int decodes)
-{
-}
-#endif
 
 /**
  *     vga_get         - acquire & locks VGA resources
index 7fe28228b2742efc0c9eb69c7bbc866f0194ef19..512cdc2fb80f40f99bb1b09fe1d0d4dcfeab345c 100644 (file)
@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
 
 struct yamdrv_ioctl_mcs {
        int cmd;
-       int bitrate;
+       unsigned int bitrate;
        unsigned char bits[YAM_FPGA_SIZE];
 };
index 16550c4390081a03a8f2d4c8d3fdc51de54cc006..a707529841e27d92e2bfe1abfda2b5f0b81b1560 100644 (file)
@@ -35,7 +35,7 @@
        printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg)
 
 #define v4l_client_printk(level, client, fmt, arg...)                      \
-       v4l_printk(level, (client)->driver->driver.name, (client)->adapter, \
+       v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \
                   (client)->addr, fmt , ## arg)
 
 #define v4l_err(client, fmt, arg...) \
index 6781258d0b677f4a7092b4d29d0c1c7094e8a8a0..bd8218b15009a810af8abd5df5a4bcad31dfdb20 100644 (file)
@@ -391,7 +391,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
 unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
                loff_t *ppos, int nonblock);
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
                loff_t *ppos, int nonblock);
 
 /**
@@ -491,7 +491,7 @@ int vb2_ioctl_expbuf(struct file *file, void *priv,
 
 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
 int vb2_fop_release(struct file *file);
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos);
 ssize_t vb2_fop_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos);
index 0038526b8ef7c0495bf12784a6c3c7a855c0b144..7b89852779af77b0f156f635f7b8e5287d9a9be1 100644 (file)
 
 #include <media/videobuf2-core.h>
 
-struct vb2_dma_sg_desc {
-       unsigned long           size;
-       unsigned int            num_pages;
-       struct scatterlist      *sglist;
-};
-
-static inline struct vb2_dma_sg_desc *vb2_dma_sg_plane_desc(
+static inline struct sg_table *vb2_dma_sg_plane_desc(
                struct vb2_buffer *vb, unsigned int plane_no)
 {
-       return (struct vb2_dma_sg_desc *)vb2_plane_cookie(vb, plane_no);
+       return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
 }
 
 extern const struct vb2_mem_ops vb2_dma_sg_memops;
index fb314de2b61ba228059b1f09cb916d7d275a6a9b..86505bfa5d2c4829698d21ce81ba47575b504941 100644 (file)
@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
 #endif
 
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+                                  const unsigned int prefix_len,
+                                  struct net_device *dev);
+
 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
 
 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
index bf2ddffdae2d226d0727656cb1f98ef0eab8f94a..2cc9517fb0d5006f4b5b4f9570cf7a748d3e3518 100644 (file)
@@ -333,16 +333,16 @@ out:
 
 int bt_to_errno(__u16 code);
 
-extern int hci_sock_init(void);
-extern void hci_sock_cleanup(void);
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
 
-extern int bt_sysfs_init(void);
-extern void bt_sysfs_cleanup(void);
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
 
-extern int  bt_procfs_init(struct net *net, const char *name,
-                          struct bt_sock_list* sk_list,
-                          int (* seq_show)(struct seq_file *, void *));
-extern void bt_procfs_cleanup(struct net *net, const char *name);
+int bt_procfs_init(struct net *net, const char *name,
+                  struct bt_sock_list *sk_list,
+                  int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
 
 extern struct dentry *bt_debugfs;
 
index 4e208420d84c6bdfbe281dcb7de981d846413bd2..2dc467939be7e9a189bef661ae96762a8eb31fe8 100644 (file)
@@ -372,18 +372,17 @@ extern rwlock_t hci_dev_list_lock;
 extern rwlock_t hci_cb_list_lock;
 
 /* ----- HCI interface to upper protocols ----- */
-extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
-extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
-                             u16 flags);
-
-extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
-extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
index 3588f48bfd354828ca6891f6306491fe83696291..486213a1aed8d07ad63aaba57957e7a651657696 100644 (file)
@@ -256,8 +256,8 @@ static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
                rfcomm_dlc_free(d);
 }
 
-extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
-extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
 
 static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
index 4795e817afe508b6f404aebbd71e6ebe4659cb62..097f69cfaa75fbe2ce56c2ae33f4c96351b3add6 100644 (file)
@@ -195,6 +195,6 @@ enum ifla_caif_hsi {
        __IFLA_CAIF_HSI_MAX
 };
 
-extern struct cfhsi_ops *cfhsi_get_ops(void);
+struct cfhsi_ops *cfhsi_get_ops(void);
 
 #endif         /* CAIF_HSI_H_ */
index a7a683e30b64e6beb2bc87907c85576d85385007..a8c2ef6d3b932abbb42e28be158d472effd02513 100644 (file)
@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        unsigned char err_offset = 0;
        u8 opt_len = opt[1];
        u8 opt_iter;
+       u8 tag_len;
 
        if (opt_len < 8) {
                err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
-               if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+               tag_len = opt[opt_iter + 1];
+               if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
                        goto out;
                }
-               opt_iter += opt[opt_iter + 1];
+               opt_iter += tag_len;
        }
 
 out:
index 6e9565324989dd0ad4eb92809af943f61055a047..3b603b199c01c5d554202323dde4253e01f7a4ab 100644 (file)
@@ -29,8 +29,8 @@ struct compat_cmsghdr {
        compat_int_t    cmsg_type;
 };
 
-extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
+int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
+int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 #else /* defined(CONFIG_COMPAT) */
 /*
@@ -40,24 +40,30 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 #define compat_mmsghdr mmsghdr
 #endif /* defined(CONFIG_COMPAT) */
 
-extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
-                                          unsigned int, unsigned int);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
-                                          unsigned int, unsigned int,
-                                          struct compat_timespec __user *);
-extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
-extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
-
-extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
-
-extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
-       int (*)(struct sock *, int, int, char __user *, unsigned int));
-extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
-       int __user *, int (*)(struct sock *, int, int, char __user *,
-                               int __user *));
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
+int verify_compat_iovec(struct msghdr *, struct iovec *,
+                       struct sockaddr_storage *, int);
+asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
+                                  unsigned int);
+asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
+                                   unsigned int, unsigned int);
+asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *,
+                                  unsigned int);
+asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
+                                   unsigned int, unsigned int,
+                                   struct compat_timespec __user *);
+asmlinkage long compat_sys_getsockopt(int, int, int, char __user *,
+                                     int __user *);
+int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+
+int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
+                                    unsigned char *, int);
+
+int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
+                        int (*)(struct sock *, int, int, char __user *,
+                                unsigned int));
+int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *,
+                        int (*)(struct sock *, int, int, char __user *,
+                                int __user *));
 
 #endif /* NET_COMPAT_H */
index 443626ed4cbc4daba803c58dfc76128c5c0f5b67..d2f3041c0dfaa685b2f32a633ee0931bad036329 100644 (file)
@@ -25,9 +25,9 @@ enum dcbevent_notif_type {
 };
 
 #ifdef CONFIG_DCB
-extern int register_dcbevent_notifier(struct notifier_block *nb);
-extern int unregister_dcbevent_notifier(struct notifier_block *nb);
-extern int call_dcbevent_notifiers(unsigned long val, void *v);
+int register_dcbevent_notifier(struct notifier_block *nb);
+int unregister_dcbevent_notifier(struct notifier_block *nb);
+int call_dcbevent_notifiers(unsigned long val, void *v);
 #else
 static inline int
 register_dcbevent_notifier(struct notifier_block *nb)
index c88bf4ebd330723de80a330b11baf65507e94d24..ccc15588d108ecc2a33d5b2544b8e0e1feb33069 100644 (file)
@@ -199,24 +199,26 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
        fld->fld_dport = scp->addrrem;
 }
 
-extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
 
 #define DN_MENUVER_ACC 0x01
 #define DN_MENUVER_USR 0x02
 #define DN_MENUVER_PRX 0x04
 #define DN_MENUVER_UIC 0x08
 
-extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-extern struct sock *dn_find_by_skb(struct sk_buff *skb);
+struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
+struct sock *dn_find_by_skb(struct sk_buff *skb);
 #define DN_ASCBUF_LEN 9
-extern char *dn_addr2asc(__u16, char *);
-extern int dn_destroy_timer(struct sock *sk);
+char *dn_addr2asc(__u16, char *);
+int dn_destroy_timer(struct sock *sk);
 
-extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
-extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
+int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
+                        unsigned char type);
+int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
+                        unsigned char *type);
 
-extern void dn_start_slow_timer(struct sock *sk);
-extern void dn_stop_slow_timer(struct sock *sk);
+void dn_start_slow_timer(struct sock *sk);
+void dn_stop_slow_timer(struct sock *sk);
 
 extern __le16 decnet_address;
 extern int decnet_debug_level;
index b9e32db03f2040e76d7d73295406772de0029930..20b5ab06032d35dc1f5f3175d9b88d59db2e2232 100644 (file)
@@ -148,27 +148,27 @@ struct rtnode_hello_message {
 } __packed;
 
 
-extern void dn_dev_init(void);
-extern void dn_dev_cleanup(void);
+void dn_dev_init(void);
+void dn_dev_cleanup(void);
 
-extern int dn_dev_ioctl(unsigned int cmd, void __user *arg);
+int dn_dev_ioctl(unsigned int cmd, void __user *arg);
 
-extern void dn_dev_devices_off(void);
-extern void dn_dev_devices_on(void);
+void dn_dev_devices_off(void);
+void dn_dev_devices_on(void);
 
-extern void dn_dev_init_pkt(struct sk_buff *skb);
-extern void dn_dev_veri_pkt(struct sk_buff *skb);
-extern void dn_dev_hello(struct sk_buff *skb);
+void dn_dev_init_pkt(struct sk_buff *skb);
+void dn_dev_veri_pkt(struct sk_buff *skb);
+void dn_dev_hello(struct sk_buff *skb);
 
-extern void dn_dev_up(struct net_device *);
-extern void dn_dev_down(struct net_device *);
+void dn_dev_up(struct net_device *);
+void dn_dev_down(struct net_device *);
 
-extern int dn_dev_set_default(struct net_device *dev, int force);
-extern struct net_device *dn_dev_get_default(void);
-extern int dn_dev_bind_default(__le16 *addr);
+int dn_dev_set_default(struct net_device *dev, int force);
+struct net_device *dn_dev_get_default(void);
+int dn_dev_bind_default(__le16 *addr);
 
-extern int register_dnaddr_notifier(struct notifier_block *nb);
-extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+int register_dnaddr_notifier(struct notifier_block *nb);
+int unregister_dnaddr_notifier(struct notifier_block *nb);
 
 static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
 {
index 74004af31c482c2bef8c9240b47e82c2f4a9bfbd..f2ca135ddcc979b07260c5d5996dc79bf7659536 100644 (file)
@@ -95,41 +95,38 @@ struct dn_fib_table {
 /*
  * dn_fib.c
  */
-extern void dn_fib_init(void);
-extern void dn_fib_cleanup(void);
-
-extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd, 
-                       unsigned long arg);
-extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, 
-                               struct nlattr *attrs[],
-                               const struct nlmsghdr *nlh, int *errp);
-extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, 
-                       const struct flowidn *fld,
-                       struct dn_fib_res *res);
-extern void dn_fib_release_info(struct dn_fib_info *fi);
-extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct flowidn *fld,
-                                       struct dn_fib_res *res);
+void dn_fib_init(void);
+void dn_fib_cleanup(void);
+
+int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
+                                      struct nlattr *attrs[],
+                                      const struct nlmsghdr *nlh, int *errp);
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
+                         const struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_release_info(struct dn_fib_info *fi);
+void dn_fib_flush(void);
+void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
 
 /*
  * dn_tables.c
  */
-extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-extern struct dn_fib_table *dn_fib_empty_table(void);
-extern void dn_fib_table_init(void);
-extern void dn_fib_table_cleanup(void);
+struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
+struct dn_fib_table *dn_fib_empty_table(void);
+void dn_fib_table_init(void);
+void dn_fib_table_cleanup(void);
 
 /*
  * dn_rules.c
  */
-extern void dn_fib_rules_init(void);
-extern void dn_fib_rules_cleanup(void);
-extern unsigned int dnet_addr_type(__le16 addr);
-extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_rules_init(void);
+void dn_fib_rules_cleanup(void);
+unsigned int dnet_addr_type(__le16 addr);
+int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
 
-extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-extern void dn_fib_free_info(struct dn_fib_info *fi);
+void dn_fib_free_info(struct dn_fib_info *fi);
 
 static inline void dn_fib_info_put(struct dn_fib_info *fi)
 {
index 4cb4ae7fb81fddc3915402e26b90a4ba8ac28c78..fac4e3f4a6d3c0ede92af3d4df4c24319a94c0a0 100644 (file)
@@ -16,12 +16,12 @@ struct dn_neigh {
        __u8 priority;
 };
 
-extern void dn_neigh_init(void);
-extern void dn_neigh_cleanup(void);
-extern int dn_neigh_router_hello(struct sk_buff *skb);
-extern int dn_neigh_endnode_hello(struct sk_buff *skb);
-extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+void dn_neigh_init(void);
+void dn_neigh_cleanup(void);
+int dn_neigh_router_hello(struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sk_buff *skb);
+void dn_neigh_pointopoint_hello(struct sk_buff *skb);
+int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
 
 extern struct neigh_table dn_neigh_table;
 
index e43a2893f132f0767849271f3eac0f625dbd8828..3a3e33d184562ae778db49b0cabb1e524c2ee548 100644 (file)
 *******************************************************************************/
 /* dn_nsp.c functions prototyping */
 
-extern void dn_nsp_send_data_ack(struct sock *sk);
-extern void dn_nsp_send_oth_ack(struct sock *sk);
-extern void dn_nsp_delayed_ack(struct sock *sk);
-extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 
-                       unsigned short reason, gfp_t gfp);
-extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
-                               unsigned short reason);
-extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-extern void dn_nsp_output(struct sock *sk);
-extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
-extern unsigned long dn_nsp_persist(struct sock *sk);
-extern int dn_nsp_xmit_timeout(struct sock *sk);
-
-extern int dn_nsp_rx(struct sk_buff *);
-extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
+void dn_nsp_send_data_ack(struct sock *sk);
+void dn_nsp_send_oth_ack(struct sock *sk);
+void dn_nsp_delayed_ack(struct sock *sk);
+void dn_send_conn_ack(struct sock *sk);
+void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
+void dn_nsp_send_disc(struct sock *sk, unsigned char type,
+                     unsigned short reason, gfp_t gfp);
+void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
+                       unsigned short reason);
+void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
+void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
+
+void dn_nsp_output(struct sock *sk);
+int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
+                           struct sk_buff_head *q, unsigned short acknum);
+void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
+                      int oob);
+unsigned long dn_nsp_persist(struct sock *sk);
+int dn_nsp_xmit_timeout(struct sock *sk);
+
+int dn_nsp_rx(struct sk_buff *);
+int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
+                                 long timeo, int *err);
 
 #define NSP_REASON_OK 0                /* No error */
 #define NSP_REASON_NR 1                /* No resources */
index 2e9d317c82dcc7164ef9c3cfbfc8429c6364031a..b409ad6b8d7adbb5bd25ad3b71cbea5c8723b70e 100644 (file)
     GNU General Public License for more details.
 *******************************************************************************/
 
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *, struct sock *sk, int flags);
-extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_rt_cache_flush(int delay);
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
+                        struct sock *sk, int flags);
+int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
+void dn_rt_cache_flush(int delay);
 
 /* Masks for flags field */
 #define DN_RT_F_PID 0x07 /* Mask for packet type                      */
@@ -92,8 +93,8 @@ static inline bool dn_is_output_route(struct dn_route *rt)
        return rt->fld.flowidn_iif == 0;
 }
 
-extern void dn_route_init(void);
-extern void dn_route_cleanup(void);
+void dn_route_init(void);
+void dn_route_cleanup(void);
 
 #include <net/sock.h>
 #include <linux/if_arp.h>
index 3bc4865f82679137077db4dd82c177bd39b51515..44995c13e941df814db2433819d2934869df4d0f 100644 (file)
@@ -106,7 +106,7 @@ struct dst_entry {
        };
 };
 
-extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
 extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY  0x1UL
@@ -119,7 +119,7 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
        return dst->_metrics & DST_METRICS_READ_ONLY;
 }
 
-extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
 
 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
 {
@@ -262,7 +262,7 @@ static inline struct dst_entry *dst_clone(struct dst_entry *dst)
        return dst;
 }
 
-extern void dst_release(struct dst_entry *dst);
+void dst_release(struct dst_entry *dst);
 
 static inline void refdst_drop(unsigned long refdst)
 {
@@ -362,12 +362,11 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
        return child;
 }
 
-extern int dst_discard(struct sk_buff *skb);
-extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-                      int initial_ref, int initial_obsolete,
-                      unsigned short flags);
-extern void __dst_free(struct dst_entry *dst);
-extern struct dst_entry *dst_destroy(struct dst_entry *dst);
+int dst_discard(struct sk_buff *skb);
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+               int initial_obsolete, unsigned short flags);
+void __dst_free(struct dst_entry *dst);
+struct dst_entry *dst_destroy(struct dst_entry *dst);
 
 static inline void dst_free(struct dst_entry *dst)
 {
@@ -463,7 +462,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
-extern void            dst_init(void);
+void dst_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
@@ -479,10 +478,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
 {
        return dst_orig;
 } 
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+       return NULL;
+}
+
 #else
-extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
-                                    const struct flowi *fl, struct sock *sk,
-                                    int flags);
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                             const struct flowi *fl, struct sock *sk,
+                             int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+       return dst->xfrm;
+}
 #endif
 
 #endif /* _NET_DST_H */
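Besides de-externing, this hunk adds a dst_xfrm() accessor with a NULL-returning stub when CONFIG_XFRM is disabled, so callers can look at the attached xfrm state without wrapping the call in their own ifdefs. A self-contained sketch of the same stub pattern; the names below are invented for illustration, not the kernel's:

/*
 * Illustrative only: a compiled-out feature resolves to an inline that
 * returns NULL, keeping call sites free of #ifdef.
 */
#include <stddef.h>

struct feature_state { int refcnt; };

struct holder {
#ifdef CONFIG_FEATURE
        struct feature_state *feat;
#endif
        int other;
};

#ifdef CONFIG_FEATURE
static inline struct feature_state *holder_feature(const struct holder *h)
{
        return h->feat;
}
#else
static inline struct feature_state *holder_feature(const struct holder *h)
{
        return NULL;            /* feature compiled out */
}
#endif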
index d58451331dbde8a0d9b4231a61bd4957f62cdeec..1356dda00d22f9cd3fad3ea0eb625b667f76c017 100644 (file)
@@ -13,7 +13,7 @@ struct esp_data {
        struct crypto_aead *aead;
 };
 
-extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
 
 struct ip_esp_hdr;
 
index 4b2b557fb0e8c2d709e1733a13e4ca3dbc02ff0e..e584de16e4c3629ccebef06d000b47166e5cc65d 100644 (file)
@@ -115,14 +115,13 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
        return frh->table;
 }
 
-extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
-extern void fib_rules_unregister(struct fib_rules_ops *);
+struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
+                                        struct net *);
+void fib_rules_unregister(struct fib_rules_ops *);
 
-extern int                     fib_rules_lookup(struct fib_rules_ops *,
-                                                struct flowi *, int flags,
-                                                struct fib_lookup_arg *);
-extern int                     fib_default_rule_add(struct fib_rules_ops *,
-                                                    u32 pref, u32 table,
-                                                    u32 flags);
-extern u32                     fib_default_rule_pref(struct fib_rules_ops *ops);
+int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
+                    struct fib_lookup_arg *);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
+                        u32 flags);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops);
 #endif
index 628e11b98c580d7f66ace375f5a4e0598d03b471..65ce471d2ab577f81cc62040018aff980ea82f65 100644 (file)
@@ -215,12 +215,13 @@ typedef struct flow_cache_object *(*flow_resolve_t)(
                struct net *net, const struct flowi *key, u16 family,
                u8 dir, struct flow_cache_object *oldobj, void *ctx);
 
-extern struct flow_cache_object *flow_cache_lookup(
-               struct net *net, const struct flowi *key, u16 family,
-               u8 dir, flow_resolve_t resolver, void *ctx);
+struct flow_cache_object *flow_cache_lookup(struct net *net,
+                                           const struct flowi *key, u16 family,
+                                           u8 dir, flow_resolve_t resolver,
+                                           void *ctx);
 
-extern void flow_cache_flush(void);
-extern void flow_cache_flush_deferred(void);
+void flow_cache_flush(void);
+void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index bb8271d487b7bea10861c6548dde50d0964c4a20..7e64bd8bbda941319b494bcb7b66e79b7f52370a 100644 (file)
@@ -13,5 +13,6 @@ struct flow_keys {
        u8 ip_proto;
 };
 
-extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
 #endif
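The new skb_flow_get_ports() prototype is only declared here. As a rough stand-in for what such a helper conventionally returns -- the 16-bit source and destination ports read as one 32-bit network-order word at the transport header offset -- here is a userspace sketch over a plain buffer; this is an assumption about the semantics, not the kernel implementation:

/*
 * Sketch under assumptions: returns sport|dport in network byte order,
 * or 0 if the packet is too short for a TCP/UDP-style port pair.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static uint32_t ports_at(const uint8_t *pkt, size_t len, size_t thoff)
{
        uint32_t ports;

        if (thoff + sizeof(ports) > len)
                return 0;                       /* truncated header */
        memcpy(&ports, pkt + thoff, sizeof(ports));
        return ports;                           /* network byte order */
}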
index 834d8add9e5fd51112f08cde966e0044604049cb..abf33bbd2e6a0413193769d353bb9ce82a10ebec 100644 (file)
@@ -112,19 +112,18 @@ struct garp_port {
        struct rcu_head                 rcu;
 };
 
-extern int     garp_register_application(struct garp_application *app);
-extern void    garp_unregister_application(struct garp_application *app);
-
-extern int     garp_init_applicant(struct net_device *dev,
-                                   struct garp_application *app);
-extern void    garp_uninit_applicant(struct net_device *dev,
-                                     struct garp_application *app);
-
-extern int     garp_request_join(const struct net_device *dev,
-                                 const struct garp_application *app,
-                                 const void *data, u8 len, u8 type);
-extern void    garp_request_leave(const struct net_device *dev,
-                                  const struct garp_application *app,
-                                  const void *data, u8 len, u8 type);
+int garp_register_application(struct garp_application *app);
+void garp_unregister_application(struct garp_application *app);
+
+int garp_init_applicant(struct net_device *dev, struct garp_application *app);
+void garp_uninit_applicant(struct net_device *dev,
+                          struct garp_application *app);
+
+int garp_request_join(const struct net_device *dev,
+                     const struct garp_application *app, const void *data,
+                     u8 len, u8 type);
+void garp_request_leave(const struct net_device *dev,
+                       const struct garp_application *app,
+                       const void *data, u8 len, u8 type);
 
 #endif /* _NET_GARP_H */
index cf8439ba4d11062a326919fa885c3ae5a24e62c5..ea4271dceff0da2df04497e424e49c18b9a6d6f5 100644 (file)
@@ -19,32 +19,31 @@ struct gnet_dump {
        struct tc_stats   tc_stats;
 };
 
-extern int gnet_stats_start_copy(struct sk_buff *skb, int type,
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+                         struct gnet_dump *d);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+                                int tc_stats_type, int xstats_type,
                                 spinlock_t *lock, struct gnet_dump *d);
 
-extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
-                                       int tc_stats_type,int xstats_type,
-                                       spinlock_t *lock, struct gnet_dump *d);
-
-extern int gnet_stats_copy_basic(struct gnet_dump *d,
-                                struct gnet_stats_basic_packed *b);
-extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
-                                   const struct gnet_stats_basic_packed *b,
-                                   struct gnet_stats_rate_est64 *r);
-extern int gnet_stats_copy_queue(struct gnet_dump *d,
-                                struct gnet_stats_queue *q);
-extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
-
-extern int gnet_stats_finish_copy(struct gnet_dump *d);
-
-extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                            struct gnet_stats_rate_est64 *rate_est,
-                            spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
-                              struct gnet_stats_rate_est64 *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                                struct gnet_stats_rate_est64 *rate_est,
-                                spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
-                                const struct gnet_stats_rate_est64 *rate_est);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+                         struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+                            const struct gnet_stats_basic_packed *b,
+                            struct gnet_stats_rate_est64 *r);
+int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+int gnet_stats_finish_copy(struct gnet_dump *d);
+
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+                     struct gnet_stats_rate_est64 *rate_est,
+                     spinlock_t *stats_lock, struct nlattr *opt);
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+                       struct gnet_stats_rate_est64 *rate_est);
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+                         struct gnet_stats_rate_est64 *rate_est,
+                         spinlock_t *stats_lock, struct nlattr *opt);
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+                         const struct gnet_stats_rate_est64 *rate_est);
 #endif
index 8e0b6c856a1302227812acbc10cb5e6b1f230bf7..9b787b62cf16066e6f2335301865849fa674f7a1 100644 (file)
@@ -122,7 +122,7 @@ struct genl_ops {
        struct list_head        ops_list;
 };
 
-extern int __genl_register_family(struct genl_family *family);
+int __genl_register_family(struct genl_family *family);
 
 static inline int genl_register_family(struct genl_family *family)
 {
@@ -130,8 +130,8 @@ static inline int genl_register_family(struct genl_family *family)
        return __genl_register_family(family);
 }
 
-extern int __genl_register_family_with_ops(struct genl_family *family,
-       struct genl_ops *ops, size_t n_ops);
+int __genl_register_family_with_ops(struct genl_family *family,
+                                   struct genl_ops *ops, size_t n_ops);
 
 static inline int genl_register_family_with_ops(struct genl_family *family,
        struct genl_ops *ops, size_t n_ops)
@@ -140,18 +140,18 @@ static inline int genl_register_family_with_ops(struct genl_family *family,
        return __genl_register_family_with_ops(family, ops, n_ops);
 }
 
-extern int genl_unregister_family(struct genl_family *family);
-extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_register_mc_group(struct genl_family *family,
-                                 struct genl_multicast_group *grp);
-extern void genl_unregister_mc_group(struct genl_family *family,
-                                    struct genl_multicast_group *grp);
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
-                       u32 group, struct nlmsghdr *nlh, gfp_t flags);
+int genl_unregister_family(struct genl_family *family);
+int genl_register_ops(struct genl_family *, struct genl_ops *ops);
+int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+int genl_register_mc_group(struct genl_family *family,
+                          struct genl_multicast_group *grp);
+void genl_unregister_mc_group(struct genl_family *family,
+                             struct genl_multicast_group *grp);
+void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
+                u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
-                               struct genl_family *family, int flags, u8 cmd);
+                 struct genl_family *family, int flags, u8 cmd);
 
 /**
  * genlmsg_nlhdr - Obtain netlink header from user specified header
index 57e4afdf7879ca9436fa139c31e3f162a407e502..dcd9ae3270d386b06d7565cfb8b7c4bb59dde740 100644 (file)
@@ -38,7 +38,13 @@ void gre_offload_exit(void);
 
 void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                      int hdr_len);
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+                                                 bool gre_csum)
+{
+       return iptunnel_handle_offloads(skb, gre_csum, SKB_GSO_GRE);
+}
+
 
 static inline int ip_gre_calc_hlen(__be16 o_flags)
 {
index 081439fd070ed7ce932ff1e1877398d1d119207c..970028e13382807108764f8784d9b1f11d7078d7 100644 (file)
@@ -39,10 +39,10 @@ struct net_proto_family;
 struct sk_buff;
 struct net;
 
-extern void    icmp_send(struct sk_buff *skb_in,  int type, int code, __be32 info);
-extern int     icmp_rcv(struct sk_buff *skb);
-extern void    icmp_err(struct sk_buff *, u32 info);
-extern int     icmp_init(void);
-extern void    icmp_out_count(struct net *net, unsigned char type);
+void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+int icmp_rcv(struct sk_buff *skb);
+void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_init(void);
+void icmp_out_count(struct net *net, unsigned char type);
 
 #endif /* _ICMP_H */
index 04642c920431151fd09f00bc1400c164e0a2f9c0..f981ba7adeed4c5dbaba8d5e662768d3503bfec7 100644 (file)
@@ -22,27 +22,25 @@ struct sk_buff;
 struct sock;
 struct sockaddr;
 
-extern int inet6_csk_bind_conflict(const struct sock *sk,
-                                  const struct inet_bind_bucket *tb, bool relax);
+int inet6_csk_bind_conflict(const struct sock *sk,
+                           const struct inet_bind_bucket *tb, bool relax);
 
-extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
-                                            struct flowi6 *fl6,
-                                            const struct request_sock *req);
+struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+                                     const struct request_sock *req);
 
-extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
-                                                struct request_sock ***prevp,
-                                                const __be16 rport,
-                                                const struct in6_addr *raddr,
-                                                const struct in6_addr *laddr,
-                                                const int iif);
+struct request_sock *inet6_csk_search_req(const struct sock *sk,
+                                         struct request_sock ***prevp,
+                                         const __be16 rport,
+                                         const struct in6_addr *raddr,
+                                         const struct in6_addr *laddr,
+                                         const int iif);
 
-extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
-                                          struct request_sock *req,
-                                          const unsigned long timeout);
+void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+                                   const unsigned long timeout);
 
-extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
 
-extern struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET6_CONNECTION_SOCK_H */
index fd4ee016ba5cd5e4dc87b08cf06aef17a6b9cc27..ae061354430836687225be9ae1a8c1f09a35ed8a 100644 (file)
 
 struct inet_hashinfo;
 
-static inline unsigned int inet6_ehashfn(struct net *net,
-                               const struct in6_addr *laddr, const u16 lport,
-                               const struct in6_addr *faddr, const __be16 fport)
+static inline unsigned int __inet6_ehashfn(const u32 lhash,
+                                   const u16 lport,
+                                   const u32 fhash,
+                                   const __be16 fport,
+                                   const u32 initval)
 {
-       u32 ports = (((u32)lport) << 16) | (__force u32)fport;
-
-       return jhash_3words((__force u32)laddr->s6_addr32[3],
-                           ipv6_addr_jhash(faddr),
-                           ports,
-                           inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet6_sk_ehashfn(const struct sock *sk)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-       const struct ipv6_pinfo *np = inet6_sk(sk);
-       const struct in6_addr *laddr = &np->rcv_saddr;
-       const struct in6_addr *faddr = &np->daddr;
-       const __u16 lport = inet->inet_num;
-       const __be16 fport = inet->inet_dport;
-       struct net *net = sock_net(sk);
-
-       return inet6_ehashfn(net, laddr, lport, faddr, fport);
+       const u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+       return jhash_3words(lhash, fhash, ports, initval);
 }
 
-extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
 
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
@@ -61,21 +46,19 @@ extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
  *
  * The sockhash lock must be held as a reader here.
  */
-extern struct sock *__inet6_lookup_established(struct net *net,
-                                          struct inet_hashinfo *hashinfo,
-                                          const struct in6_addr *saddr,
-                                          const __be16 sport,
-                                          const struct in6_addr *daddr,
-                                          const u16 hnum,
-                                          const int dif);
-
-extern struct sock *inet6_lookup_listener(struct net *net,
-                                         struct inet_hashinfo *hashinfo,
-                                         const struct in6_addr *saddr,
-                                         const __be16 sport,
-                                         const struct in6_addr *daddr,
-                                         const unsigned short hnum,
-                                         const int dif);
+struct sock *__inet6_lookup_established(struct net *net,
+                                       struct inet_hashinfo *hashinfo,
+                                       const struct in6_addr *saddr,
+                                       const __be16 sport,
+                                       const struct in6_addr *daddr,
+                                       const u16 hnum, const int dif);
+
+struct sock *inet6_lookup_listener(struct net *net,
+                                  struct inet_hashinfo *hashinfo,
+                                  const struct in6_addr *saddr,
+                                  const __be16 sport,
+                                  const struct in6_addr *daddr,
+                                  const unsigned short hnum, const int dif);
 
 static inline struct sock *__inet6_lookup(struct net *net,
                                          struct inet_hashinfo *hashinfo,
@@ -110,9 +93,9 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
                              inet6_iif(skb));
 }
 
-extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
-                                const struct in6_addr *saddr, const __be16 sport,
-                                const struct in6_addr *daddr, const __be16 dport,
-                                const int dif);
+struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+                         const struct in6_addr *saddr, const __be16 sport,
+                         const struct in6_addr *daddr, const __be16 dport,
+                         const int dif);
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
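The rewritten __inet6_ehashfn() above now takes pre-computed address hashes plus an initval parameter, so the per-table secret is supplied by the caller instead of being read from a global inside the hash function. A toy sketch of that calling convention, with an invented mixer standing in where the kernel uses jhash_3words():

/*
 * Not the kernel code: only the shape of the interface matters here --
 * the seed (initval) travels in from the caller.
 */
#include <stdint.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
        uint32_t h = seed ^ (a * 0x9e3779b1u);  /* toy stand-in for jhash */

        h = (h << 13) | (h >> 19);
        h ^= b * 0x85ebca6bu;
        h = (h << 7) | (h >> 25);
        return h ^ c;
}

static uint32_t ehashfn_sketch(uint32_t lhash, uint16_t lport,
                               uint32_t fhash, uint16_t fport,
                               uint32_t initval)
{
        uint32_t ports = ((uint32_t)lport << 16) | fport;

        return mix3(lhash, fhash, ports, initval);
}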
index 234008782c8cca00e2af6394c17391189abd974a..fe7994c48b75685174134e7817bbb20ef375f890 100644 (file)
@@ -13,30 +13,30 @@ struct sock;
 struct sockaddr;
 struct socket;
 
-extern int inet_release(struct socket *sock);
-extern int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                              int addr_len, int flags);
-extern int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                                int addr_len, int flags);
-extern int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
-                             int addr_len, int flags);
-extern int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t size);
-extern ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
-                            size_t size, int flags);
-extern int inet_recvmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t size, int flags);
-extern int inet_shutdown(struct socket *sock, int how);
-extern int inet_listen(struct socket *sock, int backlog);
-extern void inet_sock_destruct(struct sock *sk);
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-extern int inet_getname(struct socket *sock, struct sockaddr *uaddr,
-                       int *uaddr_len, int peer);
-extern int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-extern int inet_ctl_sock_create(struct sock **sk, unsigned short family,
-                               unsigned short type, unsigned char protocol,
-                               struct net *net);
+int inet_release(struct socket *sock);
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                       int addr_len, int flags);
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                         int addr_len, int flags);
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+                      int addr_len, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+                size_t size);
+ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+                     size_t size, int flags);
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+                size_t size, int flags);
+int inet_shutdown(struct socket *sock, int how);
+int inet_listen(struct socket *sock, int backlog);
+void inet_sock_destruct(struct sock *sk);
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+                int peer);
+int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+                        unsigned short type, unsigned char protocol,
+                        struct net *net);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
index de2c78529afaf81f00abff62bb29708f7f6941a2..c55aeed41acea4ce12b49b2d77862a4cf609363a 100644 (file)
@@ -146,9 +146,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
        return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone_lock(const struct sock *sk,
-                                       const struct request_sock *req,
-                                       const gfp_t priority);
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+                                const struct request_sock *req,
+                                const gfp_t priority);
 
 enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED  = 1,
@@ -157,11 +157,11 @@ enum inet_csk_ack_state_t {
        ICSK_ACK_PUSHED2 = 8
 };
 
-extern void inet_csk_init_xmit_timers(struct sock *sk,
-                                     void (*retransmit_handler)(unsigned long),
-                                     void (*delack_handler)(unsigned long),
-                                     void (*keepalive_handler)(unsigned long));
-extern void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_init_xmit_timers(struct sock *sk,
+                              void (*retransmit_handler)(unsigned long),
+                              void (*delack_handler)(unsigned long),
+                              void (*keepalive_handler)(unsigned long));
+void inet_csk_clear_xmit_timers(struct sock *sk);
 
 static inline void inet_csk_schedule_ack(struct sock *sk)
 {
@@ -178,8 +178,8 @@ static inline void inet_csk_delack_init(struct sock *sk)
        memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 
 #ifdef INET_CSK_DEBUG
 extern const char inet_csk_timer_bug_msg[];
@@ -241,23 +241,21 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
 #endif
 }
 
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-extern struct request_sock *inet_csk_search_req(const struct sock *sk,
-                                               struct request_sock ***prevp,
-                                               const __be16 rport,
-                                               const __be32 raddr,
-                                               const __be32 laddr);
-extern int inet_csk_bind_conflict(const struct sock *sk,
-                                 const struct inet_bind_bucket *tb, bool relax);
-extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+                                        struct request_sock ***prevp,
+                                        const __be16 rport,
+                                        const __be32 raddr,
+                                        const __be32 laddr);
+int inet_csk_bind_conflict(const struct sock *sk,
+                          const struct inet_bind_bucket *tb, bool relax);
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
-extern struct dst_entry* inet_csk_route_req(struct sock *sk,
-                                           struct flowi4 *fl4,
+struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+                                    const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
                                            const struct request_sock *req);
-extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
-                                                  struct sock *newsk,
-                                                  const struct request_sock *req);
 
 static inline void inet_csk_reqsk_queue_add(struct sock *sk,
                                            struct request_sock *req,
@@ -266,9 +264,8 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
        reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
-                                         struct request_sock *req,
-                                         unsigned long timeout);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+                                  unsigned long timeout);
 
 static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
                                                struct request_sock *req)
@@ -315,13 +312,13 @@ static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
        reqsk_free(req);
 }
 
-extern void inet_csk_reqsk_queue_prune(struct sock *parent,
-                                      const unsigned long interval,
-                                      const unsigned long timeout,
-                                      const unsigned long max_rto);
+void inet_csk_reqsk_queue_prune(struct sock *parent,
+                               const unsigned long interval,
+                               const unsigned long timeout,
+                               const unsigned long max_rto);
 
-extern void inet_csk_destroy_sock(struct sock *sk);
-extern void inet_csk_prepare_forced_close(struct sock *sk);
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
 
 /*
  * LISTEN is a special case for poll..
@@ -332,15 +329,15 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
                        (POLLIN | POLLRDNORM) : 0;
 }
 
-extern int  inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
-extern void inet_csk_listen_stop(struct sock *sk);
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+void inet_csk_listen_stop(struct sock *sk);
 
-extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, int __user *optlen);
-extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, unsigned int optlen);
+int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
+                              char __user *optval, int __user *optlen);
+int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+                              char __user *optval, unsigned int optlen);
 
-extern struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET_CONNECTION_SOCK_H */
index bfcbc0017950372ddd39dd540ff3f6da99f2bee6..6f59de98dabde3be0daa202f8be4528d49676f7f 100644 (file)
@@ -64,6 +64,10 @@ struct inet_frags {
        rwlock_t                lock ____cacheline_aligned_in_smp;
        int                     secret_interval;
        struct timer_list       secret_timer;
+
+       /* The first call to hashfn is responsible for initializing
+        * rnd. This is best done with net_get_random_once.
+        */
        u32                     rnd;
        int                     qsize;
 
index ef83d9e844b54f0468034ce54da3d4552868f7e9..1bdb47715def0e21496ae89a59d3d9bd5f1f2c81 100644 (file)
 #include <asm/byteorder.h>
 
 /* This is for all connections with a full identity, no wildcards.
- * One chain is dedicated to TIME_WAIT sockets.
- * I'll experiment with dynamic table growth later.
+ * The 'e' prefix stands for Establish, but we really put all sockets
+ * except LISTEN ones in it.
  */
 struct inet_ehash_bucket {
        struct hlist_nulls_head chain;
-       struct hlist_nulls_head twchain;
 };
 
 /* There are a few simple rules, which allow for local port reuse by
@@ -123,7 +122,6 @@ struct inet_hashinfo {
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
-        * TIME_WAIT sockets use a separate chain (twchain).
         */
        struct inet_ehash_bucket        *ehash;
        spinlock_t                      *ehash_locks;
@@ -218,22 +216,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
        }
 }
 
-extern struct inet_bind_bucket *
-                   inet_bind_bucket_create(struct kmem_cache *cachep,
-                                           struct net *net,
-                                           struct inet_bind_hashbucket *head,
-                                           const unsigned short snum);
-extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
-                                    struct inet_bind_bucket *tb);
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+                       struct inet_bind_hashbucket *head,
+                       const unsigned short snum);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+                             struct inet_bind_bucket *tb);
 
-static inline int inet_bhashfn(struct net *net,
-               const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net, const __u16 lport,
+                              const int bhash_size)
 {
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
 }
 
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-                          const unsigned short snum);
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+                   const unsigned short snum);
 
 /* These can have wildcards, don't try too hard. */
 static inline int inet_lhashfn(struct net *net, const unsigned short num)
@@ -247,23 +244,22 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
 }
 
 /* Caller must disable local BH processing. */
-extern int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(struct sock *sk, struct sock *child);
 
-extern void inet_put_port(struct sock *sk);
+void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-extern void inet_hash(struct sock *sk);
-extern void inet_unhash(struct sock *sk);
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+void inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
 
-extern struct sock *__inet_lookup_listener(struct net *net,
-                                          struct inet_hashinfo *hashinfo,
-                                          const __be32 saddr,
-                                          const __be16 sport,
-                                          const __be32 daddr,
-                                          const unsigned short hnum,
-                                          const int dif);
+struct sock *__inet_lookup_listener(struct net *net,
+                                   struct inet_hashinfo *hashinfo,
+                                   const __be32 saddr, const __be16 sport,
+                                   const __be32 daddr,
+                                   const unsigned short hnum,
+                                   const int dif);
 
 static inline struct sock *inet_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
@@ -304,30 +300,17 @@ static inline struct sock *inet_lookup_listener(struct net *net,
                                   ((__force __u64)(__be32)(__saddr)));
 #endif /* __BIG_ENDIAN */
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)    \
-       ((inet_sk(__sk)->inet_portpair == (__ports))            &&      \
-        (inet_sk(__sk)->inet_addrpair == (__cookie))           &&      \
+       (((__sk)->sk_portpair == (__ports))                     &&      \
+        ((__sk)->sk_addrpair == (__cookie))                    &&      \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
-       ((inet_twsk(__sk)->tw_portpair == (__ports))    &&              \
-        (inet_twsk(__sk)->tw_addrpair == (__cookie))   &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
-        net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr)
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-       ((inet_sk(__sk)->inet_portpair == (__ports))    &&              \
-        (inet_sk(__sk)->inet_daddr     == (__saddr))   &&              \
-        (inet_sk(__sk)->inet_rcv_saddr == (__daddr))   &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
-        net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-       ((inet_twsk(__sk)->tw_portpair == (__ports))    &&              \
-        (inet_twsk(__sk)->tw_daddr     == (__saddr))   &&              \
-        (inet_twsk(__sk)->tw_rcv_saddr == (__daddr))   &&              \
+       (((__sk)->sk_portpair == (__ports))             &&              \
+        ((__sk)->sk_daddr      == (__saddr))           &&              \
+        ((__sk)->sk_rcv_saddr  == (__daddr))           &&              \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
         net_eq(sock_net(__sk), (__net)))
@@ -339,10 +322,11 @@ static inline struct sock *inet_lookup_listener(struct net *net,
  *
  * Local BH must be disabled here.
  */
-extern struct sock * __inet_lookup_established(struct net *net,
-               struct inet_hashinfo *hashinfo,
-               const __be32 saddr, const __be16 sport,
-               const __be32 daddr, const u16 hnum, const int dif);
+struct sock *__inet_lookup_established(struct net *net,
+                                      struct inet_hashinfo *hashinfo,
+                                      const __be32 saddr, const __be16 sport,
+                                      const __be32 daddr, const u16 hnum,
+                                      const int dif);
 
 static inline struct sock *
        inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -399,13 +383,14 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                     iph->daddr, dport, inet_iif(skb));
 }
 
-extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-               struct sock *sk,
-               u32 port_offset,
-               int (*check_established)(struct inet_timewait_death_row *,
-                       struct sock *, __u16, struct inet_timewait_sock **),
-               int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+                       struct sock *sk, u32 port_offset,
+                       int (*check_established)(struct inet_timewait_death_row *,
+                                                struct sock *, __u16,
+                                                struct inet_timewait_sock **),
+                       int (*hash)(struct sock *sk,
+                                   struct inet_timewait_sock *twp));
 
-extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
-                            struct sock *sk);
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+                     struct sock *sk);
 #endif /* _INET_HASHTABLES_H */
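This header loses the dedicated twchain and the INET_TW_MATCH macro: TIME_WAIT sockets now sit on the same ehash chain as established ones and are matched by the common INET_MATCH test on the shared sock fields. A userspace sketch of the packed address-pair comparison that keeps the 64-bit fast path a single compare; the field layout here is illustrative only, not the kernel's:

/*
 * Sketch: pack both IPv4 addresses into one 64-bit cookie so a lookup
 * compares them with a single test.
 */
#include <stdint.h>
#include <stdbool.h>

static inline uint64_t addr_cookie(uint32_t saddr, uint32_t daddr)
{
        return ((uint64_t)saddr << 32) | daddr;
}

static inline bool addrpair_match(uint64_t sk_addrpair,
                                  uint32_t saddr, uint32_t daddr)
{
        return sk_addrpair == addr_cookie(saddr, daddr);
}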
index b21a7f06d6a4955910b577a23b04eb904207265a..1833c3f389ee64a0c6b3862d4f2fbc6db0984b0a 100644 (file)
@@ -70,13 +70,14 @@ struct ip_options_data {
 
 struct inet_request_sock {
        struct request_sock     req;
-#if IS_ENABLED(CONFIG_IPV6)
-       u16                     inet6_rsk_offset;
-#endif
-       __be16                  loc_port;
-       __be32                  loc_addr;
-       __be32                  rmt_addr;
-       __be16                  rmt_port;
+#define ir_loc_addr            req.__req_common.skc_rcv_saddr
+#define ir_rmt_addr            req.__req_common.skc_daddr
+#define ir_num                 req.__req_common.skc_num
+#define ir_rmt_port            req.__req_common.skc_dport
+#define ir_v6_rmt_addr         req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr         req.__req_common.skc_v6_rcv_saddr
+#define ir_iif                 req.__req_common.skc_bound_dev_if
+
        kmemcheck_bitfield_begin(flags);
        u16                     snd_wscale : 4,
                                rcv_wscale : 4,
@@ -88,6 +89,7 @@ struct inet_request_sock {
                                no_srccheck: 1;
        kmemcheck_bitfield_end(flags);
        struct ip_options_rcu   *opt;
+       struct sk_buff          *pktopts;
 };
 
 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -103,6 +105,9 @@ struct inet_cork {
        int                     length; /* Total length of all frames */
        struct dst_entry        *dst;
        u8                      tx_flags;
+       __u8                    ttl;
+       __s16                   tos;
+       char                    priority;
 };
 
 struct inet_cork_full {
@@ -143,10 +148,8 @@ struct inet_sock {
        /* Socket demultiplex comparisons on incoming packets. */
 #define inet_daddr             sk.__sk_common.skc_daddr
 #define inet_rcv_saddr         sk.__sk_common.skc_rcv_saddr
-#define inet_addrpair          sk.__sk_common.skc_addrpair
 #define inet_dport             sk.__sk_common.skc_dport
 #define inet_num               sk.__sk_common.skc_num
-#define inet_portpair          sk.__sk_common.skc_portpair
 
        __be32                  inet_saddr;
        __s16                   uc_ttl;
@@ -199,32 +202,18 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 }
 #endif
 
-extern int inet_sk_rebuild_header(struct sock *sk);
-
-extern u32 inet_ehash_secret;
-extern u32 ipv6_hash_secret;
-extern void build_ehash_secret(void);
+int inet_sk_rebuild_header(struct sock *sk);
 
-static inline unsigned int inet_ehashfn(struct net *net,
-                                       const __be32 laddr, const __u16 lport,
-                                       const __be32 faddr, const __be16 fport)
+static inline unsigned int __inet_ehashfn(const __be32 laddr,
+                                         const __u16 lport,
+                                         const __be32 faddr,
+                                         const __be16 fport,
+                                         u32 initval)
 {
        return jhash_3words((__force __u32) laddr,
                            (__force __u32) faddr,
                            ((__u32) lport) << 16 | (__force __u32)fport,
-                           inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet_sk_ehashfn(const struct sock *sk)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-       const __be32 laddr = inet->inet_rcv_saddr;
-       const __u16 lport = inet->inet_num;
-       const __be32 faddr = inet->inet_daddr;
-       const __be16 fport = inet->inet_dport;
-       struct net *net = sock_net(sk);
-
-       return inet_ehashfn(net, laddr, lport, faddr, fport);
+                           initval);
 }
 
 static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
index f908dfc06505850b54e6868c53da957ff71c3527..71c6e264e5b521bfa43033b13a0524ac2556d002 100644 (file)
@@ -58,6 +58,11 @@ struct inet_hashinfo;
 # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
 #endif
 
+static inline u32 inet_tw_time_stamp(void)
+{
+       return jiffies;
+}
+
 /* TIME_WAIT reaping mechanism. */
 #define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
 
@@ -83,9 +88,9 @@ struct inet_timewait_death_row {
        int                     sysctl_max_tw_buckets;
 };
 
-extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(struct work_struct *work);
-extern void inet_twdr_twcal_tick(unsigned long data);
+void inet_twdr_hangman(unsigned long data);
+void inet_twdr_twkill_work(struct work_struct *work);
+void inet_twdr_twcal_tick(unsigned long data);
 
 struct inet_bind_bucket;
 
@@ -111,11 +116,11 @@ struct inet_timewait_sock {
 #define tw_prot                        __tw_common.skc_prot
 #define tw_net                 __tw_common.skc_net
 #define tw_daddr               __tw_common.skc_daddr
+#define tw_v6_daddr            __tw_common.skc_v6_daddr
 #define tw_rcv_saddr           __tw_common.skc_rcv_saddr
-#define tw_addrpair            __tw_common.skc_addrpair
+#define tw_v6_rcv_saddr        __tw_common.skc_v6_rcv_saddr
 #define tw_dport               __tw_common.skc_dport
 #define tw_num                 __tw_common.skc_num
-#define tw_portpair            __tw_common.skc_portpair
 
        int                     tw_timeout;
        volatile unsigned char  tw_substate;
@@ -130,26 +135,14 @@ struct inet_timewait_sock {
                                tw_transparent  : 1,
                                tw_pad          : 6,    /* 6 bits hole */
                                tw_tos          : 8,
-                               tw_ipv6_offset  : 16;
+                               tw_pad2         : 16;   /* 16 bits hole */
        kmemcheck_bitfield_end(flags);
-       unsigned long           tw_ttd;
+       u32                     tw_ttd;
        struct inet_bind_bucket *tw_tb;
        struct hlist_node       tw_death_node;
 };
 #define tw_tclass tw_tos
 
-static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-                                     struct hlist_nulls_head *list)
-{
-       hlist_nulls_add_head_rcu(&tw->tw_node, list);
-}
-
-static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
-                                          struct hlist_head *list)
-{
-       hlist_add_head(&tw->tw_bind_node, list);
-}
-
 static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
 {
        return !hlist_unhashed(&tw->tw_death_node);
@@ -189,34 +182,28 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
        return (struct inet_timewait_sock *)sk;
 }
 
-static inline __be32 sk_rcv_saddr(const struct sock *sk)
-{
-/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
-       return sk->__sk_common.skc_rcv_saddr;
-}
-
-extern void inet_twsk_put(struct inet_timewait_sock *tw);
+void inet_twsk_free(struct inet_timewait_sock *tw);
+void inet_twsk_put(struct inet_timewait_sock *tw);
 
-extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
+int inet_twsk_unhash(struct inet_timewait_sock *tw);
 
-extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-                                struct inet_hashinfo *hashinfo);
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+                         struct inet_hashinfo *hashinfo);
 
-extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
-                                                 const int state);
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+                                          const int state);
 
-extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
-                                 struct sock *sk,
-                                 struct inet_hashinfo *hashinfo);
+void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+                          struct inet_hashinfo *hashinfo);
 
-extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
-                              struct inet_timewait_death_row *twdr,
-                              const int timeo, const int timewait_len);
-extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
-                                struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw,
+                       struct inet_timewait_death_row *twdr,
+                       const int timeo, const int timewait_len);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw,
+                         struct inet_timewait_death_row *twdr);
 
-extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
-                           struct inet_timewait_death_row *twdr, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+                    struct inet_timewait_death_row *twdr, int family);
 
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
index 53f464d7cddcd6ce18925c521b99bfaefac66f2f..f4e127af4e179dd022007ed4a416ab32c9146533 100644 (file)
@@ -120,9 +120,9 @@ static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from
        }
 }
 
-extern void inet_peer_base_init(struct inet_peer_base *);
+void inet_peer_base_init(struct inet_peer_base *);
 
-void                   inet_initpeers(void) __init;
+void inet_initpeers(void) __init;
 
 #define INETPEER_METRICS_NEW   (~(u32) 0)
 
@@ -159,11 +159,11 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
 }
 
 /* can be called from BH context or outside */
-extern void inet_putpeer(struct inet_peer *p);
-extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
-extern void inetpeer_invalidate_tree(struct inet_peer_base *);
-extern void inetpeer_invalidate_family(int family);
+void inetpeer_invalidate_tree(struct inet_peer_base *);
+void inetpeer_invalidate_family(int family);
 
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
index 5e5268807a1ceb521f673ca05ec3233389a97f61..217bc5bfc6c6e99cf52117312ceda95c98f005a6 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/skbuff.h>
 
 #include <net/inet_sock.h>
+#include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
 
@@ -56,6 +57,9 @@ struct ipcm_cookie {
        int                     oif;
        struct ip_options_rcu   *opt;
        __u8                    tx_flags;
+       __u8                    ttl;
+       __s16                   tos;
+       char                    priority;
 };
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -86,64 +90,71 @@ struct packet_type;
 struct rtable;
 struct sockaddr;
 
-extern int             igmp_mc_proc_init(void);
+int igmp_mc_proc_init(void);
 
 /*
  *     Functions provided by ip.c
  */
 
-extern int             ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
-                                             __be32 saddr, __be32 daddr,
-                                             struct ip_options_rcu *opt);
-extern int             ip_rcv(struct sk_buff *skb, struct net_device *dev,
-                              struct packet_type *pt, struct net_device *orig_dev);
-extern int             ip_local_deliver(struct sk_buff *skb);
-extern int             ip_mr_input(struct sk_buff *skb);
-extern int             ip_output(struct sk_buff *skb);
-extern int             ip_mc_output(struct sk_buff *skb);
-extern int             ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-extern int             ip_do_nat(struct sk_buff *skb);
-extern void            ip_send_check(struct iphdr *ip);
-extern int             __ip_local_out(struct sk_buff *skb);
-extern int             ip_local_out(struct sk_buff *skb);
-extern int             ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
-extern void            ip_init(void);
-extern int             ip_append_data(struct sock *sk, struct flowi4 *fl4,
-                                      int getfrag(void *from, char *to, int offset, int len,
-                                                  int odd, struct sk_buff *skb),
-                               void *from, int len, int protolen,
-                               struct ipcm_cookie *ipc,
-                               struct rtable **rt,
-                               unsigned int flags);
-extern int             ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
-extern ssize_t         ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
-                               int offset, size_t size, int flags);
-extern struct sk_buff  *__ip_make_skb(struct sock *sk,
-                                     struct flowi4 *fl4,
-                                     struct sk_buff_head *queue,
-                                     struct inet_cork *cork);
-extern int             ip_send_skb(struct net *net, struct sk_buff *skb);
-extern int             ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
-extern void            ip_flush_pending_frames(struct sock *sk);
-extern struct sk_buff  *ip_make_skb(struct sock *sk,
-                                   struct flowi4 *fl4,
-                                   int getfrag(void *from, char *to, int offset, int len,
-                                               int odd, struct sk_buff *skb),
-                                   void *from, int length, int transhdrlen,
-                                   struct ipcm_cookie *ipc,
-                                   struct rtable **rtp,
-                                   unsigned int flags);
+int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+                         __be32 saddr, __be32 daddr,
+                         struct ip_options_rcu *opt);
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+          struct net_device *orig_dev);
+int ip_local_deliver(struct sk_buff *skb);
+int ip_mr_input(struct sk_buff *skb);
+int ip_output(struct sk_buff *skb);
+int ip_mc_output(struct sk_buff *skb);
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_do_nat(struct sk_buff *skb);
+void ip_send_check(struct iphdr *ip);
+int __ip_local_out(struct sk_buff *skb);
+int ip_local_out(struct sk_buff *skb);
+int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
+void ip_init(void);
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
+                  int getfrag(void *from, char *to, int offset, int len,
+                              int odd, struct sk_buff *skb),
+                  void *from, int len, int protolen,
+                  struct ipcm_cookie *ipc,
+                  struct rtable **rt,
+                  unsigned int flags);
+int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
+                      struct sk_buff *skb);
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+                      int offset, size_t size, int flags);
+struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+                             struct sk_buff_head *queue,
+                             struct inet_cork *cork);
+int ip_send_skb(struct net *net, struct sk_buff *skb);
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
+void ip_flush_pending_frames(struct sock *sk);
+struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length, int transhdrlen,
+                           struct ipcm_cookie *ipc, struct rtable **rtp,
+                           unsigned int flags);
 
 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 {
        return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
 }
 
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+{
+       return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
+
+static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+{
+       return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+}
+
 /* datagram.c */
-extern int             ip4_datagram_connect(struct sock *sk, 
-                                            struct sockaddr *uaddr, int addr_len);
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 
-extern void ip4_datagram_release_cb(struct sock *sk);
+void ip4_datagram_release_cb(struct sock *sk);
 
 struct ip_reply_arg {
        struct kvec iov[1];   
@@ -184,16 +195,16 @@ extern struct ipv4_config ipv4_config;
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib[], int offt);
 #if BITS_PER_LONG==32
-extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
 #else
 static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
 {
        return snmp_fold_field(mib, offt);
 }
 #endif
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
 
 static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
 {
@@ -206,11 +217,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
        }
 }
 
-extern struct local_ports {
-       seqlock_t       lock;
-       int             range[2];
-} sysctl_local_ports;
-extern void inet_get_local_port_range(int *low, int *high);
+void inet_get_local_port_range(struct net *net, int *low, int *high);
 
 extern unsigned long *sysctl_local_reserved_ports;
 static inline int inet_is_reserved_local_port(int port)
@@ -231,9 +238,9 @@ extern int sysctl_ip_early_demux;
 /* From ip_output.c */
 extern int sysctl_ip_dynaddr;
 
-extern void ipfrag_init(void);
+void ipfrag_init(void);
 
-extern void ip_static_sysctl_init(void);
+void ip_static_sysctl_init(void);
 
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
@@ -262,7 +269,7 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
                 !(dst_metric_locked(dst, RTAX_MTU)));
 }
 
-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
 
 static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
 {
@@ -367,7 +374,7 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
                struct ipv6_pinfo *np = inet6_sk(sk);
 
                memset(&np->saddr, 0, sizeof(np->saddr));
-               memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+               memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
        }
 #endif
 }
@@ -390,7 +397,7 @@ static inline int sk_mc_loop(struct sock *sk)
        return 1;
 }
 
-extern bool ip_call_ra_chain(struct sk_buff *skb);
+bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
  *     Functions provided by ip_fragment.c
@@ -428,50 +435,52 @@ int ip_frag_nqueues(struct net *net);
  *     Functions provided by ip_forward.c
  */
  
-extern int ip_forward(struct sk_buff *skb);
+int ip_forward(struct sk_buff *skb);
  
 /*
  *     Functions provided by ip_options.c
  */
  
-extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
-                            __be32 daddr, struct rtable *rt, int is_frag);
-extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
-extern void ip_options_fragment(struct sk_buff *skb);
-extern int ip_options_compile(struct net *net,
-                             struct ip_options *opt, struct sk_buff *skb);
-extern int ip_options_get(struct net *net, struct ip_options_rcu **optp,
-                         unsigned char *data, int optlen);
-extern int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
-                                   unsigned char __user *data, int optlen);
-extern void ip_options_undo(struct ip_options * opt);
-extern void ip_forward_options(struct sk_buff *skb);
-extern int ip_options_rcv_srr(struct sk_buff *skb);
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+                     __be32 daddr, struct rtable *rt, int is_frag);
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+void ip_options_fragment(struct sk_buff *skb);
+int ip_options_compile(struct net *net, struct ip_options *opt,
+                      struct sk_buff *skb);
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+                  unsigned char *data, int optlen);
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+                            unsigned char __user *data, int optlen);
+void ip_options_undo(struct ip_options *opt);
+void ip_forward_options(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb);
 
 /*
  *     Functions provided by ip_sockglue.c
  */
 
-extern void    ipv4_pktinfo_prepare(struct sk_buff *skb);
-extern void    ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-extern int     ip_cmsg_send(struct net *net,
-                            struct msghdr *msg, struct ipcm_cookie *ipc);
-extern int     ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
-extern int     ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
-extern int     compat_ip_setsockopt(struct sock *sk, int level,
-                       int optname, char __user *optval, unsigned int optlen);
-extern int     compat_ip_getsockopt(struct sock *sk, int level,
-                       int optname, char __user *optval, int __user *optlen);
-extern int     ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
-
-extern int     ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern void    ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 
-                             __be16 port, u32 info, u8 *payload);
-extern void    ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
-                              u32 info);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+                 unsigned int optlen);
+int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
+                 int __user *optlen);
+int compat_ip_setsockopt(struct sock *sk, int level, int optname,
+                        char __user *optval, unsigned int optlen);
+int compat_ip_getsockopt(struct sock *sk, int level, int optname,
+                        char __user *optval, int __user *optlen);
+int ip_ra_control(struct sock *sk, unsigned char on,
+                 void (*destructor)(struct sock *));
+
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+                  u32 info, u8 *payload);
+void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+                   u32 info);
 
 #ifdef CONFIG_PROC_FS
-extern int ip_misc_proc_init(void);
+int ip_misc_proc_init(void);
 #endif
 
 #endif /* _IP_H */
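The new get_rttos()/get_rtconn_flags() helpers earlier in this file pick the per-call TOS from the ipcm_cookie when the sender supplied one (ipc->tos != -1) and otherwise fall back to the socket's default. A minimal, self-contained sketch of that selection logic follows; it is plain userspace C for illustration only (the 0x1e mask mirrors RT_TOS()/IPTOS_TOS_MASK), not code from this commit.

#include <stdio.h>

/* Hypothetical stand-in for RT_TOS(): keep only the IPTOS_TOS_MASK bits. */
#define DEMO_RT_TOS(tos)	((tos) & 0x1e)

/* Same choice get_rttos() makes: a per-call TOS (!= -1) wins over the socket TOS. */
static unsigned char pick_tos(int percall_tos, unsigned char sock_tos)
{
	return (percall_tos != -1) ? DEMO_RT_TOS(percall_tos) : DEMO_RT_TOS(sock_tos);
}

int main(void)
{
	printf("%u\n", (unsigned)pick_tos(-1, 0x10));	/* no per-call TOS: 0x10 -> 16 */
	printf("%u\n", (unsigned)pick_tos(0x2e, 0x10));	/* per-call 0x2e masked to 0x0e -> 14 */
	return 0;
}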
index 7686e3f5033d8364da7aad69be58b86018af7589..9e3c540c1b110c71b65003a6aac22cc6c333be5a 100644 (file)
@@ -66,12 +66,14 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
        }
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
 
-       __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+       __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
 }
+#endif
 
 int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
 #endif
index 48ec25a7fcb60a09a2c323c70221a31853a82261..6738f3409a6f701e82f431a760750f749863a73c 100644 (file)
@@ -267,48 +267,39 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,
  *     exported functions
  */
 
-extern struct fib6_table        *fib6_get_table(struct net *net, u32 id);
-extern struct fib6_table        *fib6_new_table(struct net *net, u32 id);
-extern struct dst_entry         *fib6_rule_lookup(struct net *net,
-                                                 struct flowi6 *fl6, int flags,
-                                                 pol_lookup_t lookup);
+struct fib6_table *fib6_get_table(struct net *net, u32 id);
+struct fib6_table *fib6_new_table(struct net *net, u32 id);
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+                                  int flags, pol_lookup_t lookup);
 
-extern struct fib6_node                *fib6_lookup(struct fib6_node *root,
-                                            const struct in6_addr *daddr,
-                                            const struct in6_addr *saddr);
+struct fib6_node *fib6_lookup(struct fib6_node *root,
+                             const struct in6_addr *daddr,
+                             const struct in6_addr *saddr);
 
-struct fib6_node               *fib6_locate(struct fib6_node *root,
-                                            const struct in6_addr *daddr, int dst_len,
-                                            const struct in6_addr *saddr, int src_len);
+struct fib6_node *fib6_locate(struct fib6_node *root,
+                             const struct in6_addr *daddr, int dst_len,
+                             const struct in6_addr *saddr, int src_len);
 
-extern void                    fib6_clean_all_ro(struct net *net,
-                                              int (*func)(struct rt6_info *, void *arg),
-                                              int prune, void *arg);
+void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
+                   int prune, void *arg);
 
-extern void                    fib6_clean_all(struct net *net,
-                                              int (*func)(struct rt6_info *, void *arg),
-                                              int prune, void *arg);
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info);
 
-extern int                     fib6_add(struct fib6_node *root,
-                                        struct rt6_info *rt,
-                                        struct nl_info *info);
+int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
-extern int                     fib6_del(struct rt6_info *rt,
-                                        struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
 
-extern void                    inet6_rt_notify(int event, struct rt6_info *rt,
-                                               struct nl_info *info);
+void fib6_run_gc(unsigned long expires, struct net *net, bool force);
 
-extern void                    fib6_run_gc(unsigned long expires,
-                                           struct net *net, bool force);
+void fib6_gc_cleanup(void);
 
-extern void                    fib6_gc_cleanup(void);
+int fib6_init(void);
 
-extern int                     fib6_init(void);
+int ipv6_route_open(struct inode *inode, struct file *file);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern int                     fib6_rules_init(void);
-extern void                    fib6_rules_cleanup(void);
+int fib6_rules_init(void);
+void fib6_rules_cleanup(void);
 #else
 static inline int               fib6_rules_init(void)
 {
index f525e7038cca4eaae4fd7b011ae1f97072ddffc9..733747ce163c1a08a91a24eb5277832f5c04d6ee 100644 (file)
@@ -51,7 +51,7 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
        return (flags >> 3) & 7;
 }
 
-extern void rt6_bind_peer(struct rt6_info *rt, int create);
+void rt6_bind_peer(struct rt6_info *rt, int create);
 
 static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
 {
@@ -72,70 +72,58 @@ static inline struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
        return __rt6_get_peer(rt, 1);
 }
 
-extern void                    ip6_route_input(struct sk_buff *skb);
+void ip6_route_input(struct sk_buff *skb);
 
-extern struct dst_entry *      ip6_route_output(struct net *net,
-                                                const struct sock *sk,
-                                                struct flowi6 *fl6);
-extern struct dst_entry *      ip6_route_lookup(struct net *net,
-                                                struct flowi6 *fl6, int flags);
+struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+                                  struct flowi6 *fl6);
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+                                  int flags);
 
-extern int                     ip6_route_init(void);
-extern void                    ip6_route_cleanup(void);
+int ip6_route_init(void);
+void ip6_route_cleanup(void);
 
-extern int                     ipv6_route_ioctl(struct net *net,
-                                                unsigned int cmd,
-                                                void __user *arg);
+int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
 
-extern int                     ip6_route_add(struct fib6_config *cfg);
-extern int                     ip6_ins_rt(struct rt6_info *);
-extern int                     ip6_del_rt(struct rt6_info *);
+int ip6_route_add(struct fib6_config *cfg);
+int ip6_ins_rt(struct rt6_info *);
+int ip6_del_rt(struct rt6_info *);
 
-extern int                     ip6_route_get_saddr(struct net *net,
-                                                   struct rt6_info *rt,
-                                                   const struct in6_addr *daddr,
-                                                   unsigned int prefs,
-                                                   struct in6_addr *saddr);
+int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
+                       const struct in6_addr *daddr, unsigned int prefs,
+                       struct in6_addr *saddr);
 
-extern struct rt6_info         *rt6_lookup(struct net *net,
-                                           const struct in6_addr *daddr,
-                                           const struct in6_addr *saddr,
-                                           int oif, int flags);
+struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+                           const struct in6_addr *saddr, int oif, int flags);
 
-extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
-                                        struct flowi6 *fl6);
-extern int icmp6_dst_gc(void);
+struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
+int icmp6_dst_gc(void);
 
-extern void fib6_force_start_gc(struct net *net);
+void fib6_force_start_gc(struct net *net);
 
-extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
-                                          const struct in6_addr *addr,
-                                          bool anycast);
+struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+                                   const struct in6_addr *addr, bool anycast);
 
 /*
  *     support functions for ND
  *
  */
-extern struct rt6_info *       rt6_get_dflt_router(const struct in6_addr *addr,
-                                                   struct net_device *dev);
-extern struct rt6_info *       rt6_add_dflt_router(const struct in6_addr *gwaddr,
-                                                   struct net_device *dev,
-                                                   unsigned int pref);
-
-extern void                    rt6_purge_dflt_routers(struct net *net);
-
-extern int                     rt6_route_rcv(struct net_device *dev,
-                                             u8 *opt, int len,
-                                             const struct in6_addr *gwaddr);
-
-extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-                           int oif, u32 mark);
-extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
-                              __be32 mtu);
-extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
-extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
-                                  u32 mark);
-extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
+struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr,
+                                    struct net_device *dev);
+struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+                                    struct net_device *dev, unsigned int pref);
+
+void rt6_purge_dflt_routers(struct net *net);
+
+int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+                 const struct in6_addr *gwaddr);
+
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
+                    u32 mark);
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+                           u32 mark);
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
 
 struct netlink_callback;
 
@@ -145,10 +133,10 @@ struct rt6_rtnl_dump_arg {
        struct net *net;
 };
 
-extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
-extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+void rt6_ifdown(struct net *net, struct net_device *dev);
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 
 
 /*
@@ -194,11 +182,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 }
 
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
 {
-       if (rt->rt6i_flags & RTF_GATEWAY)
-               return &rt->rt6i_gateway;
-       return dest;
+       return &rt->rt6i_gateway;
 }
 
 #endif
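The rt6_nexthop() change just above drops the caller-supplied fallback address: the helper now returns the route's cached rt6i_gateway unconditionally, which assumes the IPv6 routing code fills rt6i_gateway in even for on-link routes (with the destination itself). A hypothetical userspace sketch of the resulting caller contract, using made-up types rather than the kernel ones:

#include <stdio.h>

/* Stand-in for struct rt6_info; only the cached next hop matters here. */
struct demo_rt6 {
	const char *rt6i_gateway;
};

/* Mirrors the simplified helper: no destination fallback argument any more. */
static const char *demo_rt6_nexthop(const struct demo_rt6 *rt)
{
	return rt->rt6i_gateway;
}

int main(void)
{
	struct demo_rt6 onlink  = { .rt6i_gateway = "2001:db8::5" };	/* on-link: gateway holds the destination */
	struct demo_rt6 gateway = { .rt6i_gateway = "fe80::1" };	/* gatewayed route */

	printf("%s\n", demo_rt6_nexthop(&onlink));
	printf("%s\n", demo_rt6_nexthop(&gateway));
	return 0;
}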
index cbf2be37c91ac603dc883fa3890e62ec1b5c7a80..9922093f575e20fa2e53a903a68da7773469c600 100644 (file)
@@ -165,7 +165,7 @@ struct fib_result_nl {
 #define FIB_TABLE_HASHSZ 2
 #endif
 
-extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
 
 #define FIB_RES_SADDR(net, res)                                \
        ((FIB_RES_NH(res).nh_saddr_genid ==             \
@@ -187,14 +187,14 @@ struct fib_table {
        unsigned long           tb_data[0];
 };
 
-extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
-                           struct fib_result *res, int fib_flags);
-extern int fib_table_insert(struct fib_table *, struct fib_config *);
-extern int fib_table_delete(struct fib_table *, struct fib_config *);
-extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
-                         struct netlink_callback *cb);
-extern int fib_table_flush(struct fib_table *table);
-extern void fib_free_table(struct fib_table *tb);
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+                    struct fib_result *res, int fib_flags);
+int fib_table_insert(struct fib_table *, struct fib_config *);
+int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+                  struct netlink_callback *cb);
+int fib_table_flush(struct fib_table *table);
+void fib_free_table(struct fib_table *tb);
 
 
 
@@ -234,14 +234,13 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
 }
 
 #else /* CONFIG_IP_MULTIPLE_TABLES */
-extern int __net_init fib4_rules_init(struct net *net);
-extern void __net_exit fib4_rules_exit(struct net *net);
+int __net_init fib4_rules_init(struct net *net);
+void __net_exit fib4_rules_exit(struct net *net);
 
-extern struct fib_table *fib_new_table(struct net *net, u32 id);
-extern struct fib_table *fib_get_table(struct net *net, u32 id);
+struct fib_table *fib_new_table(struct net *net, u32 id);
+struct fib_table *fib_get_table(struct net *net, u32 id);
 
-extern int __fib_lookup(struct net *net, struct flowi4 *flp,
-                       struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
 
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
                             struct fib_result *res)
@@ -269,12 +268,12 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
 
 /* Exported by fib_frontend.c */
 extern const struct nla_policy rtm_ipv4_policy[];
-extern void            ip_fib_init(void);
-extern __be32 fib_compute_spec_dst(struct sk_buff *skb);
-extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
-                              u8 tos, int oif, struct net_device *dev,
-                              struct in_device *idev, u32 *itag);
-extern void fib_select_default(struct fib_result *res);
+void ip_fib_init(void);
+__be32 fib_compute_spec_dst(struct sk_buff *skb);
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+                       u8 tos, int oif, struct net_device *dev,
+                       struct in_device *idev, u32 *itag);
+void fib_select_default(struct fib_result *res);
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
@@ -288,15 +287,15 @@ static inline int fib_num_tclassid_users(struct net *net)
 #endif
 
 /* Exported by fib_semantics.c */
-extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
-extern int fib_sync_down_dev(struct net_device *dev, int force);
-extern int fib_sync_down_addr(struct net *net, __be32 local);
-extern int fib_sync_up(struct net_device *dev);
-extern void fib_select_multipath(struct fib_result *res);
+int ip_fib_check_default(__be32 gw, struct net_device *dev);
+int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_up(struct net_device *dev);
+void fib_select_multipath(struct fib_result *res);
 
 /* Exported by fib_trie.c */
-extern void fib_trie_init(void);
-extern struct fib_table *fib_trie_table(u32 id);
+void fib_trie_init(void);
+struct fib_table *fib_trie_table(u32 id);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
@@ -314,7 +313,7 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 #endif
 }
 
-extern void free_fib_info(struct fib_info *fi);
+void free_fib_info(struct fib_info *fi);
 
 static inline void fib_info_put(struct fib_info *fi)
 {
@@ -323,8 +322,8 @@ static inline void fib_info_put(struct fib_info *fi)
 }
 
 #ifdef CONFIG_PROC_FS
-extern int __net_init  fib_proc_init(struct net *net);
-extern void __net_exit fib_proc_exit(struct net *net);
+int __net_init fib_proc_init(struct net *net);
+void __net_exit fib_proc_exit(struct net *net);
 #else
 static inline int fib_proc_init(struct net *net)
 {
index a0a4a100f5c9a74ce544097dee5f3d25e68e8b9b..732f8c6ae975877ccd9c5873369f9be6490f06e9 100644 (file)
@@ -150,6 +150,9 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df, bool xnet);
 
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+                                        int gso_type_mask);
+
 static inline void iptunnel_xmit_stats(int err,
                                       struct net_device_stats *err_stats,
                                       struct pcpu_tstats __percpu *stats)
index f0d70f066f3ddc59a0c1c7bb5e58daf1c4b3c3a4..1c2e1b9f6b8603aecdf14452a6b424b227cd504b 100644 (file)
@@ -236,7 +236,7 @@ static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
 #ifdef CONFIG_IP_VS_DEBUG
 #include <linux/net.h>
 
-extern int ip_vs_get_debug_level(void);
+int ip_vs_get_debug_level(void);
 
 static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
                                         const union nf_inet_addr *addr,
@@ -532,9 +532,9 @@ struct ip_vs_proto_data {
        struct tcp_states_t     *tcp_state_table;
 };
 
-extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
-extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
-                                                    unsigned short proto);
+struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+                                             unsigned short proto);
 
 struct ip_vs_conn_param {
        struct net                      *net;
@@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
        struct rcu_head         rcu_head;
 };
 
-/* In grace period after removing */
-#define IP_VS_DEST_STATE_REMOVING      0x01
 /*
  *     The real server destination forwarding entry
  *     with ip address, port number, and so on.
@@ -742,7 +740,7 @@ struct ip_vs_dest {
 
        atomic_t                refcnt;         /* reference counter */
        struct ip_vs_stats      stats;          /* statistics */
-       unsigned long           state;          /* state flags */
+       unsigned long           idle_start;     /* start time, jiffies */
 
        /* connection counters and thresholds */
        atomic_t                activeconns;    /* active connections */
@@ -756,14 +754,13 @@ struct ip_vs_dest {
        struct ip_vs_dest_dst __rcu *dest_dst;  /* cached dst info */
 
        /* for virtual service */
-       struct ip_vs_service    *svc;           /* service it belongs to */
+       struct ip_vs_service __rcu *svc;        /* service it belongs to */
        __u16                   protocol;       /* which protocol (TCP/UDP) */
        __be16                  vport;          /* virtual port number */
        union nf_inet_addr      vaddr;          /* virtual IP address */
        __u32                   vfwmark;        /* firewall mark of service */
 
        struct list_head        t_list;         /* in dest_trash */
-       struct rcu_head         rcu_head;
        unsigned int            in_rs_table:1;  /* we are in rs_table */
 };
 
@@ -1176,8 +1173,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
  *      IPVS core functions
  *      (from ip_vs_core.c)
  */
-extern const char *ip_vs_proto_name(unsigned int proto);
-extern void ip_vs_init_hash_table(struct list_head *table, int rows);
+const char *ip_vs_proto_name(unsigned int proto);
+void ip_vs_init_hash_table(struct list_head *table, int rows);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
 #define IP_VS_APP_TYPE_FTP     1
@@ -1240,22 +1237,22 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
        smp_mb__before_atomic_dec();
        atomic_dec(&cp->refcnt);
 }
-extern void ip_vs_conn_put(struct ip_vs_conn *cp);
-extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+void ip_vs_conn_put(struct ip_vs_conn *cp);
+void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
 
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
                                  const union nf_inet_addr *daddr,
                                  __be16 dport, unsigned int flags,
                                  struct ip_vs_dest *dest, __u32 fwmark);
-extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
-extern const char * ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(__u16 proto, int state);
 
-extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
-extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(struct net *net);
-extern int ip_vs_conn_init(void);
-extern void ip_vs_conn_cleanup(void);
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+int ip_vs_check_template(struct ip_vs_conn *ct);
+void ip_vs_random_dropentry(struct net *net);
+int ip_vs_conn_init(void);
+void ip_vs_conn_cleanup(void);
 
 static inline void ip_vs_control_del(struct ip_vs_conn *cp)
 {
@@ -1320,37 +1317,36 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
 /*
  * IPVS netns init & cleanup functions
  */
-extern int ip_vs_estimator_net_init(struct net *net);
-extern int ip_vs_control_net_init(struct net *net);
-extern int ip_vs_protocol_net_init(struct net *net);
-extern int ip_vs_app_net_init(struct net *net);
-extern int ip_vs_conn_net_init(struct net *net);
-extern int ip_vs_sync_net_init(struct net *net);
-extern void ip_vs_conn_net_cleanup(struct net *net);
-extern void ip_vs_app_net_cleanup(struct net *net);
-extern void ip_vs_protocol_net_cleanup(struct net *net);
-extern void ip_vs_control_net_cleanup(struct net *net);
-extern void ip_vs_estimator_net_cleanup(struct net *net);
-extern void ip_vs_sync_net_cleanup(struct net *net);
-extern void ip_vs_service_net_cleanup(struct net *net);
+int ip_vs_estimator_net_init(struct net *net);
+int ip_vs_control_net_init(struct net *net);
+int ip_vs_protocol_net_init(struct net *net);
+int ip_vs_app_net_init(struct net *net);
+int ip_vs_conn_net_init(struct net *net);
+int ip_vs_sync_net_init(struct net *net);
+void ip_vs_conn_net_cleanup(struct net *net);
+void ip_vs_app_net_cleanup(struct net *net);
+void ip_vs_protocol_net_cleanup(struct net *net);
+void ip_vs_control_net_cleanup(struct net *net);
+void ip_vs_estimator_net_cleanup(struct net *net);
+void ip_vs_sync_net_cleanup(struct net *net);
+void ip_vs_service_net_cleanup(struct net *net);
 
 /*
  *      IPVS application functions
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern struct ip_vs_app *register_ip_vs_app(struct net *net,
-                                           struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
-extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
-                                 __u16 proto, __u16 port);
-extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
-extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
-
-extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+void ip_vs_unbind_app(struct ip_vs_conn *cp);
+int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+                          __u16 port);
+int ip_vs_app_inc_get(struct ip_vs_app *inc);
+void ip_vs_app_inc_put(struct ip_vs_app *inc);
+
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
 
 int register_ip_vs_pe(struct ip_vs_pe *pe);
 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
@@ -1371,17 +1367,15 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
 /*
  *     IPVS protocol functions (from ip_vs_proto.c)
  */
-extern int ip_vs_protocol_init(void);
-extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
-extern int *ip_vs_create_timeout_table(int *table, int size);
-extern int
-ip_vs_set_state_timeout(int *table, int num, const char *const *names,
-                       const char *name, int to);
-extern void
-ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
-                         const struct sk_buff *skb,
-                         int offset, const char *msg);
+int ip_vs_protocol_init(void);
+void ip_vs_protocol_cleanup(void);
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
+int *ip_vs_create_timeout_table(int *table, int size);
+int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
+                           const char *name, int to);
+void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
+                              const struct sk_buff *skb, int offset,
+                              const char *msg);
 
 extern struct ip_vs_protocol ip_vs_protocol_tcp;
 extern struct ip_vs_protocol ip_vs_protocol_udp;
@@ -1394,22 +1388,22 @@ extern struct ip_vs_protocol ip_vs_protocol_sctp;
  *      Registering/unregistering scheduler functions
  *      (from ip_vs_sched.c)
  */
-extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
-                               struct ip_vs_scheduler *scheduler);
-extern void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
-                                  struct ip_vs_scheduler *sched);
-extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
-extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
-extern struct ip_vs_conn *
+int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+                        struct ip_vs_scheduler *scheduler);
+void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+                           struct ip_vs_scheduler *sched);
+struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
               struct ip_vs_proto_data *pd, int *ignored,
               struct ip_vs_iphdr *iph);
-extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-                       struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
+int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+               struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
 
-extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 
 
 /*
@@ -1418,25 +1412,24 @@ extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 extern struct ip_vs_stats ip_vs_stats;
 extern int sysctl_ip_vs_sync_ver;
 
-extern struct ip_vs_service *
+struct ip_vs_service *
 ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
                  const union nf_inet_addr *vaddr, __be16 vport);
 
-extern bool
-ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
-                      const union nf_inet_addr *daddr, __be16 dport);
-
-extern int ip_vs_use_count_inc(void);
-extern void ip_vs_use_count_dec(void);
-extern int ip_vs_register_nl_ioctl(void);
-extern void ip_vs_unregister_nl_ioctl(void);
-extern int ip_vs_control_init(void);
-extern void ip_vs_control_cleanup(void);
-extern struct ip_vs_dest *
+bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+                           const union nf_inet_addr *daddr, __be16 dport);
+
+int ip_vs_use_count_inc(void);
+void ip_vs_use_count_dec(void);
+int ip_vs_register_nl_ioctl(void);
+void ip_vs_unregister_nl_ioctl(void);
+int ip_vs_control_init(void);
+void ip_vs_control_cleanup(void);
+struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
                __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
                __u16 protocol, __u32 fwmark, __u32 flags);
-extern void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 {
@@ -1453,56 +1446,49 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
  *      IPVS sync daemon data and function prototypes
  *      (from ip_vs_sync.c)
  */
-extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
-                            __u8 syncid);
-extern int stop_sync_thread(struct net *net, int state);
-extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int stop_sync_thread(struct net *net, int state);
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
  */
-extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
-                                struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+                         struct ip_vs_stats *stats);
 
 /*
  *     Various IPVS packet transmitters (from ip_vs_xmit.c)
  */
-extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                          struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                            struct ip_vs_protocol *pp,
-                            struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                         struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                            struct ip_vs_protocol *pp,
-                            struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                        struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                          struct ip_vs_protocol *pp, int offset,
-                          unsigned int hooknum, struct ip_vs_iphdr *iph);
-extern void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
+int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                   struct ip_vs_protocol *pp, int offset,
+                   unsigned int hooknum, struct ip_vs_iphdr *iph);
+void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                               struct ip_vs_protocol *pp,
-                               struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                            struct ip_vs_protocol *pp,
-                            struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                               struct ip_vs_protocol *pp,
-                               struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                           struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                             struct ip_vs_protocol *pp, int offset,
-                             unsigned int hooknum, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                        struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                        struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                      struct ip_vs_protocol *pp, int offset,
+                      unsigned int hooknum, struct ip_vs_iphdr *iph);
 #endif
 
 #ifdef CONFIG_SYSCTL
@@ -1551,15 +1537,15 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
        return fwd;
 }
 
-extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
-                          struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+                   struct ip_vs_conn *cp, int dir);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
-                             struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+                      struct ip_vs_conn *cp, int dir);
 #endif
 
-extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
 
 static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
 {
@@ -1618,13 +1604,13 @@ static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 #endif
 }
 
-extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
-                                  int outin);
-extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
-extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
-                                     struct ip_vs_conn *cp, u_int8_t proto,
-                                     const __be16 port, int from_rs);
-extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
+void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+                           int outin);
+int ip_vs_confirm_conntrack(struct sk_buff *skb);
+void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+                              struct ip_vs_conn *cp, u_int8_t proto,
+                              const __be16 port, int from_rs);
+void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
 
 #else
 
@@ -1649,7 +1635,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
-static inline unsigned int
+static inline int
 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
 {
        /*
index bbf1c8fb8511c70220c4a2b7c046f6007cf525b5..dd96638ab8ff6390b498aa3c8729eb16d46496a4 100644 (file)
@@ -244,14 +244,14 @@ struct ipv6_fl_socklist {
        struct rcu_head                 rcu;
 };
 
-extern struct ip6_flowlabel    *fl6_sock_lookup(struct sock *sk, __be32 label);
-extern struct ipv6_txoptions   *fl6_merge_options(struct ipv6_txoptions * opt_space,
-                                                  struct ip6_flowlabel * fl,
-                                                  struct ipv6_txoptions * fopt);
-extern void                    fl6_free_socklist(struct sock *sk);
-extern int                     ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
-extern int                     ip6_flowlabel_init(void);
-extern void                    ip6_flowlabel_cleanup(void);
+struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+                                        struct ip6_flowlabel *fl,
+                                        struct ipv6_txoptions *fopt);
+void fl6_free_socklist(struct sock *sk);
+int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ip6_flowlabel_init(void);
+void ip6_flowlabel_cleanup(void);
 
 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 {
@@ -259,7 +259,7 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
                atomic_dec(&fl->users);
 }
 
-extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
 
 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                               struct icmp6hdr *thdr, int len);
@@ -267,19 +267,21 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
                                      struct sock *sk, struct flowi6 *fl6);
 
-extern int                     ip6_ra_control(struct sock *sk, int sel);
+int ip6_ra_control(struct sock *sk, int sel);
 
-extern int                     ipv6_parse_hopopts(struct sk_buff *skb);
+int ipv6_parse_hopopts(struct sk_buff *skb);
 
-extern struct ipv6_txoptions *  ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
-extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-                                                  int newtype,
-                                                  struct ipv6_opt_hdr __user *newopt,
-                                                  int newoptlen);
+struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+                                       struct ipv6_txoptions *opt);
+struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+                                         struct ipv6_txoptions *opt,
+                                         int newtype,
+                                         struct ipv6_opt_hdr __user *newopt,
+                                         int newoptlen);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);
 
-extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
 
 static inline bool ipv6_accept_ra(struct inet6_dev *idev)
 {
@@ -306,7 +308,7 @@ static inline int ip6_frag_mem(struct net *net)
 #define IPV6_FRAG_LOW_THRESH   (3 * 1024*1024) /* 3145728 */
 #define IPV6_FRAG_TIMEOUT      (60 * HZ)       /* 60 seconds */
 
-extern int __ipv6_addr_type(const struct in6_addr *addr);
+int __ipv6_addr_type(const struct in6_addr *addr);
 static inline int ipv6_addr_type(const struct in6_addr *addr)
 {
        return __ipv6_addr_type(addr) & 0xffff;
@@ -537,14 +539,14 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
 }
 
 /* more secured version of ipv6_addr_hash() */
-static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 {
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
 
        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
-                           ipv6_hash_secret);
+                           initval);
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -656,9 +658,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
+int ip6_dst_hoplimit(struct dst_entry *dst);
 
 /*
  *     Header manipulation
@@ -682,83 +684,65 @@ static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
  *     rcv function (called from netdevice level)
  */
 
-extern int                     ipv6_rcv(struct sk_buff *skb, 
-                                        struct net_device *dev, 
-                                        struct packet_type *pt,
-                                        struct net_device *orig_dev);
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
+            struct packet_type *pt, struct net_device *orig_dev);
 
-extern int                     ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sk_buff *skb);
 
 /*
  *     upper-layer output functions
  */
-extern int                     ip6_xmit(struct sock *sk,
-                                        struct sk_buff *skb,
-                                        struct flowi6 *fl6,
-                                        struct ipv6_txoptions *opt,
-                                        int tclass);
-
-extern int                     ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
-
-extern int                     ip6_append_data(struct sock *sk,
-                                               int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
-                                               void *from,
-                                               int length,
-                                               int transhdrlen,
-                                               int hlimit,
-                                               int tclass,
-                                               struct ipv6_txoptions *opt,
-                                               struct flowi6 *fl6,
-                                               struct rt6_info *rt,
-                                               unsigned int flags,
-                                               int dontfrag);
-
-extern int                     ip6_push_pending_frames(struct sock *sk);
-
-extern void                    ip6_flush_pending_frames(struct sock *sk);
-
-extern int                     ip6_dst_lookup(struct sock *sk,
-                                              struct dst_entry **dst,
-                                              struct flowi6 *fl6);
-extern struct dst_entry *      ip6_dst_lookup_flow(struct sock *sk,
-                                                   struct flowi6 *fl6,
-                                                   const struct in6_addr *final_dst,
-                                                   bool can_sleep);
-extern struct dst_entry *      ip6_sk_dst_lookup_flow(struct sock *sk,
-                                                      struct flowi6 *fl6,
-                                                      const struct in6_addr *final_dst,
-                                                      bool can_sleep);
-extern struct dst_entry *      ip6_blackhole_route(struct net *net,
-                                                   struct dst_entry *orig_dst);
+int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+            struct ipv6_txoptions *opt, int tclass);
+
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+int ip6_append_data(struct sock *sk,
+                   int getfrag(void *from, char *to, int offset, int len,
+                               int odd, struct sk_buff *skb),
+                   void *from, int length, int transhdrlen, int hlimit,
+                   int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+                   struct rt6_info *rt, unsigned int flags, int dontfrag);
+
+int ip6_push_pending_frames(struct sock *sk);
+
+void ip6_flush_pending_frames(struct sock *sk);
+
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                     const struct in6_addr *final_dst,
+                                     bool can_sleep);
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                        const struct in6_addr *final_dst,
+                                        bool can_sleep);
+struct dst_entry *ip6_blackhole_route(struct net *net,
+                                     struct dst_entry *orig_dst);
 
 /*
  *     skb processing functions
  */
 
-extern int                     ip6_output(struct sk_buff *skb);
-extern int                     ip6_forward(struct sk_buff *skb);
-extern int                     ip6_input(struct sk_buff *skb);
-extern int                     ip6_mc_input(struct sk_buff *skb);
+int ip6_output(struct sk_buff *skb);
+int ip6_forward(struct sk_buff *skb);
+int ip6_input(struct sk_buff *skb);
+int ip6_mc_input(struct sk_buff *skb);
 
-extern int                     __ip6_local_out(struct sk_buff *skb);
-extern int                     ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out(struct sk_buff *skb);
 
 /*
  *     Extension header (options) processing
  */
 
-extern void                    ipv6_push_nfrag_opts(struct sk_buff *skb,
-                                                    struct ipv6_txoptions *opt,
-                                                    u8 *proto,
-                                                    struct in6_addr **daddr_p);
-extern void                    ipv6_push_frag_opts(struct sk_buff *skb,
-                                                   struct ipv6_txoptions *opt,
-                                                   u8 *proto);
+void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+                         u8 *proto, struct in6_addr **daddr_p);
+void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+                        u8 *proto);
 
-extern int                     ipv6_skip_exthdr(const struct sk_buff *, int start,
-                                                u8 *nexthdrp, __be16 *frag_offp);
+int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
+                    __be16 *frag_offp);
 
-extern bool                    ipv6_ext_hdr(u8 nexthdr);
+bool ipv6_ext_hdr(u8 nexthdr);
 
 enum {
        IP6_FH_F_FRAG           = (1 << 0),
@@ -767,57 +751,44 @@ enum {
 };
 
 /* find specified header and get offset to it */
-extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
-                        int target, unsigned short *fragoff, int *fragflg);
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
+                 unsigned short *fragoff, int *fragflg);
 
-extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 
-extern struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
-                                      const struct ipv6_txoptions *opt,
-                                      struct in6_addr *orig);
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+                               const struct ipv6_txoptions *opt,
+                               struct in6_addr *orig);
 
 /*
  *     socket options (ipv6_sockglue.c)
  */
 
-extern int                     ipv6_setsockopt(struct sock *sk, int level, 
-                                               int optname,
-                                               char __user *optval, 
-                                               unsigned int optlen);
-extern int                     ipv6_getsockopt(struct sock *sk, int level, 
-                                               int optname,
-                                               char __user *optval, 
-                                               int __user *optlen);
-extern int                     compat_ipv6_setsockopt(struct sock *sk,
-                                               int level,
-                                               int optname,
-                                               char __user *optval,
-                                               unsigned int optlen);
-extern int                     compat_ipv6_getsockopt(struct sock *sk,
-                                               int level,
-                                               int optname,
-                                               char __user *optval,
-                                               int __user *optlen);
-
-extern int                     ip6_datagram_connect(struct sock *sk, 
-                                                    struct sockaddr *addr, int addr_len);
-
-extern int                     ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern int                     ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
-extern void                    ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
-                                               u32 info, u8 *payload);
-extern void                    ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
-extern void                    ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
-
-extern int inet6_release(struct socket *sock);
-extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr, 
-                     int addr_len);
-extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
-                        int *uaddr_len, int peer);
-extern int inet6_ioctl(struct socket *sock, unsigned int cmd, 
-                      unsigned long arg);
-
-extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+int ipv6_setsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, unsigned int optlen);
+int ipv6_getsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, int __user *optlen);
+int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, unsigned int optlen);
+int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, int __user *optlen);
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
+
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+                    u32 info, u8 *payload);
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+
+int inet6_release(struct socket *sock);
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+                 int peer);
+int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                              struct sock *sk);
 
 /*
@@ -829,30 +800,27 @@ extern const struct proto_ops inet6_dgram_ops;
 struct group_source_req;
 struct group_filter;
 
-extern int ip6_mc_source(int add, int omode, struct sock *sk,
-                        struct group_source_req *pgsr);
-extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
-extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
-                        struct group_filter __user *optval,
-                        int __user *optlen);
-extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-                                   const struct in6_addr *daddr, u32 rnd);
+int ip6_mc_source(int add, int omode, struct sock *sk,
+                 struct group_source_req *pgsr);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
+int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
+                 struct group_filter __user *optval, int __user *optlen);
 
 #ifdef CONFIG_PROC_FS
-extern int  ac6_proc_init(struct net *net);
-extern void ac6_proc_exit(struct net *net);
-extern int  raw6_proc_init(void);
-extern void raw6_proc_exit(void);
-extern int  tcp6_proc_init(struct net *net);
-extern void tcp6_proc_exit(struct net *net);
-extern int  udp6_proc_init(struct net *net);
-extern void udp6_proc_exit(struct net *net);
-extern int  udplite6_proc_init(void);
-extern void udplite6_proc_exit(void);
-extern int  ipv6_misc_proc_init(void);
-extern void ipv6_misc_proc_exit(void);
-extern int snmp6_register_dev(struct inet6_dev *idev);
-extern int snmp6_unregister_dev(struct inet6_dev *idev);
+int ac6_proc_init(struct net *net);
+void ac6_proc_exit(struct net *net);
+int raw6_proc_init(void);
+void raw6_proc_exit(void);
+int tcp6_proc_init(struct net *net);
+void tcp6_proc_exit(struct net *net);
+int udp6_proc_init(struct net *net);
+void udp6_proc_exit(struct net *net);
+int udplite6_proc_init(void);
+void udplite6_proc_exit(void);
+int ipv6_misc_proc_init(void);
+void ipv6_misc_proc_exit(void);
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
 
 #else
 static inline int ac6_proc_init(struct net *net) { return 0; }
@@ -865,10 +833,10 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
 extern struct ctl_table ipv6_route_table_template[];
 extern struct ctl_table ipv6_icmp_table_template[];
 
-extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
-extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
-extern int ipv6_sysctl_register(void);
-extern void ipv6_sysctl_unregister(void);
+struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+int ipv6_sysctl_register(void);
+void ipv6_sysctl_unregister(void);
 #endif
 
 #endif /* _NET_IPV6_H */
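Only the `extern` storage-class specifier is being dropped from these prototypes; a file-scope function declaration in C has external linkage by default, so the compiled result is unchanged and this is purely a style cleanup. A minimal illustration (not part of the patch), reusing the ip6_local_out() prototype from the hunk above:

    /* Illustrative sketch only -- not taken from this diff.  Both
     * declarations below give the function external linkage; dropping
     * "extern" changes nothing but the formatting. */
    extern int ip6_local_out(struct sk_buff *skb);   /* old style */
    int ip6_local_out(struct sk_buff *skb);          /* new style, same meaning */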
index c1fec6b464cc7b3926bfa12e55893994d772cc32..9e9e35465bafbaff131c893814abd44d99be712f 100644 (file)
@@ -123,23 +123,23 @@ extern struct list_head ipx_routes;
 extern rwlock_t ipx_routes_lock;
 
 extern struct list_head ipx_interfaces;
-extern struct ipx_interface *ipx_interfaces_head(void);
+struct ipx_interface *ipx_interfaces_head(void);
 extern spinlock_t ipx_interfaces_lock;
 
 extern struct ipx_interface *ipx_primary_net;
 
-extern int ipx_proc_init(void);
-extern void ipx_proc_exit(void);
+int ipx_proc_init(void);
+void ipx_proc_exit(void);
 
-extern const char *ipx_frame_name(__be16);
-extern const char *ipx_device_name(struct ipx_interface *intrfc);
+const char *ipx_frame_name(__be16);
+const char *ipx_device_name(struct ipx_interface *intrfc);
 
 static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
 {
        atomic_inc(&intrfc->refcnt);
 }
 
-extern void ipxitf_down(struct ipx_interface *intrfc);
+void ipxitf_down(struct ipx_interface *intrfc);
 
 static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
 {
index 80ffde3bb164e2a3a856838f0c8c4c2906bfeaf6..0224402260a77a3653a276e0c33f2e9281601f80 100644 (file)
@@ -105,13 +105,13 @@ struct ircomm_tty_cb {
 void ircomm_tty_start(struct tty_struct *tty);
 void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
 
-extern int ircomm_tty_tiocmget(struct tty_struct *tty);
-extern int ircomm_tty_tiocmset(struct tty_struct *tty,
-                              unsigned int set, unsigned int clear);
-extern int ircomm_tty_ioctl(struct tty_struct *tty, 
-                           unsigned int cmd, unsigned long arg);
-extern void ircomm_tty_set_termios(struct tty_struct *tty, 
-                                  struct ktermios *old_termios);
+int ircomm_tty_tiocmget(struct tty_struct *tty);
+int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+                       unsigned int clear);
+int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+                    unsigned long arg);
+void ircomm_tty_set_termios(struct tty_struct *tty,
+                           struct ktermios *old_termios);
 
 #endif
 
index 3bed61d379a8aa1de70fe25935810c830cd70ac2..a059465101ff3b5a35120d09782b6f10db9c98fa 100644 (file)
@@ -112,20 +112,19 @@ do { if(!(expr)) { \
 struct net_device;
 struct packet_type;
 
-extern void irda_proc_register(void);
-extern void irda_proc_unregister(void);
+void irda_proc_register(void);
+void irda_proc_unregister(void);
 
-extern int irda_sysctl_register(void);
-extern void irda_sysctl_unregister(void);
+int irda_sysctl_register(void);
+void irda_sysctl_unregister(void);
 
-extern int irsock_init(void);
-extern void irsock_cleanup(void);
+int irsock_init(void);
+void irsock_cleanup(void);
 
-extern int irda_nl_register(void);
-extern void irda_nl_unregister(void);
+int irda_nl_register(void);
+void irda_nl_unregister(void);
 
-extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
-                           struct packet_type *ptype,
-                           struct net_device *orig_dev);
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+                    struct packet_type *ptype, struct net_device *orig_dev);
 
 #endif /* NET_IRDA_H */
index 94c852d47d0f8d5c7d033279bf2f67ca3fd861dd..11417475a6c31059aa28d5db55ce90c2a536a703 100644 (file)
@@ -162,7 +162,7 @@ typedef struct {
         int irq, irq2;        /* Interrupts used */
         int dma, dma2;        /* DMA channel(s) used */
         int fifo_size;        /* FIFO size */
-        int irqflags;         /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */
+        int irqflags;         /* interrupt flags (ie, IRQF_SHARED) */
        int direction;        /* Link direction, used by some FIR drivers */
        int enabled;          /* Powered on? */
        int suspended;        /* Suspended by APM */
index 4c90824c50fb1ee002aa6e82f50f32fe0c2b940a..f9d88da97af2c15844caf5b9ba875cffb8a5f8c3 100644 (file)
@@ -126,6 +126,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
                    struct sk_buff *skb, struct irlap_info *info);
 void irlap_print_event(IRLAP_EVENT event);
 
-extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
 
 #endif
index 6b1dc4f8eca58a231ff481ee6fd5be8ac1a8f15c..57173ae398aed4096dd8b2cc7374bd4645edc3e7 100644 (file)
@@ -163,7 +163,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
 void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
                         __u8 caddr, int command);
 
-extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self, 
-                                              struct sk_buff *skb);
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+                                       struct sk_buff *skb);
 
 #endif
index 5d5a6a4732effd8865918fe31c390c5067505534..a830b01baba4b08a1d5054557c540312084b8f50 100644 (file)
@@ -432,44 +432,32 @@ struct iw_public_data {
 /* First : function strictly used inside the kernel */
 
 /* Handle /proc/net/wireless, called in net/code/dev.c */
-extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
-                                int length);
+int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 
 /* Second : functions that may be called by driver modules */
 
 /* Send a single event to user space */
-extern void wireless_send_event(struct net_device *    dev,
-                               unsigned int            cmd,
-                               union iwreq_data *      wrqu,
-                               const char *            extra);
+void wireless_send_event(struct net_device *dev, unsigned int cmd,
+                        union iwreq_data *wrqu, const char *extra);
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */
 
 /* Standard handler for SIOCSIWSPY */
-extern int iw_handler_set_spy(struct net_device *      dev,
-                             struct iw_request_info *  info,
-                             union iwreq_data *        wrqu,
-                             char *                    extra);
+int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCGIWSPY */
-extern int iw_handler_get_spy(struct net_device *      dev,
-                             struct iw_request_info *  info,
-                             union iwreq_data *        wrqu,
-                             char *                    extra);
+int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCSIWTHRSPY */
-extern int iw_handler_set_thrspy(struct net_device *   dev,
-                                struct iw_request_info *info,
-                                union iwreq_data *     wrqu,
-                                char *                 extra);
+int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
+                         union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCGIWTHRSPY */
-extern int iw_handler_get_thrspy(struct net_device *   dev,
-                                struct iw_request_info *info,
-                                union iwreq_data *     wrqu,
-                                char *                 extra);
+int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
+                         union iwreq_data *wrqu, char *extra);
 /* Driver call to update spy records */
-extern void wireless_spy_update(struct net_device *    dev,
-                               unsigned char *         address,
-                               struct iw_quality *     wstats);
+void wireless_spy_update(struct net_device *dev, unsigned char *address,
+                        struct iw_quality *wstats);
 
 /************************* INLINE FUNTIONS *************************/
 /*
index df892a94f2c6570b642a33391fb9301ee99bcedb..9510f8725f03c316ab4eb61e9ee503218d2056c8 100644 (file)
@@ -105,40 +105,40 @@ struct lapb_cb {
 };
 
 /* lapb_iface.c */
-extern void lapb_connect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_connect_indication(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_indication(struct lapb_cb *lapb, int);
-extern int  lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
-extern int  lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+void lapb_connect_indication(struct lapb_cb *lapb, int);
+void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
 
 /* lapb_in.c */
-extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
 
 /* lapb_out.c */
-extern void lapb_kick(struct lapb_cb *lapb);
-extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
-extern void lapb_establish_data_link(struct lapb_cb *lapb);
-extern void lapb_enquiry_response(struct lapb_cb *lapb);
-extern void lapb_timeout_response(struct lapb_cb *lapb);
-extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+void lapb_kick(struct lapb_cb *lapb);
+void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+void lapb_establish_data_link(struct lapb_cb *lapb);
+void lapb_enquiry_response(struct lapb_cb *lapb);
+void lapb_timeout_response(struct lapb_cb *lapb);
+void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_check_need_response(struct lapb_cb *lapb, int, int);
 
 /* lapb_subr.c */
-extern void lapb_clear_queues(struct lapb_cb *lapb);
-extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_requeue_frames(struct lapb_cb *lapb);
-extern int  lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
-extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
-extern void lapb_send_control(struct lapb_cb *lapb, int, int, int);
-extern void lapb_transmit_frmr(struct lapb_cb *lapb);
+void lapb_clear_queues(struct lapb_cb *lapb);
+void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_requeue_frames(struct lapb_cb *lapb);
+int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+void lapb_transmit_frmr(struct lapb_cb *lapb);
 
 /* lapb_timer.c */
-extern void lapb_start_t1timer(struct lapb_cb *lapb);
-extern void lapb_start_t2timer(struct lapb_cb *lapb);
-extern void lapb_stop_t1timer(struct lapb_cb *lapb);
-extern void lapb_stop_t2timer(struct lapb_cb *lapb);
-extern int  lapb_t1timer_running(struct lapb_cb *lapb);
+void lapb_start_t1timer(struct lapb_cb *lapb);
+void lapb_start_t2timer(struct lapb_cb *lapb);
+void lapb_stop_t1timer(struct lapb_cb *lapb);
+void lapb_stop_t2timer(struct lapb_cb *lapb);
+int lapb_t1timer_running(struct lapb_cb *lapb);
 
 /*
  * Debug levels.
index 9e7d7f08ef77c5539ea887b483b274fc0ddb91fd..68490cbc8a659de87fefbfe1d80f8b29bed85dbc 100644 (file)
@@ -95,29 +95,29 @@ struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
 extern struct list_head llc_sap_list;
 extern spinlock_t llc_sap_list_lock;
 
-extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type *pt, struct net_device *orig_dev);
+int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+           struct net_device *orig_dev);
 
-extern int llc_mac_hdr_init(struct sk_buff *skb,
-                           const unsigned char *sa, const unsigned char *da);
+int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
+                    const unsigned char *da);
 
-extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
-                                                  struct sk_buff *skb));
-extern void llc_remove_pack(int type);
+void llc_add_pack(int type,
+                 void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
+void llc_remove_pack(int type);
 
-extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
 
-extern struct llc_sap *llc_sap_open(unsigned char lsap,
-                                   int (*rcv)(struct sk_buff *skb,
-                                              struct net_device *dev,
-                                              struct packet_type *pt,
-                                              struct net_device *orig_dev));
+struct llc_sap *llc_sap_open(unsigned char lsap,
+                            int (*rcv)(struct sk_buff *skb,
+                                       struct net_device *dev,
+                                       struct packet_type *pt,
+                                       struct net_device *orig_dev));
 static inline void llc_sap_hold(struct llc_sap *sap)
 {
        atomic_inc(&sap->refcnt);
 }
 
-extern void llc_sap_close(struct llc_sap *sap);
+void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
 {
@@ -125,27 +125,27 @@ static inline void llc_sap_put(struct llc_sap *sap)
                llc_sap_close(sap);
 }
 
-extern struct llc_sap *llc_sap_find(unsigned char sap_value);
+struct llc_sap *llc_sap_find(unsigned char sap_value);
 
-extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
-                                    unsigned char *dmac, unsigned char dsap);
+int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+                             unsigned char *dmac, unsigned char dsap);
 
-extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
 
-extern void llc_station_init(void);
-extern void llc_station_exit(void);
+void llc_station_init(void);
+void llc_station_exit(void);
 
 #ifdef CONFIG_PROC_FS
-extern int llc_proc_init(void);
-extern void llc_proc_exit(void);
+int llc_proc_init(void);
+void llc_proc_exit(void);
 #else
 #define llc_proc_init()        (0)
 #define llc_proc_exit()        do { } while(0)
 #endif /* CONFIG_PROC_FS */
 #ifdef CONFIG_SYSCTL
-extern int llc_sysctl_init(void);
-extern void llc_sysctl_exit(void);
+int llc_sysctl_init(void);
+void llc_sysctl_exit(void);
 
 extern int sysctl_llc2_ack_timeout;
 extern int sysctl_llc2_busy_timeout;
index df83f69d2de41903bc6b21467475853322828bdc..f3be818e73c1fbe1034d63bf6f2d132848060908 100644 (file)
 
 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
 
-extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk,
-                                                  struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk,
-                                                     struct sk_buff *skb);
-extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk,
-                                              struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk,
-                                              struct sk_buff *skb);
-extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk,
-                                                      struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
                                            struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
+                                              struct sk_buff *skb);
+int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
                                                struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk,
-                                                   struct sk_buff *skb);
-extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk,
-                                                        struct sk_buff *skb);
-extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb);
-extern u8 llc_circular_between(u8 a, u8 b, u8 c);
-extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
+                                            struct sk_buff *skb);
+int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
+                                                 struct sk_buff *skb);
+int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
+u8 llc_circular_between(u8 a, u8 b, u8 c);
+int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
 
-extern void llc_conn_busy_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_ack_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+void llc_conn_busy_tmr_cb(unsigned long timeout_data);
+void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
+void llc_conn_ack_tmr_cb(unsigned long timeout_data);
+void llc_conn_rej_tmr_cb(unsigned long timeout_data);
 
-extern void llc_conn_set_p_flag(struct sock *sk, u8 value);
+void llc_conn_set_p_flag(struct sock *sk, u8 value);
 #endif /* LLC_C_AC_H */
index 6ca3113df39eed10eb2032a11c6b4a7c43abe003..3948cf111dd0d6d4948ed3b7c40740b82bfe41bc 100644 (file)
@@ -128,138 +128,93 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
 
-extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_detected(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
+                                            struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
                                              struct sk_buff *skb);
-extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
-                                                   struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+                                            struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
                                               struct sk_buff *skb);
-extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
-                                                     struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
-                                                     struct sk_buff *skb);
-extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+                                              struct sk_buff *skb);
+int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
 /* NOT_USED functions and their variations */
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
 
 /* Available connection action qualifiers */
-extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
-                                               struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
-                                               struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
 
 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
 {
index 2f97d8ddce924dbfc61f473896cacda3cfb70456..0134681acc4cfe354c6546be1a1ffefa5c078cde 100644 (file)
@@ -95,28 +95,24 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
        return skb->cb[sizeof(skb->cb) - 1];
 }
 
-extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
-                                struct proto *prot);
-extern void llc_sk_free(struct sock *sk);
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+                         struct proto *prot);
+void llc_sk_free(struct sock *sk);
 
-extern void llc_sk_reset(struct sock *sk);
+void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
-extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr,
-                                        u8 first_p_bit);
-extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr,
-                                        u8 first_f_bit);
-extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr,
-                                     u16 *how_many_unacked);
-extern struct sock *llc_lookup_established(struct llc_sap *sap,
-                                          struct llc_addr *daddr,
-                                          struct llc_addr *laddr);
-extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
-extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
+struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
+                                   struct llc_addr *laddr);
+void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
 
-extern u8 llc_data_accept_state(u8 state);
-extern void llc_build_offset_table(void);
+u8 llc_data_accept_state(u8 state);
+void llc_build_offset_table(void);
 #endif /* LLC_CONN_H */
index f0cb909b60eb8c73670cf969a5bf544267dd7730..8d5c543cd620037590b1ee823548a9c091936de3 100644 (file)
@@ -62,8 +62,7 @@
 #define LLC_STATUS_CONFLICT    7 /* disconnect conn */
 #define LLC_STATUS_RESET_DONE  8 /*  */
 
-extern int llc_establish_connection(struct sock *sk, u8 *lmac,
-                                   u8 *dmac, u8 dsap);
-extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
-extern int llc_send_disc(struct sock *sk);
+int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+int llc_send_disc(struct sock *sk);
 #endif /* LLC_IF_H */
index 5a93d13ac95c8177542a3f2b327ae60b26065cea..31e2de7d57c5d6c210e1b445b19551407fd31b6c 100644 (file)
@@ -410,21 +410,20 @@ struct llc_frmr_info {
        u8  ind_bits;           /* indicator bits set with macro */
 } __packed;
 
-extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
-extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
-extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
-extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
-extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
-extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb,
-                                    struct llc_pdu_sn *prev_pdu,
-                                    u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
-extern void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
+                             u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
 #endif /* LLC_PDU_H */
index 37a3bbd02394b3497e91316275babc01214f5d08..a61b98c108ee2c4df241a7d2318249427b686951 100644 (file)
 /* All action functions must look like this */
 typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
-                                      struct sk_buff *skb);
-extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_report_status(struct llc_sap *sap,
-                                       struct sk_buff *skb);
-extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
 #endif /* LLC_S_AC_H */
index e3acb9329e4af4106bc221af99e94670b2393e93..84db3a59ed2880bb4b797fd64ea012beb9136ea1 100644 (file)
@@ -53,15 +53,14 @@ struct llc_sap;
 
 typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_deactivation_req(struct llc_sap *sap,
-                                      struct sk_buff *skb);
+int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
 #endif /* LLC_S_EV_H */
index ed25bec2f6483aa7f2b6bda6a6324eb2206ee383..1e4df9fd9fb2cf6b06acdf2b923ed41fdfad142c 100644 (file)
@@ -19,18 +19,14 @@ struct net_device;
 struct sk_buff;
 struct sock;
 
-extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
-                              unsigned char prim);
-extern struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
-                                      u8 type, u32 data_size);
+void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
+                       unsigned char prim);
+struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
+                               u8 type, u32 data_size);
 
-extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
-                                       struct sk_buff *skb,
-                                       unsigned char *dmac,
-                                       unsigned char dsap);
-extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
-                                      struct sk_buff *skb,
-                                      unsigned char *dmac,
-                                      unsigned char dsap);
+void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
+                                unsigned char *dmac, unsigned char dsap);
+void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
+                               unsigned char *dmac, unsigned char dsap);
 #endif /* LLC_SAP_H */
index d0d11df9cba1ca3b4f8196071f8a0beed8b32f25..807d6b7a943fecab78db1afd6b57ee33b70ded72 100644 (file)
@@ -133,7 +133,7 @@ struct ieee802154_ops {
 
 /* Basic interface to register ieee802154 device */
 struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops);
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
 void ieee802154_free_device(struct ieee802154_dev *dev);
 int ieee802154_register_device(struct ieee802154_dev *dev);
 void ieee802154_unregister_device(struct ieee802154_dev *dev);
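A minimal, hypothetical sketch of how a radio driver consumes the mac802154 registration interface touched above (my_radio_priv, my_radio_ops and my_radio_probe are assumed driver-side names, not part of this patch):

#include <net/mac802154.h>

/* assumed driver-private state; contents are illustrative only */
struct my_radio_priv {
	int channel;
};

/* assumed ops table; a real driver fills in .start, .stop, .xmit, ... */
static struct ieee802154_ops my_radio_ops;

static int my_radio_probe(void)
{
	struct ieee802154_dev *dev;
	int ret;

	/* reserve sizeof(struct my_radio_priv) bytes of private data */
	dev = ieee802154_alloc_device(sizeof(struct my_radio_priv),
				      &my_radio_ops);
	if (!dev)
		return -ENOMEM;

	ret = ieee802154_register_device(dev);
	if (ret)
		ieee802154_free_device(dev);
	return ret;
}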
index 4fbf02aa2ec1e76a5c18257b0b73949e501c56a5..31912c3be772bfa5971e3f31a11f982cdd22ad2d 100644 (file)
@@ -112,6 +112,7 @@ struct mrp_applicant {
        struct mrp_application  *app;
        struct net_device       *dev;
        struct timer_list       join_timer;
+       struct timer_list       periodic_timer;
 
        spinlock_t              lock;
        struct sk_buff_head     queue;
@@ -125,19 +126,17 @@ struct mrp_port {
        struct rcu_head                 rcu;
 };
 
-extern int     mrp_register_application(struct mrp_application *app);
-extern void    mrp_unregister_application(struct mrp_application *app);
+int mrp_register_application(struct mrp_application *app);
+void mrp_unregister_application(struct mrp_application *app);
 
-extern int     mrp_init_applicant(struct net_device *dev,
-                                   struct mrp_application *app);
-extern void    mrp_uninit_applicant(struct net_device *dev,
-                                     struct mrp_application *app);
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
 
-extern int     mrp_request_join(const struct net_device *dev,
-                                 const struct mrp_application *app,
-                                 const void *value, u8 len, u8 type);
-extern void    mrp_request_leave(const struct net_device *dev,
-                                  const struct mrp_application *app,
-                                  const void *value, u8 len, u8 type);
+int mrp_request_join(const struct net_device *dev,
+                    const struct mrp_application *app,
+                    const void *value, u8 len, u8 type);
+void mrp_request_leave(const struct net_device *dev,
+                      const struct mrp_application *app,
+                      const void *value, u8 len, u8 type);
 
 #endif /* _NET_MRP_H */
index ea0cc26ab70e1b447a0e834992c746a9f33c9938..6bbda34d5e59d030a1e2d69e93cda754d06ae5c0 100644 (file)
@@ -110,8 +110,8 @@ struct ndisc_options {
 
 #define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
 
-extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
-                                                struct ndisc_options *ndopts);
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+                                         struct ndisc_options *ndopts);
 
 /*
  * Return the padding between the option length and the start of the
@@ -189,60 +189,51 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
        return n;
 }
 
-extern int                     ndisc_init(void);
-extern int                     ndisc_late_init(void);
+int ndisc_init(void);
+int ndisc_late_init(void);
 
-extern void                    ndisc_late_cleanup(void);
-extern void                    ndisc_cleanup(void);
+void ndisc_late_cleanup(void);
+void ndisc_cleanup(void);
 
-extern int                     ndisc_rcv(struct sk_buff *skb);
+int ndisc_rcv(struct sk_buff *skb);
 
-extern void                    ndisc_send_ns(struct net_device *dev,
-                                             struct neighbour *neigh,
-                                             const struct in6_addr *solicit,
-                                             const struct in6_addr *daddr,
-                                             const struct in6_addr *saddr);
+void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+                  const struct in6_addr *solicit,
+                  const struct in6_addr *daddr, const struct in6_addr *saddr);
 
-extern void                    ndisc_send_rs(struct net_device *dev,
-                                             const struct in6_addr *saddr,
-                                             const struct in6_addr *daddr);
-extern void                    ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
-                                             const struct in6_addr *daddr,
-                                             const struct in6_addr *solicited_addr,
-                                             bool router, bool solicited, bool override,
-                                             bool inc_opt);
+void ndisc_send_rs(struct net_device *dev,
+                  const struct in6_addr *saddr, const struct in6_addr *daddr);
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+                  const struct in6_addr *daddr,
+                  const struct in6_addr *solicited_addr,
+                  bool router, bool solicited, bool override, bool inc_opt);
 
-extern void                    ndisc_send_redirect(struct sk_buff *skb,
-                                                   const struct in6_addr *target);
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
 
-extern int                     ndisc_mc_map(const struct in6_addr *addr, char *buf,
-                                            struct net_device *dev, int dir);
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
+                int dir);
 
 
 /*
  *     IGMP
  */
-extern int                     igmp6_init(void);
+int igmp6_init(void);
 
-extern void                    igmp6_cleanup(void);
+void igmp6_cleanup(void);
 
-extern int                     igmp6_event_query(struct sk_buff *skb);
+int igmp6_event_query(struct sk_buff *skb);
 
-extern int                     igmp6_event_report(struct sk_buff *skb);
+int igmp6_event_report(struct sk_buff *skb);
 
 
 #ifdef CONFIG_SYSCTL
-extern int                     ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
-                                                          int write,
-                                                          void __user *buffer,
-                                                          size_t *lenp,
-                                                          loff_t *ppos);
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos);
 int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
                                 void __user *oldval, size_t __user *oldlenp,
                                 void __user *newval, size_t newlen);
 #endif
 
-extern void                    inet6_ifinfo_notify(int event,
-                                                   struct inet6_dev *idev);
+void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
 
 #endif
index 1313456a0994e03cab0859e7960773b0115f8652..da68c9a90ac56932638feca9cdbba6a61b75ba64 100644 (file)
@@ -22,6 +22,7 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netns/conntrack.h>
 #endif
+#include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
 
 struct user_namespace;
@@ -74,6 +75,7 @@ struct net {
        struct hlist_head       *dev_index_head;
        unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
        int                     ifindex;
+       unsigned int            dev_unreg_count;
 
        /* core fib_rules */
        struct list_head        rules_ops;
@@ -100,6 +102,9 @@ struct net {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct netns_ct         ct;
 #endif
+#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
+       struct netns_nftables   nft;
+#endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
 #endif
@@ -136,8 +141,8 @@ struct net {
 extern struct net init_net;
 
 #ifdef CONFIG_NET_NS
-extern struct net *copy_net_ns(unsigned long flags,
-       struct user_namespace *user_ns, struct net *old_net);
+struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+                       struct net *old_net);
 
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
@@ -154,11 +159,11 @@ static inline struct net *copy_net_ns(unsigned long flags,
 
 extern struct list_head net_namespace_list;
 
-extern struct net *get_net_ns_by_pid(pid_t pid);
-extern struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_pid(pid_t pid);
+struct net *get_net_ns_by_fd(int pid);
 
 #ifdef CONFIG_NET_NS
-extern void __put_net(struct net *net);
+void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
@@ -190,7 +195,7 @@ int net_eq(const struct net *net1, const struct net *net2)
        return net1 == net2;
 }
 
-extern void net_drop_ns(void *);
+void net_drop_ns(void *);
 
 #else
 
@@ -307,19 +312,19 @@ struct pernet_operations {
  * device which caused kernel oops, and panics during network
  * namespace cleanup.   So please don't get this wrong.
  */
-extern int register_pernet_subsys(struct pernet_operations *);
-extern void unregister_pernet_subsys(struct pernet_operations *);
-extern int register_pernet_device(struct pernet_operations *);
-extern void unregister_pernet_device(struct pernet_operations *);
+int register_pernet_subsys(struct pernet_operations *);
+void unregister_pernet_subsys(struct pernet_operations *);
+int register_pernet_device(struct pernet_operations *);
+void unregister_pernet_device(struct pernet_operations *);
 
 struct ctl_table;
 struct ctl_table_header;
 
 #ifdef CONFIG_SYSCTL
-extern int net_sysctl_init(void);
-extern struct ctl_table_header *register_net_sysctl(struct net *net,
-       const char *path, struct ctl_table *table);
-extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+int net_sysctl_init(void);
+struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
+                                            struct ctl_table *table);
+void unregister_net_sysctl_table(struct ctl_table_header *header);
 #else
 static inline int net_sysctl_init(void) { return 0; }
 static inline struct ctl_table_header *register_net_sysctl(struct net *net,
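A short, hypothetical sketch of the usual consumer of the register_pernet_subsys()/unregister_pernet_subsys() interface shown above (my_pernet_init, my_pernet_exit and my_pernet_ops are assumed names, not part of this patch):

#include <net/net_namespace.h>

static int __net_init my_pernet_init(struct net *net)
{
	/* allocate and initialise per-namespace state here */
	return 0;
}

static void __net_exit my_pernet_exit(struct net *net)
{
	/* release per-namespace state here */
}

static struct pernet_operations my_pernet_ops = {
	.init = my_pernet_init,
	.exit = my_pernet_exit,
};

/* typically from module init/exit:
 *	register_pernet_subsys(&my_pernet_ops);
 *	unregister_pernet_subsys(&my_pernet_ops);
 */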
index fe630dde35c3f3688d30e1e33a5fb5d06393bba6..d8bbb38584b603ac5cab30d3f86049565b855106 100644 (file)
@@ -26,8 +26,8 @@ enum netevent_notif_type {
        NETEVENT_REDIRECT,         /* arg is struct netevent_redirect ptr */
 };
 
-extern int register_netevent_notifier(struct notifier_block *nb);
-extern int unregister_netevent_notifier(struct notifier_block *nb);
-extern int call_netevent_notifiers(unsigned long val, void *v);
+int register_netevent_notifier(struct notifier_block *nb);
+int unregister_netevent_notifier(struct notifier_block *nb);
+int call_netevent_notifiers(unsigned long val, void *v);
 
 #endif
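A brief, hypothetical sketch of how the netevent notifier API above is typically used (my_netevent_event and my_netevent_nb are assumed names, not part of this patch):

#include <linux/notifier.h>
#include <linux/printk.h>
#include <net/netevent.h>

static int my_netevent_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	/* for NETEVENT_REDIRECT, ptr is a struct netevent_redirect pointer */
	if (event == NETEVENT_REDIRECT)
		pr_debug("route redirect observed\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_netevent_nb = {
	.notifier_call = my_netevent_event,
};

/* typically from module init/exit:
 *	register_netevent_notifier(&my_netevent_nb);
 *	unregister_netevent_notifier(&my_netevent_nb);
 */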
index 7573d52a43469a9dbc3feeb6555810ceb9e1a740..6c3d12e2949f908c3927cb4bfe29ac30c49e1e4a 100644 (file)
@@ -16,9 +16,9 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 
-extern int nf_conntrack_ipv4_compat_init(void);
-extern void nf_conntrack_ipv4_compat_fini(void);
+int nf_conntrack_ipv4_compat_init(void);
+void nf_conntrack_ipv4_compat_fini(void);
 
-extern void need_ipv4_conntrack(void);
+void need_ipv4_conntrack(void);
 
 #endif /*_NF_CONNTRACK_IPV4_H*/
index 6b00ea38546b8e829f5b7a97bc5c33b35b0a030a..f01ef208dff6ec151f82eb2a33626fdae7edea42 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _NF_DEFRAG_IPV4_H
 #define _NF_DEFRAG_IPV4_H
 
-extern void nf_defrag_ipv4_enable(void);
+void nf_defrag_ipv4_enable(void);
 
 #endif /* _NF_DEFRAG_IPV4_H */
index fd79c9a1779d19d6a5dd54ef7380b990763a00ea..5613412e7dc29a996cb5991aa809526e3f440875 100644 (file)
@@ -1,15 +1,14 @@
 #ifndef _NF_DEFRAG_IPV6_H
 #define _NF_DEFRAG_IPV6_H
 
-extern void nf_defrag_ipv6_enable(void);
-
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-                              struct net_device *in,
-                              struct net_device *out,
-                              int (*okfn)(struct sk_buff *));
+void nf_defrag_ipv6_enable(void);
+
+int nf_ct_frag6_init(void);
+void nf_ct_frag6_cleanup(void);
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+                       struct net_device *in, struct net_device *out,
+                       int (*okfn)(struct sk_buff *));
 
 struct inet_frags_ctl;
 
index 0c1288a50e8bde05d896ebf243d0b7c3a01deb11..01ea6eed1bb1ddcc9b8c9001286ae85730bea416 100644 (file)
@@ -139,15 +139,13 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct)
 }
 
 /* Alter reply tuple (maybe alter helper). */
-extern void
-nf_conntrack_alter_reply(struct nf_conn *ct,
-                        const struct nf_conntrack_tuple *newreply);
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+                             const struct nf_conntrack_tuple *newreply);
 
 /* Is this tuple taken? (ignoring any belonging to the given
    conntrack). */
-extern int
-nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
-                        const struct nf_conn *ignored_conntrack);
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+                            const struct nf_conn *ignored_conntrack);
 
 /* Return conntrack_info and tuple hash for given skb. */
 static inline struct nf_conn *
@@ -165,37 +163,34 @@ static inline void nf_ct_put(struct nf_conn *ct)
 }
 
 /* Protocol module loading */
-extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-extern void nf_ct_l3proto_module_put(unsigned short l3proto);
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
 
 /*
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, unsigned int size);
+void nf_ct_free_hashtable(void *hash, unsigned int size);
 
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple);
 
-extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
-extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
+void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
 
-extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
-                             unsigned int nhoff, u_int16_t l3num,
-                             struct nf_conntrack_tuple *tuple);
-extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
-                                const struct nf_conntrack_tuple *orig);
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+                      u_int16_t l3num, struct nf_conntrack_tuple *tuple);
+bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+                         const struct nf_conntrack_tuple *orig);
 
-extern void __nf_ct_refresh_acct(struct nf_conn *ct,
-                                enum ip_conntrack_info ctinfo,
-                                const struct sk_buff *skb,
-                                unsigned long extra_jiffies,
-                                int do_acct);
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                         const struct sk_buff *skb,
+                         unsigned long extra_jiffies, int do_acct);
 
 /* Refresh conntrack for this many jiffies and do accounting */
 static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -214,10 +209,8 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
        __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
 }
 
-extern bool __nf_ct_kill_acct(struct nf_conn *ct,
-                             enum ip_conntrack_info ctinfo,
-                             const struct sk_buff *skb,
-                             int do_acct);
+bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                      const struct sk_buff *skb, int do_acct);
 
 /* kill conntrack and do accounting */
 static inline bool nf_ct_kill_acct(struct nf_conn *ct,
@@ -244,19 +237,17 @@ static inline struct nf_conn *nf_ct_untracked_get(void)
 {
        return &__raw_get_cpu_var(nf_conntrack_untracked);
 }
-extern void nf_ct_untracked_status_or(unsigned long bits);
+void nf_ct_untracked_status_or(unsigned long bits);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
-extern void
-nf_ct_iterate_cleanup(struct net *net,
-                     int (*iter)(struct nf_conn *i, void *data),
-                     void *data, u32 portid, int report);
-extern void nf_conntrack_free(struct nf_conn *ct);
-extern struct nf_conn *
-nf_conntrack_alloc(struct net *net, u16 zone,
-                  const struct nf_conntrack_tuple *orig,
-                  const struct nf_conntrack_tuple *repl,
-                  gfp_t gfp);
+void nf_ct_iterate_cleanup(struct net *net,
+                          int (*iter)(struct nf_conn *i, void *data),
+                          void *data, u32 portid, int report);
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+                                  const struct nf_conntrack_tuple *orig,
+                                  const struct nf_conntrack_tuple *repl,
+                                  gfp_t gfp);
 
 static inline int nf_ct_is_template(const struct nf_conn *ct)
 {
@@ -287,7 +278,7 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
 
 struct kernel_param;
 
-extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
 extern unsigned int nf_conntrack_hash_rnd;
index 2bdb7a15fe06b102a5fc88c2cc3da5662ff53a95..fef44edf49c1221fcfd2ce006c18af0c67c761e8 100644 (file)
@@ -42,8 +42,8 @@ struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
        return acct;
 };
 
-extern unsigned int
-seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
+unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
+                           int dir);
 
 /* Check if connection tracking accounting is enabled */
 static inline bool nf_ct_acct_enabled(struct net *net)
@@ -57,9 +57,9 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
        net->ct.sysctl_acct = enable;
 }
 
-extern int nf_conntrack_acct_pernet_init(struct net *net);
-extern void nf_conntrack_acct_pernet_fini(struct net *net);
+int nf_conntrack_acct_pernet_init(struct net *net);
+void nf_conntrack_acct_pernet_fini(struct net *net);
 
-extern int nf_conntrack_acct_init(void);
-extern void nf_conntrack_acct_fini(void);
+int nf_conntrack_acct_init(void);
+void nf_conntrack_acct_fini(void);
 #endif /* _NF_CONNTRACK_ACCT_H */
index fb2b6234e9375846bdb37727717c7dbd432550d9..15308b8eb5b5218330c3043c46a02537d00779a2 100644 (file)
 /* This header is used to share core functionality between the
    standalone connection tracking module, and the compatibility layer's use
    of connection tracking. */
-extern unsigned int nf_conntrack_in(struct net *net,
-                                   u_int8_t pf,
-                                   unsigned int hooknum,
-                                   struct sk_buff *skb);
-
-extern int nf_conntrack_init_net(struct net *net);
-extern void nf_conntrack_cleanup_net(struct net *net);
-extern void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
-
-extern int nf_conntrack_proto_pernet_init(struct net *net);
-extern void nf_conntrack_proto_pernet_fini(struct net *net);
-
-extern int nf_conntrack_proto_init(void);
-extern void nf_conntrack_proto_fini(void);
-
-extern int nf_conntrack_init_start(void);
-extern void nf_conntrack_cleanup_start(void);
-
-extern void nf_conntrack_init_end(void);
-extern void nf_conntrack_cleanup_end(void);
-
-extern bool
-nf_ct_get_tuple(const struct sk_buff *skb,
-               unsigned int nhoff,
-               unsigned int dataoff,
-               u_int16_t l3num,
-               u_int8_t protonum,
-               struct nf_conntrack_tuple *tuple,
-               const struct nf_conntrack_l3proto *l3proto,
-               const struct nf_conntrack_l4proto *l4proto);
-
-extern bool
-nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
-                  const struct nf_conntrack_tuple *orig,
-                  const struct nf_conntrack_l3proto *l3proto,
-                  const struct nf_conntrack_l4proto *l4proto);
+unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
+                            struct sk_buff *skb);
+
+int nf_conntrack_init_net(struct net *net);
+void nf_conntrack_cleanup_net(struct net *net);
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
+
+int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_fini(struct net *net);
+
+int nf_conntrack_proto_init(void);
+void nf_conntrack_proto_fini(void);
+
+int nf_conntrack_init_start(void);
+void nf_conntrack_cleanup_start(void);
+
+void nf_conntrack_init_end(void);
+void nf_conntrack_cleanup_end(void);
+
+bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
+                    unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
+                    struct nf_conntrack_tuple *tuple,
+                    const struct nf_conntrack_l3proto *l3proto,
+                    const struct nf_conntrack_l4proto *l4proto);
+
+bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+                       const struct nf_conntrack_tuple *orig,
+                       const struct nf_conntrack_l3proto *l3proto,
+                       const struct nf_conntrack_l4proto *l4proto);
 
 /* Find a connection corresponding to a tuple. */
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
 nf_conntrack_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple);
 
-extern int __nf_conntrack_confirm(struct sk_buff *skb);
+int __nf_conntrack_confirm(struct sk_buff *skb);
 
 /* Confirm a connection: returns NF_DROP if packet must be dropped. */
 static inline int nf_conntrack_confirm(struct sk_buff *skb)
index 092dc651689f81d85a6ae686c5d761aee006918d..0e3d08e4b1d3e59fa101607d88e4c66f7664566d 100644 (file)
@@ -68,10 +68,12 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+int nf_conntrack_register_notifier(struct net *net,
+                                  struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net,
+                                     struct nf_ct_event_notifier *nb);
 
-extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
+void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -166,8 +168,10 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+int nf_ct_expect_register_notifier(struct net *net,
+                                  struct nf_exp_event_notifier *nb);
+void nf_ct_expect_unregister_notifier(struct net *net,
+                                     struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -207,11 +211,11 @@ nf_ct_expect_event(enum ip_conntrack_expect_events event,
        nf_ct_expect_event_report(event, exp, 0, 0);
 }
 
-extern int nf_conntrack_ecache_pernet_init(struct net *net);
-extern void nf_conntrack_ecache_pernet_fini(struct net *net);
+int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_fini(struct net *net);
 
-extern int nf_conntrack_ecache_init(void);
-extern void nf_conntrack_ecache_fini(void);
+int nf_conntrack_ecache_init(void);
+void nf_conntrack_ecache_fini(void);
 #else /* CONFIG_NF_CONNTRACK_EVENTS */
 
 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
index 88a1d4060d5260f7ac51e7b2d74f87ab409cd4de..86372ae0ee840a7eb7bfefe6c705ed748e0582ba 100644 (file)
@@ -73,7 +73,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
        ((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
 
 /* Destroy all relationships */
-extern void __nf_ct_ext_destroy(struct nf_conn *ct);
+void __nf_ct_ext_destroy(struct nf_conn *ct);
 static inline void nf_ct_ext_destroy(struct nf_conn *ct)
 {
        if (ct->ext)
index 26c4ae5bfbb8df49abbdcf19dc67de9273575601..6cf614bc0029326da74bdb6145d009f95f3422e5 100644 (file)
@@ -52,21 +52,24 @@ struct nf_conntrack_helper {
        unsigned int queue_num;         /* For user-space helpers. */
 };
 
-extern struct nf_conntrack_helper *
-__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
+                                                      u16 l3num, u8 protonum);
 
-extern struct nf_conntrack_helper *
-nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
+                                                              u16 l3num,
+                                                              u8 protonum);
 
-extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
-extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
 
-extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
+                                         struct nf_conntrack_helper *helper,
+                                         gfp_t gfp);
 
-extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
-                                    gfp_t flags);
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+                             gfp_t flags);
 
-extern void nf_ct_helper_destroy(struct nf_conn *ct);
+void nf_ct_helper_destroy(struct nf_conn *ct);
 
 static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
 {
@@ -82,17 +85,16 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
        return (void *)help->data;
 }
 
-extern int nf_conntrack_helper_pernet_init(struct net *net);
-extern void nf_conntrack_helper_pernet_fini(struct net *net);
+int nf_conntrack_helper_pernet_init(struct net *net);
+void nf_conntrack_helper_pernet_fini(struct net *net);
 
-extern int nf_conntrack_helper_init(void);
-extern void nf_conntrack_helper_fini(void);
+int nf_conntrack_helper_init(void);
+void nf_conntrack_helper_fini(void);
 
-extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
-                                      unsigned int protoff,
-                                      struct nf_conn *ct,
-                                      enum ip_conntrack_info ctinfo,
-                                      unsigned int timeout);
+int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff,
+                               struct nf_conn *ct,
+                               enum ip_conntrack_info ctinfo,
+                               unsigned int timeout);
 
 struct nf_ct_helper_expectfn {
        struct list_head head;
index 3bb89eac3fa130a477b925d87c441189d27836e1..3efab704b7eb9b303b433549bb342d923e43f67f 100644 (file)
@@ -77,17 +77,17 @@ struct nf_conntrack_l3proto {
 extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
 
 /* Protocol pernet registration. */
-extern int nf_ct_l3proto_pernet_register(struct net *net,
-                                        struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_pernet_unregister(struct net *net,
-                                           struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_pernet_register(struct net *net,
+                                 struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_pernet_unregister(struct net *net,
+                                    struct nf_conntrack_l3proto *proto);
 
 /* Protocol global registration. */
-extern int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
-extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
+struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
+void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
 
 /* Existing built-in protocols */
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
index b411d7b17dec40a64cc41df315f6188004ac65fa..4c8d573830b7e606098fbd8fa2b5e483111601b0 100644 (file)
@@ -114,22 +114,22 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
 #define MAX_NF_CT_PROTO 256
 
-extern struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
+struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
+                                                 u_int8_t l4proto);
 
-extern struct nf_conntrack_l4proto *
-nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
-extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
+struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
+                                                   u_int8_t l4proto);
+void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
 
 /* Protocol pernet registration. */
-extern int nf_ct_l4proto_pernet_register(struct net *net,
-                                        struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_pernet_unregister(struct net *net,
-                                           struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_pernet_register(struct net *net,
+                                 struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_pernet_unregister(struct net *net,
+                                    struct nf_conntrack_l4proto *proto);
 
 /* Protocol global registration. */
-extern int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
 
 static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
 {
@@ -140,11 +140,11 @@ static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
 }
 
 /* Generic netlink helpers */
-extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
-                                     const struct nf_conntrack_tuple *tuple);
-extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
-                                     struct nf_conntrack_tuple *t);
-extern int nf_ct_port_nlattr_tuple_size(void);
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+                              const struct nf_conntrack_tuple *tuple);
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+                              struct nf_conntrack_tuple *t);
+int nf_ct_port_nlattr_tuple_size(void);
 extern const struct nla_policy nf_ct_port_nla_policy[];
 
 #ifdef CONFIG_SYSCTL
index f6177a5fe0cafe6de0e00ac6d2abd1ce9856d897..4b3362991a25f9c9f09b9794c96a0d0e15e89be9 100644 (file)
@@ -30,22 +30,18 @@ static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
        return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
 }
 
-extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                            s32 off);
-extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                           __be32 seq, s32 off);
-extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
-                                struct nf_conn *ct,
-                                enum ip_conntrack_info ctinfo,
-                                s32 off);
-
-extern int nf_ct_seq_adjust(struct sk_buff *skb,
-                           struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                           unsigned int protoff);
-extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir,
-                           u32 seq);
-
-extern int nf_conntrack_seqadj_init(void);
-extern void nf_conntrack_seqadj_fini(void);
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                     s32 off);
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                    __be32 seq, s32 off);
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
+                         enum ip_conntrack_info ctinfo, s32 off);
+
+int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+                    enum ip_conntrack_info ctinfo, unsigned int protoff);
+s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
+
+int nf_conntrack_seqadj_init(void);
+void nf_conntrack_seqadj_fini(void);
 
 #endif /* _NF_CONNTRACK_SEQADJ_H */
index 806f54a290d60476e0e25193d3565b611febece3..6793614e6502a0eb51ca7efabc593e7bb6866576 100644 (file)
@@ -56,22 +56,20 @@ struct synproxy_options {
 
 struct tcphdr;
 struct xt_synproxy_info;
-extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
-                                  const struct tcphdr *th,
-                                  struct synproxy_options *opts);
-extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
-extern void synproxy_build_options(struct tcphdr *th,
-                                  const struct synproxy_options *opts);
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+                           const struct tcphdr *th,
+                           struct synproxy_options *opts);
+unsigned int synproxy_options_size(const struct synproxy_options *opts);
+void synproxy_build_options(struct tcphdr *th,
+                           const struct synproxy_options *opts);
 
-extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
-                                          struct synproxy_options *opts);
-extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+                                   struct synproxy_options *opts);
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
 
-extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
-                                          unsigned int protoff,
-                                          struct tcphdr *th,
-                                          struct nf_conn *ct,
-                                          enum ip_conntrack_info ctinfo,
-                                          const struct nf_conn_synproxy *synproxy);
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
+                                   struct tcphdr *th, struct nf_conn *ct,
+                                   enum ip_conntrack_info ctinfo,
+                                   const struct nf_conn_synproxy *synproxy);
 
 #endif /* _NF_CONNTRACK_SYNPROXY_H */
index d23aceb16d9443a865bc0e789a60089b0d9c40bc..62308713dd7fa1704409e7db22fca725cc0200b9 100644 (file)
@@ -76,8 +76,8 @@ nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
 }
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern int nf_conntrack_timeout_init(void);
-extern void nf_conntrack_timeout_fini(void);
+int nf_conntrack_timeout_init(void);
+void nf_conntrack_timeout_fini(void);
 #else
 static inline int nf_conntrack_timeout_init(void)
 {
index b00461413efd4d49ec3e11ba4a3e0786594a32f2..300ae2209f251e7e669c8c353b8de7fb50b16f55 100644 (file)
@@ -48,11 +48,11 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable)
 }
 
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-extern int nf_conntrack_tstamp_pernet_init(struct net *net);
-extern void nf_conntrack_tstamp_pernet_fini(struct net *net);
+int nf_conntrack_tstamp_pernet_init(struct net *net);
+void nf_conntrack_tstamp_pernet_fini(struct net *net);
 
-extern int nf_conntrack_tstamp_init(void);
-extern void nf_conntrack_tstamp_fini(void);
+int nf_conntrack_tstamp_init(void);
+void nf_conntrack_tstamp_fini(void);
 #else
 static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
 {
index 59a1924200536b9fc5847998b7129c8aefeb7060..07eaaf60409215198961cea9834c2d770a90f02e 100644 (file)
@@ -41,13 +41,16 @@ struct nf_conn_nat {
 };
 
 /* Set up the info structure to map into this range. */
-extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-                                     const struct nf_nat_range *range,
-                                     enum nf_nat_manip_type maniptype);
+unsigned int nf_nat_setup_info(struct nf_conn *ct,
+                              const struct nf_nat_range *range,
+                              enum nf_nat_manip_type maniptype);
+
+extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+                                             unsigned int hooknum);
 
 /* Is this tuple already taken? (not by us)*/
-extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
-                            const struct nf_conn *ignored_conntrack);
+int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+                     const struct nf_conn *ignored_conntrack);
 
 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
 {
index 972e1e47ec79819f610cf9081be732b9131aa46a..fbfd1ba4254e0c356ca382626cae9d7653e8295e 100644 (file)
@@ -7,12 +7,10 @@
 /* This header used to share core functionality between the standalone
    NAT module, and the compatibility layer's use of NAT for masquerading. */
 
-extern unsigned int nf_nat_packet(struct nf_conn *ct,
-                                 enum ip_conntrack_info ctinfo,
-                                 unsigned int hooknum,
-                                 struct sk_buff *skb);
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                          unsigned int hooknum, struct sk_buff *skb);
 
-extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
 
 static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
index 404324d1d0c4c2a07cb14352ec8623a53b08b541..01bcc6bfbcc9034e399a938a6a6dd83b2bed6ef7 100644 (file)
@@ -7,14 +7,11 @@
 struct sk_buff;
 
 /* These return true or false. */
-extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
-                                     struct nf_conn *ct,
-                                     enum ip_conntrack_info ctinfo,
-                                     unsigned int protoff,
-                                     unsigned int match_offset,
-                                     unsigned int match_len,
-                                     const char *rep_buffer,
-                                     unsigned int rep_len, bool adjust);
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+                              enum ip_conntrack_info ctinfo,
+                              unsigned int protoff, unsigned int match_offset,
+                              unsigned int match_len, const char *rep_buffer,
+                              unsigned int rep_len, bool adjust);
 
 static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                           struct nf_conn *ct,
@@ -30,18 +27,14 @@ static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                          rep_buffer, rep_len, true);
 }
 
-extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
-                                   struct nf_conn *ct,
-                                   enum ip_conntrack_info ctinfo,
-                                   unsigned int protoff,
-                                   unsigned int match_offset,
-                                   unsigned int match_len,
-                                   const char *rep_buffer,
-                                   unsigned int rep_len);
+int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+                            enum ip_conntrack_info ctinfo,
+                            unsigned int protoff, unsigned int match_offset,
+                            unsigned int match_len, const char *rep_buffer,
+                            unsigned int rep_len);
 
 /* Setup NAT on this expected conntrack so it follows master, but goes
  * to port ct->master->saved_proto. */
-extern void nf_nat_follow_master(struct nf_conn *ct,
-                                struct nf_conntrack_expect *this);
+void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
 
 #endif
index bd3b97e02c82a86eaf8aafc20a3a94b71830fa1f..5a2919b2e09af396540348bad01f12b7deab5ef9 100644 (file)
@@ -35,18 +35,15 @@ struct nf_nat_l3proto {
                                   struct nf_nat_range *range);
 };
 
-extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
-extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
-extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
-
-extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
-                                        struct nf_conn *ct,
-                                        enum ip_conntrack_info ctinfo,
-                                        unsigned int hooknum);
-extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
-                                          struct nf_conn *ct,
-                                          enum ip_conntrack_info ctinfo,
-                                          unsigned int hooknum,
-                                          unsigned int hdrlen);
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+                                 enum ip_conntrack_info ctinfo,
+                                 unsigned int hooknum);
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+                                   enum ip_conntrack_info ctinfo,
+                                   unsigned int hooknum, unsigned int hdrlen);
 
 #endif /* _NF_NAT_L3PROTO_H */
index 24feb68d1bccd6698be75deaa15f8bb91a7bf0d8..12f4cc841b6eddba6bdfc4132e448c31781b4e84 100644 (file)
@@ -42,10 +42,11 @@ struct nf_nat_l4proto {
 };
 
 /* Protocol registration. */
-extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+void nf_nat_l4proto_unregister(u8 l3proto,
+                              const struct nf_nat_l4proto *l4proto);
 
-extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
 
 /* Built-in protocols. */
 extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
@@ -54,19 +55,18 @@ extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
 extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
 extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
 
-extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
-                                   enum nf_nat_manip_type maniptype,
-                                   const union nf_conntrack_man_proto *min,
-                                   const union nf_conntrack_man_proto *max);
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+                            enum nf_nat_manip_type maniptype,
+                            const union nf_conntrack_man_proto *min,
+                            const union nf_conntrack_man_proto *max);
 
-extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
-                                       struct nf_conntrack_tuple *tuple,
-                                       const struct nf_nat_range *range,
-                                       enum nf_nat_manip_type maniptype,
-                                       const struct nf_conn *ct,
-                                       u16 *rover);
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                struct nf_conntrack_tuple *tuple,
+                                const struct nf_nat_range *range,
+                                enum nf_nat_manip_type maniptype,
+                                const struct nf_conn *ct, u16 *rover);
 
-extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
-                                         struct nf_nat_range *range);
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+                                  struct nf_nat_range *range);
 
 #endif /*_NF_NAT_L4PROTO_H*/
index aaba4bbcdda0e9e7b4b7adfcb4c19e3555bb4219..c1d5b3e34a211a8df6fd4eba749b4fcd9e62131f 100644 (file)
@@ -28,7 +28,7 @@ struct nf_queue_handler {
 
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
-extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
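A minimal, hypothetical sketch of a queue handler built on the nf_register_queue_handler()/nf_reinject() interface above; it simply re-injects every queued packet with an ACCEPT verdict (null_queue_outfn and null_qh are assumed names, and .outfn is assumed to be the handler's packet callback):

#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

static int null_queue_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* take ownership of the entry and hand it straight back with NF_ACCEPT */
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static const struct nf_queue_handler null_qh = {
	.outfn = null_queue_outfn,
};

/* typically from module init/exit:
 *	nf_register_queue_handler(&null_qh);
 *	nf_unregister_queue_handler();
 */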
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
new file mode 100644 (file)
index 0000000..5a91abf
--- /dev/null
@@ -0,0 +1,519 @@
+#ifndef _NET_NF_TABLES_H
+#define _NET_NF_TABLES_H
+
+#include <linux/list.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netlink.h>
+
+#define NFT_JUMP_STACK_SIZE    16
+
+struct nft_pktinfo {
+       struct sk_buff                  *skb;
+       const struct net_device         *in;
+       const struct net_device         *out;
+       u8                              hooknum;
+       u8                              nhoff;
+       u8                              thoff;
+       /* for x_tables compatibility */
+       struct xt_action_param          xt;
+};
+
+static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
+                                  const struct nf_hook_ops *ops,
+                                  struct sk_buff *skb,
+                                  const struct net_device *in,
+                                  const struct net_device *out)
+{
+       pkt->skb = skb;
+       pkt->in = pkt->xt.in = in;
+       pkt->out = pkt->xt.out = out;
+       pkt->hooknum = pkt->xt.hooknum = ops->hooknum;
+       pkt->xt.family = ops->pf;
+}
+
+struct nft_data {
+       union {
+               u32                             data[4];
+               struct {
+                       u32                     verdict;
+                       struct nft_chain        *chain;
+               };
+       };
+} __attribute__((aligned(__alignof__(u64))));
+
+static inline int nft_data_cmp(const struct nft_data *d1,
+                              const struct nft_data *d2,
+                              unsigned int len)
+{
+       return memcmp(d1->data, d2->data, len);
+}
+
+static inline void nft_data_copy(struct nft_data *dst,
+                                const struct nft_data *src)
+{
+       BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
+       *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
+       *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+}
+
+static inline void nft_data_debug(const struct nft_data *data)
+{
+       pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
+                data->data[0], data->data[1],
+                data->data[2], data->data[3]);
+}
+
+/**
+ *     struct nft_ctx - nf_tables rule/set context
+ *
+ *     @net: net namespace
+ *     @skb: netlink skb
+ *     @nlh: netlink message header
+ *     @afi: address family info
+ *     @table: the table the chain is contained in
+ *     @chain: the chain the rule is contained in
+ *     @nla: netlink attributes
+ */
+struct nft_ctx {
+       struct net                      *net;
+       const struct sk_buff            *skb;
+       const struct nlmsghdr           *nlh;
+       const struct nft_af_info        *afi;
+       const struct nft_table          *table;
+       const struct nft_chain          *chain;
+       const struct nlattr * const     *nla;
+};
+
+struct nft_data_desc {
+       enum nft_data_types             type;
+       unsigned int                    len;
+};
+
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+                 struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+                 enum nft_data_types type, unsigned int len);
+
+static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+{
+       return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
+static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+{
+       return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+}
+
+int nft_validate_input_register(enum nft_registers reg);
+int nft_validate_output_register(enum nft_registers reg);
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+                          const struct nft_data *data,
+                          enum nft_data_types type);
+
+/**
+ *     struct nft_set_elem - generic representation of set elements
+ *
+ *     @cookie: implementation specific element cookie
+ *     @key: element key
+ *     @data: element data (maps only)
+ *     @flags: element flags (end of interval)
+ *
+ *     The cookie can be used to store a handle to the element for subsequent
+ *     removal.
+ */
+struct nft_set_elem {
+       void                    *cookie;
+       struct nft_data         key;
+       struct nft_data         data;
+       u32                     flags;
+};
+
+struct nft_set;
+struct nft_set_iter {
+       unsigned int    count;
+       unsigned int    skip;
+       int             err;
+       int             (*fn)(const struct nft_ctx *ctx,
+                             const struct nft_set *set,
+                             const struct nft_set_iter *iter,
+                             const struct nft_set_elem *elem);
+};
+
+/**
+ *     struct nft_set_ops - nf_tables set operations
+ *
+ *     @lookup: look up an element within the set
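+ *     @get: get set element by key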
+ *     @insert: insert new element into set
+ *     @remove: remove element from set
+ *     @walk: iterate over all set elements
+ *     @privsize: function to return size of set private data
+ *     @init: initialize private data of new set instance
+ *     @destroy: destroy private data of set instance
+ *     @list: nf_tables_set_ops list node
+ *     @owner: module reference
+ *     @features: features supported by the implementation
+ */
+struct nft_set_ops {
+       bool                            (*lookup)(const struct nft_set *set,
+                                                 const struct nft_data *key,
+                                                 struct nft_data *data);
+       int                             (*get)(const struct nft_set *set,
+                                              struct nft_set_elem *elem);
+       int                             (*insert)(const struct nft_set *set,
+                                                 const struct nft_set_elem *elem);
+       void                            (*remove)(const struct nft_set *set,
+                                                 const struct nft_set_elem *elem);
+       void                            (*walk)(const struct nft_ctx *ctx,
+                                               const struct nft_set *set,
+                                               struct nft_set_iter *iter);
+
+       unsigned int                    (*privsize)(const struct nlattr * const nla[]);
+       int                             (*init)(const struct nft_set *set,
+                                               const struct nlattr * const nla[]);
+       void                            (*destroy)(const struct nft_set *set);
+
+       struct list_head                list;
+       struct module                   *owner;
+       u32                             features;
+};
+
+int nft_register_set(struct nft_set_ops *ops);
+void nft_unregister_set(struct nft_set_ops *ops);
+
+/**
+ *     struct nft_set - nf_tables set instance
+ *
+ *     @list: table set list node
+ *     @bindings: list of set bindings
+ *     @name: name of the set
+ *     @ktype: key type (numeric type defined by userspace, not used in the kernel)
+ *     @dtype: data type (verdict or numeric type defined by userspace)
+ *     @ops: set ops
+ *     @flags: set flags
+ *     @klen: key length
+ *     @dlen: data length
+ *     @data: private set data
+ */
+struct nft_set {
+       struct list_head                list;
+       struct list_head                bindings;
+       char                            name[IFNAMSIZ];
+       u32                             ktype;
+       u32                             dtype;
+       /* runtime data below here */
+       const struct nft_set_ops        *ops ____cacheline_aligned;
+       u16                             flags;
+       u8                              klen;
+       u8                              dlen;
+       unsigned char                   data[]
+               __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_set_priv(const struct nft_set *set)
+{
+       return (void *)set->data;
+}
+
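+/*
+ * Illustrative sketch (not part of this patch): a set backend reports the
+ * size of its private state via ->privsize() and later recovers it from the
+ * trailing data[] area, e.g. with a hypothetical backend structure:
+ *
+ *	struct nft_foo_set *priv = nft_set_priv(set);
+ */
+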
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+                                    const struct nlattr *nla);
+
+/**
+ *     struct nft_set_binding - nf_tables set binding
+ *
+ *     @list: set bindings list node
+ *     @chain: chain containing the rule bound to the set
+ *
+ *     A set binding contains all information necessary for validation
+ *     of new elements added to a bound set.
+ */
+struct nft_set_binding {
+       struct list_head                list;
+       const struct nft_chain          *chain;
+};
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                      struct nft_set_binding *binding);
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                         struct nft_set_binding *binding);
+
+
+/**
+ *     struct nft_expr_type - nf_tables expression type
+ *
+ *     @select_ops: function to select nft_expr_ops
+ *     @ops: default ops, used when no select_ops function is present
+ *     @list: used internally
+ *     @name: Identifier
+ *     @owner: module reference
+ *     @policy: netlink attribute policy
+ *     @maxattr: highest netlink attribute number
+ */
+struct nft_expr_type {
+       const struct nft_expr_ops       *(*select_ops)(const struct nft_ctx *,
+                                                      const struct nlattr * const tb[]);
+       const struct nft_expr_ops       *ops;
+       struct list_head                list;
+       const char                      *name;
+       struct module                   *owner;
+       const struct nla_policy         *policy;
+       unsigned int                    maxattr;
+};
+
+/**
+ *     struct nft_expr_ops - nf_tables expression operations
+ *
+ *     @eval: Expression evaluation function
+ *     @size: full expression size, including private data size
+ *     @init: initialization function
+ *     @destroy: destruction function
+ *     @dump: function to dump parameters
+ *     @type: expression type
+ *     @validate: validate expression, called during loop detection
+ *     @data: extra data to attach to this expression operation
+ */
+struct nft_expr;
+struct nft_expr_ops {
+       void                            (*eval)(const struct nft_expr *expr,
+                                               struct nft_data data[NFT_REG_MAX + 1],
+                                               const struct nft_pktinfo *pkt);
+       unsigned int                    size;
+
+       int                             (*init)(const struct nft_ctx *ctx,
+                                               const struct nft_expr *expr,
+                                               const struct nlattr * const tb[]);
+       void                            (*destroy)(const struct nft_expr *expr);
+       int                             (*dump)(struct sk_buff *skb,
+                                               const struct nft_expr *expr);
+       int                             (*validate)(const struct nft_ctx *ctx,
+                                                   const struct nft_expr *expr,
+                                                   const struct nft_data **data);
+       const struct nft_expr_type      *type;
+       void                            *data;
+};
+
+#define NFT_EXPR_MAXATTR               16
+#define NFT_EXPR_SIZE(size)            (sizeof(struct nft_expr) + \
+                                        ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ *     struct nft_expr - nf_tables expression
+ *
+ *     @ops: expression ops
+ *     @data: expression private data
+ */
+struct nft_expr {
+       const struct nft_expr_ops       *ops;
+       unsigned char                   data[];
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+       return (void *)expr->data;
+}
+
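+/*
+ * Illustrative sketch (not part of this patch): an expression implementation
+ * would typically set .size = NFT_EXPR_SIZE(sizeof(struct nft_foo)) in its
+ * ops and recover that private area in its hooks via
+ *
+ *	struct nft_foo *priv = nft_expr_priv(expr);
+ *
+ * where struct nft_foo is a hypothetical per-expression state structure.
+ */
+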
+/**
+ *     struct nft_rule - nf_tables rule
+ *
+ *     @list: used internally
+ *     @rcu_head: used internally for rcu
+ *     @handle: rule handle
+ *     @genmask: generation mask
+ *     @dlen: length of expression data
+ *     @data: expression data
+ */
+struct nft_rule {
+       struct list_head                list;
+       struct rcu_head                 rcu_head;
+       u64                             handle:46,
+                                       genmask:2,
+                                       dlen:16;
+       unsigned char                   data[]
+               __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+/**
+ *     struct nft_rule_trans - nf_tables rule update in transaction
+ *
+ *     @list: used internally
+ *     @rule: rule that needs to be updated
+ *     @chain: chain that this rule belongs to
+ *     @table: table for which this chain applies
+ *     @nlh: netlink header of the message that contains this update
+ *     @family: family expressed as AF_*
+ */
+struct nft_rule_trans {
+       struct list_head                list;
+       struct nft_rule                 *rule;
+       const struct nft_chain          *chain;
+       const struct nft_table          *table;
+       const struct nlmsghdr           *nlh;
+       u8                              family;
+};
+
+static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
+{
+       return (struct nft_expr *)&rule->data[0];
+}
+
+static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr)
+{
+       return ((void *)expr) + expr->ops->size;
+}
+
+static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+{
+       return (struct nft_expr *)&rule->data[rule->dlen];
+}
+
+/*
+ * The last pointer isn't really necessary, but the compiler isn't able to
+ * determine that the result of nft_expr_last() is always the same since it
+ * can't assume that the dlen value isn't changed by calls made within the loop.
+ */
+#define nft_rule_for_each_expr(expr, last, rule) \
+       for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \
+            (expr) != (last); \
+            (expr) = nft_expr_next(expr))
+
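+/*
+ * Illustrative sketch (not part of this patch): a caller holding a rule and a
+ * register array would typically evaluate its expressions as
+ *
+ *	const struct nft_expr *expr, *last;
+ *
+ *	nft_rule_for_each_expr(expr, last, rule)
+ *		expr->ops->eval(expr, data, pkt);
+ *
+ * where "rule", "data" and "pkt" are assumed to be provided by the caller.
+ */
+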
+enum nft_chain_flags {
+       NFT_BASE_CHAIN                  = 0x1,
+};
+
+/**
+ *     struct nft_chain - nf_tables chain
+ *
+ *     @rules: list of rules in the chain
+ *     @list: used internally
+ *     @rcu_head: used internally
+ *     @net: net namespace that this chain belongs to
+ *     @table: table that this chain belongs to
+ *     @handle: chain handle
+ *     @flags: bitmask of enum nft_chain_flags
+ *     @use: number of jump references to this chain
+ *     @level: length of longest path to this chain
+ *     @name: name of the chain
+ */
+struct nft_chain {
+       struct list_head                rules;
+       struct list_head                list;
+       struct rcu_head                 rcu_head;
+       struct net                      *net;
+       struct nft_table                *table;
+       u64                             handle;
+       u8                              flags;
+       u16                             use;
+       u16                             level;
+       char                            name[NFT_CHAIN_MAXNAMELEN];
+};
+
+enum nft_chain_type {
+       NFT_CHAIN_T_DEFAULT = 0,
+       NFT_CHAIN_T_ROUTE,
+       NFT_CHAIN_T_NAT,
+       NFT_CHAIN_T_MAX
+};
+
+struct nft_stats {
+       u64 bytes;
+       u64 pkts;
+};
+
+/**
+ *     struct nft_base_chain - nf_tables base chain
+ *
+ *     @ops: netfilter hook ops
+ *     @type: chain type
+ *     @policy: default policy
+ *     @stats: per-cpu chain stats
+ *     @chain: the chain
+ */
+struct nft_base_chain {
+       struct nf_hook_ops              ops;
+       enum nft_chain_type             type;
+       u8                              policy;
+       struct nft_stats __percpu       *stats;
+       struct nft_chain                chain;
+};
+
+static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+{
+       return container_of(chain, struct nft_base_chain, chain);
+}
+
+unsigned int nft_do_chain_pktinfo(struct nft_pktinfo *pkt,
+                                 const struct nf_hook_ops *ops);
+
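+/*
+ * Illustrative sketch (not part of this patch): an address family hook would
+ * typically fill in the packet info and hand it to the core, e.g.
+ *
+ *	struct nft_pktinfo pkt;
+ *
+ *	nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ *	return nft_do_chain_pktinfo(&pkt, ops);
+ *
+ * where "ops", "skb", "in" and "out" are the netfilter hook arguments.
+ */
+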
+/**
+ *     struct nft_table - nf_tables table
+ *
+ *     @list: used internally
+ *     @chains: chains in the table
+ *     @sets: sets in the table
+ *     @hgenerator: handle generator state
+ *     @use: number of chain references to this table
+ *     @flags: table flags (see enum nft_table_flags)
+ *     @name: name of the table
+ */
+struct nft_table {
+       struct list_head                list;
+       struct list_head                chains;
+       struct list_head                sets;
+       u64                             hgenerator;
+       u32                             use;
+       u16                             flags;
+       char                            name[];
+};
+
+/**
+ *     struct nft_af_info - nf_tables address family info
+ *
+ *     @list: used internally
+ *     @family: address family
+ *     @nhooks: number of hooks in this family
+ *     @owner: module owner
+ *     @tables: used internally
+ *     @hooks: hookfn overrides for packet validation
+ */
+struct nft_af_info {
+       struct list_head                list;
+       int                             family;
+       unsigned int                    nhooks;
+       struct module                   *owner;
+       struct list_head                tables;
+       nf_hookfn                       *hooks[NF_MAX_HOOKS];
+};
+
+int nft_register_afinfo(struct net *, struct nft_af_info *);
+void nft_unregister_afinfo(struct nft_af_info *);
+
+struct nf_chain_type {
+       unsigned int            hook_mask;
+       const char              *name;
+       enum nft_chain_type     type;
+       nf_hookfn               *fn[NF_MAX_HOOKS];
+       struct module           *me;
+       int                     family;
+};
+
+int nft_register_chain_type(struct nf_chain_type *);
+void nft_unregister_chain_type(struct nf_chain_type *);
+
+int nft_register_expr(struct nft_expr_type *);
+void nft_unregister_expr(struct nft_expr_type *);
+
+#define MODULE_ALIAS_NFT_FAMILY(family)        \
+       MODULE_ALIAS("nft-afinfo-" __stringify(family))
+
+#define MODULE_ALIAS_NFT_CHAIN(family, name) \
+       MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_EXPR(name) \
+       MODULE_ALIAS("nft-expr-" name)
+
+#define MODULE_ALIAS_NFT_SET() \
+       MODULE_ALIAS("nft-set")
+
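+/*
+ * Illustrative sketch (not part of this patch): a module implementing a
+ * hypothetical expression "foo" would declare
+ *
+ *	MODULE_ALIAS_NFT_EXPR("foo");
+ *
+ * so that it can be autoloaded when a rule references that expression.
+ */
+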
+#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
new file mode 100644 (file)
index 0000000..cf2b7ae
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef _NET_NF_TABLES_CORE_H
+#define _NET_NF_TABLES_CORE_H
+
+int nf_tables_core_module_init(void);
+void nf_tables_core_module_exit(void);
+
+int nft_immediate_module_init(void);
+void nft_immediate_module_exit(void);
+
+struct nft_cmp_fast_expr {
+       u32                     data;
+       enum nft_registers      sreg:8;
+       u8                      len;
+};
+
+extern const struct nft_expr_ops nft_cmp_fast_ops;
+
+int nft_cmp_module_init(void);
+void nft_cmp_module_exit(void);
+
+int nft_lookup_module_init(void);
+void nft_lookup_module_exit(void);
+
+int nft_bitwise_module_init(void);
+void nft_bitwise_module_exit(void);
+
+int nft_byteorder_module_init(void);
+void nft_byteorder_module_exit(void);
+
+struct nft_payload {
+       enum nft_payload_bases  base:8;
+       u8                      offset;
+       u8                      len;
+       enum nft_registers      dreg:8;
+};
+
+extern const struct nft_expr_ops nft_payload_fast_ops;
+
+int nft_payload_module_init(void);
+void nft_payload_module_exit(void);
+
+#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
new file mode 100644 (file)
index 0000000..1be1c2c
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _NF_TABLES_IPV4_H_
+#define _NF_TABLES_IPV4_H_
+
+#include <net/netfilter/nf_tables.h>
+#include <net/ip.h>
+
+static inline void
+nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+                    const struct nf_hook_ops *ops,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out)
+{
+       struct iphdr *ip;
+
+       nft_set_pktinfo(pkt, ops, skb, in, out);
+
+       pkt->xt.thoff = ip_hdrlen(pkt->skb);
+       ip = ip_hdr(pkt->skb);
+       pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+}
+
+#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
new file mode 100644 (file)
index 0000000..4a9b88a
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _NF_TABLES_IPV6_H_
+#define _NF_TABLES_IPV6_H_
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ipv6.h>
+
+static inline int
+nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+                    const struct nf_hook_ops *ops,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out)
+{
+       int protohdr, thoff = 0;
+       unsigned short frag_off;
+
+       nft_set_pktinfo(pkt, ops, skb, in, out);
+
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       /* If malformed, drop it */
+       if (protohdr < 0)
+               return -1;
+
+       pkt->xt.thoff = thoff;
+       pkt->xt.fragoff = frag_off;
+
+       return 0;
+}
+
+#endif
index 495c71f66e7e9334b3506174dc7e5352399938f4..79f45e19f31eaa54deb8437ef4b883bec78def84 100644 (file)
@@ -16,7 +16,7 @@ struct xt_rateest {
        struct rcu_head                 rcu;
 };
 
-extern struct xt_rateest *xt_rateest_lookup(const char *name);
-extern void xt_rateest_put(struct xt_rateest *est);
+struct xt_rateest *xt_rateest_lookup(const char *name);
+void xt_rateest_put(struct xt_rateest *est);
 
 #endif /* _XT_RATEEST_H */
index 9690b0f6698a1d1b433ea572bdc4efb938f501c5..2b47eaadba8fad95b9bde8aeeace37fa03d1fc31 100644 (file)
@@ -225,44 +225,31 @@ struct nl_info {
        u32                     portid;
 };
 
-extern int             netlink_rcv_skb(struct sk_buff *skb,
-                                       int (*cb)(struct sk_buff *,
-                                                 struct nlmsghdr *));
-extern int             nlmsg_notify(struct sock *sk, struct sk_buff *skb,
-                                    u32 portid, unsigned int group, int report,
-                                    gfp_t flags);
-
-extern int             nla_validate(const struct nlattr *head,
-                                    int len, int maxtype,
-                                    const struct nla_policy *policy);
-extern int             nla_parse(struct nlattr **tb, int maxtype,
-                                 const struct nlattr *head, int len,
-                                 const struct nla_policy *policy);
-extern int             nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr * nla_find(const struct nlattr *head,
-                                int len, int attrtype);
-extern size_t          nla_strlcpy(char *dst, const struct nlattr *nla,
-                                   size_t dstsize);
-extern int             nla_memcpy(void *dest, const struct nlattr *src, int count);
-extern int             nla_memcmp(const struct nlattr *nla, const void *data,
-                                  size_t size);
-extern int             nla_strcmp(const struct nlattr *nla, const char *str);
-extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype,
-                                     int attrlen);
-extern void *          __nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype,
-                                   int attrlen);
-extern void *          nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern void            __nla_put(struct sk_buff *skb, int attrtype,
-                                 int attrlen, const void *data);
-extern void            __nla_put_nohdr(struct sk_buff *skb, int attrlen,
-                                       const void *data);
-extern int             nla_put(struct sk_buff *skb, int attrtype,
-                               int attrlen, const void *data);
-extern int             nla_put_nohdr(struct sk_buff *skb, int attrlen,
-                                     const void *data);
-extern int             nla_append(struct sk_buff *skb, int attrlen,
-                                  const void *data);
+int netlink_rcv_skb(struct sk_buff *skb,
+                   int (*cb)(struct sk_buff *, struct nlmsghdr *));
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+                unsigned int group, int report, gfp_t flags);
+
+int nla_validate(const struct nlattr *head, int len, int maxtype,
+                const struct nla_policy *policy);
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+             int len, const struct nla_policy *policy);
+int nla_policy_len(const struct nla_policy *, int);
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+int nla_memcpy(void *dest, const struct nlattr *src, int count);
+int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
+int nla_strcmp(const struct nlattr *nla, const char *str);
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+              const void *data);
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_append(struct sk_buff *skb, int attrlen, const void *data);
 
 /**************************************************************************
  * Netlink Messages
index bf2ec2202c5698b1bb73e22df60b4a3bc9d95c01..ee520cba2ec2f9f1ce1f011b985524946085fb60 100644 (file)
@@ -15,6 +15,10 @@ struct fib_rules_ops;
 struct hlist_head;
 struct fib_table;
 struct sock;
+struct local_ports {
+       seqlock_t       lock;
+       int             range[2];
+};
 
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
@@ -62,10 +66,11 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
+       struct local_ports sysctl_local_ports;
+
        int sysctl_tcp_ecn;
 
        kgid_t sysctl_ping_group_range[2];
-       long sysctl_tcp_mem[3];
 
        atomic_t dev_addr_genid;
 
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
new file mode 100644 (file)
index 0000000..15d056d
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _NETNS_NFTABLES_H_
+#define _NETNS_NFTABLES_H_
+
+#include <linux/list.h>
+
+struct nft_af_info;
+
+struct netns_nftables {
+       struct list_head        af_info;
+       struct list_head        commit_list;
+       struct nft_af_info      *ipv4;
+       struct nft_af_info      *ipv6;
+       struct nft_af_info      *arp;
+       struct nft_af_info      *bridge;
+       u8                      gencursor;
+       u8                      genctr;
+};
+
+#endif
index 121dcf854db54ea418aff35a05677013d05e53bc..110350aca3dfe1c846e05f3f9a61e5cc8afe64d4 100644 (file)
@@ -183,51 +183,50 @@ extern int  sysctl_netrom_routing_control;
 extern int  sysctl_netrom_link_fails_count;
 extern int  sysctl_netrom_reset_circuit;
 
-extern int  nr_rx_frame(struct sk_buff *, struct net_device *);
-extern void nr_destroy_socket(struct sock *);
+int nr_rx_frame(struct sk_buff *, struct net_device *);
+void nr_destroy_socket(struct sock *);
 
 /* nr_dev.c */
-extern int  nr_rx_ip(struct sk_buff *, struct net_device *);
-extern void nr_setup(struct net_device *);
+int nr_rx_ip(struct sk_buff *, struct net_device *);
+void nr_setup(struct net_device *);
 
 /* nr_in.c */
-extern int  nr_process_rx_frame(struct sock *, struct sk_buff *);
+int nr_process_rx_frame(struct sock *, struct sk_buff *);
 
 /* nr_loopback.c */
-extern void nr_loopback_init(void);
-extern void nr_loopback_clear(void);
-extern int  nr_loopback_queue(struct sk_buff *);
+void nr_loopback_init(void);
+void nr_loopback_clear(void);
+int nr_loopback_queue(struct sk_buff *);
 
 /* nr_out.c */
-extern void nr_output(struct sock *, struct sk_buff *);
-extern void nr_send_nak_frame(struct sock *);
-extern void nr_kick(struct sock *);
-extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
-extern void nr_establish_data_link(struct sock *);
-extern void nr_enquiry_response(struct sock *);
-extern void nr_check_iframes_acked(struct sock *, unsigned short);
+void nr_output(struct sock *, struct sk_buff *);
+void nr_send_nak_frame(struct sock *);
+void nr_kick(struct sock *);
+void nr_transmit_buffer(struct sock *, struct sk_buff *);
+void nr_establish_data_link(struct sock *);
+void nr_enquiry_response(struct sock *);
+void nr_check_iframes_acked(struct sock *, unsigned short);
 
 /* nr_route.c */
-extern void nr_rt_device_down(struct net_device *);
-extern struct net_device *nr_dev_first(void);
-extern struct net_device *nr_dev_get(ax25_address *);
-extern int  nr_rt_ioctl(unsigned int, void __user *);
-extern void nr_link_failed(ax25_cb *, int);
-extern int  nr_route_frame(struct sk_buff *, ax25_cb *);
+void nr_rt_device_down(struct net_device *);
+struct net_device *nr_dev_first(void);
+struct net_device *nr_dev_get(ax25_address *);
+int nr_rt_ioctl(unsigned int, void __user *);
+void nr_link_failed(ax25_cb *, int);
+int nr_route_frame(struct sk_buff *, ax25_cb *);
 extern const struct file_operations nr_nodes_fops;
 extern const struct file_operations nr_neigh_fops;
-extern void nr_rt_free(void);
+void nr_rt_free(void);
 
 /* nr_subr.c */
-extern void nr_clear_queues(struct sock *);
-extern void nr_frames_acked(struct sock *, unsigned short);
-extern void nr_requeue_frames(struct sock *);
-extern int  nr_validate_nr(struct sock *, unsigned short);
-extern int  nr_in_rx_window(struct sock *, unsigned short);
-extern void nr_write_internal(struct sock *, int);
+void nr_clear_queues(struct sock *);
+void nr_frames_acked(struct sock *, unsigned short);
+void nr_requeue_frames(struct sock *);
+int nr_validate_nr(struct sock *, unsigned short);
+int nr_in_rx_window(struct sock *, unsigned short);
+void nr_write_internal(struct sock *, int);
 
-extern void __nr_transmit_reply(struct sk_buff *skb, int mine,
-       unsigned char cmdflags);
+void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
 
 /*
  * This routine is called when a Connect Acknowledge with the Choke Flag
@@ -247,24 +246,24 @@ do {                                                                      \
        __nr_transmit_reply((skb), (mine), NR_RESET);                   \
 } while (0)
 
-extern void nr_disconnect(struct sock *, int);
+void nr_disconnect(struct sock *, int);
 
 /* nr_timer.c */
-extern void nr_init_timers(struct sock *sk);
-extern void nr_start_heartbeat(struct sock *);
-extern void nr_start_t1timer(struct sock *);
-extern void nr_start_t2timer(struct sock *);
-extern void nr_start_t4timer(struct sock *);
-extern void nr_start_idletimer(struct sock *);
-extern void nr_stop_heartbeat(struct sock *);
-extern void nr_stop_t1timer(struct sock *);
-extern void nr_stop_t2timer(struct sock *);
-extern void nr_stop_t4timer(struct sock *);
-extern void nr_stop_idletimer(struct sock *);
-extern int  nr_t1timer_running(struct sock *);
+void nr_init_timers(struct sock *sk);
+void nr_start_heartbeat(struct sock *);
+void nr_start_t1timer(struct sock *);
+void nr_start_t2timer(struct sock *);
+void nr_start_t4timer(struct sock *);
+void nr_start_idletimer(struct sock *);
+void nr_stop_heartbeat(struct sock *);
+void nr_stop_t1timer(struct sock *);
+void nr_stop_t2timer(struct sock *);
+void nr_stop_t4timer(struct sock *);
+void nr_stop_idletimer(struct sock *);
+int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-extern void nr_register_sysctl(void);
-extern void nr_unregister_sysctl(void);
+void nr_register_sysctl(void);
+void nr_unregister_sysctl(void);
 
 #endif
index 42e9fac51b3115f86d0a766d8d19c90cc812ac0e..05e41383856bdb0a10db5606c5fbd3040a297a21 100644 (file)
@@ -1,13 +1,13 @@
 #ifndef _NET_P8022_H
 #define _NET_P8022_H
-extern struct datalink_proto *
-       register_8022_client(unsigned char type,
-                            int (*func)(struct sk_buff *skb,
-                                        struct net_device *dev,
-                                        struct packet_type *pt,
-                                        struct net_device *orig_dev));
-extern void unregister_8022_client(struct datalink_proto *proto);
+struct datalink_proto *
+register_8022_client(unsigned char type,
+                    int (*func)(struct sk_buff *skb,
+                                struct net_device *dev,
+                                struct packet_type *pt,
+                                struct net_device *orig_dev));
+void unregister_8022_client(struct datalink_proto *proto);
 
-extern struct datalink_proto *make_8023_client(void);
-extern void destroy_8023_client(struct datalink_proto *dl);
+struct datalink_proto *make_8023_client(void);
+void destroy_8023_client(struct datalink_proto *dl);
 #endif
index 5db0224b73ac47b1513ffb9361e48aa0167bb24d..3f67704f3747281b3ad0852fd5d69dcd49c9b01c 100644 (file)
@@ -103,8 +103,8 @@ void ping_seq_stop(struct seq_file *seq, void *v);
 int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
 void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
 
-extern int __init ping_proc_init(void);
-extern void ping_proc_exit(void);
+int __init ping_proc_init(void);
+void ping_proc_exit(void);
 #endif
 
 void __init ping_init(void);
index 047c0476c0a095c2b271713ca232f7099564558b..fbf7676c9a02e352890b66d0bc3caf1775dbb199 100644 (file)
@@ -96,20 +96,20 @@ extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
-extern int     inet_add_protocol(const struct net_protocol *prot, unsigned char num);
-extern int     inet_del_protocol(const struct net_protocol *prot, unsigned char num);
-extern int     inet_add_offload(const struct net_offload *prot, unsigned char num);
-extern int     inet_del_offload(const struct net_offload *prot, unsigned char num);
-extern void    inet_register_protosw(struct inet_protosw *p);
-extern void    inet_unregister_protosw(struct inet_protosw *p);
+int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_add_offload(const struct net_offload *prot, unsigned char num);
+int inet_del_offload(const struct net_offload *prot, unsigned char num);
+void inet_register_protosw(struct inet_protosw *p);
+void inet_unregister_protosw(struct inet_protosw *p);
 
 #if IS_ENABLED(CONFIG_IPV6)
-extern int     inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int     inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int     inet6_register_protosw(struct inet_protosw *p);
-extern void    inet6_unregister_protosw(struct inet_protosw *p);
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_register_protosw(struct inet_protosw *p);
+void inet6_unregister_protosw(struct inet_protosw *p);
 #endif
-extern int     inet6_add_offload(const struct net_offload *prot, unsigned char num);
-extern int     inet6_del_offload(const struct net_offload *prot, unsigned char num);
+int inet6_add_offload(const struct net_offload *prot, unsigned char num);
+int inet6_del_offload(const struct net_offload *prot, unsigned char num);
 
 #endif /* _PROTOCOL_H */
index fe456c295b04400e3e05d489ea7d22349ec486b4..78db4cc1306a147ccb178265f265b9fd8e864ed5 100644 (file)
@@ -1,11 +1,11 @@
 #ifndef _NET_PSNAP_H
 #define _NET_PSNAP_H
 
-extern struct datalink_proto *
+struct datalink_proto *
 register_snap_client(const unsigned char *desc,
                     int (*rcvfunc)(struct sk_buff *, struct net_device *,
                                    struct packet_type *,
                                    struct net_device *orig_dev));
-extern void unregister_snap_client(struct datalink_proto *proto);
+void unregister_snap_client(struct datalink_proto *proto);
 
 #endif
index 42ce6fe7a2d519c84c0f171cebaad87f537b23b9..6a40c6562dd2a39ac5b4f6ee4a1331b0559bfb02 100644 (file)
@@ -26,7 +26,7 @@ extern struct proto raw_prot;
 void raw_icmp_error(struct sk_buff *, int, u32);
 int raw_local_deliver(struct sk_buff *, int);
 
-extern int     raw_rcv(struct sock *, struct sk_buff *);
+int raw_rcv(struct sock *, struct sk_buff *);
 
 #define RAW_HTABLE_SIZE        MAX_INET_PROTOS
 
@@ -36,8 +36,8 @@ struct raw_hashinfo {
 };
 
 #ifdef CONFIG_PROC_FS
-extern int  raw_proc_init(void);
-extern void raw_proc_exit(void);
+int raw_proc_init(void);
+void raw_proc_exit(void);
 
 struct raw_iter_state {
        struct seq_net_private p;
index e7ea660e4db606139e54567d2ed7d14b93eec4f4..87783dea0791c616a9bac5a164e4421dc3ef2363 100644 (file)
@@ -7,8 +7,7 @@ void raw6_icmp_error(struct sk_buff *, int nexthdr,
                u8 type, u8 code, int inner_offset, __be32);
 bool raw6_local_deliver(struct sk_buff *, int);
 
-extern int                     rawv6_rcv(struct sock *sk,
-                                         struct sk_buff *skb);
+int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
index 59795e42c8b61464512184990c3a12698c9a184f..7f830ff67f08c318717e809221d1586f17948185 100644 (file)
@@ -43,11 +43,12 @@ struct request_sock_ops {
                                           struct request_sock *req);
 };
 
-extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
 
 /* struct request_sock - mini sock to represent a connection request
  */
 struct request_sock {
+       struct sock_common              __req_common;
        struct request_sock             *dl_next;
        u16                             mss;
        u8                              num_retrans; /* number of retransmits */
@@ -162,13 +163,13 @@ struct request_sock_queue {
                                             */
 };
 
-extern int reqsk_queue_alloc(struct request_sock_queue *queue,
-                            unsigned int nr_table_entries);
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+                     unsigned int nr_table_entries);
 
-extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_fastopen_remove(struct sock *sk,
-                                 struct request_sock *req, bool reset);
+void __reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+                          bool reset);
 
 static inline struct request_sock *
        reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
index 555dd198aab755c9e9ef21a751992ccf653b64c7..50811fe2c585b0e9fd8ffb365971229872eb353e 100644 (file)
@@ -160,38 +160,42 @@ extern int  sysctl_rose_routing_control;
 extern int  sysctl_rose_link_fail_timeout;
 extern int  sysctl_rose_maximum_vcs;
 extern int  sysctl_rose_window_size;
-extern int  rosecmp(rose_address *, rose_address *);
-extern int  rosecmpm(rose_address *, rose_address *, unsigned short);
-extern char *rose2asc(char *buf, const rose_address *);
-extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
-extern void rose_kill_by_neigh(struct rose_neigh *);
-extern unsigned int rose_new_lci(struct rose_neigh *);
-extern int  rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
-extern void rose_destroy_socket(struct sock *);
+
+int rosecmp(rose_address *, rose_address *);
+int rosecmpm(rose_address *, rose_address *, unsigned short);
+char *rose2asc(char *buf, const rose_address *);
+struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+void rose_kill_by_neigh(struct rose_neigh *);
+unsigned int rose_new_lci(struct rose_neigh *);
+int rose_rx_call_request(struct sk_buff *, struct net_device *,
+                        struct rose_neigh *, unsigned int);
+void rose_destroy_socket(struct sock *);
 
 /* rose_dev.c */
-extern void  rose_setup(struct net_device *);
+void rose_setup(struct net_device *);
 
 /* rose_in.c */
-extern int  rose_process_rx_frame(struct sock *, struct sk_buff *);
+int rose_process_rx_frame(struct sock *, struct sk_buff *);
 
 /* rose_link.c */
-extern void rose_start_ftimer(struct rose_neigh *);
-extern void rose_stop_ftimer(struct rose_neigh *);
-extern void rose_stop_t0timer(struct rose_neigh *);
-extern int  rose_ftimer_running(struct rose_neigh *);
-extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
-extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
-extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+void rose_start_ftimer(struct rose_neigh *);
+void rose_stop_ftimer(struct rose_neigh *);
+void rose_stop_t0timer(struct rose_neigh *);
+int rose_ftimer_running(struct rose_neigh *);
+void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *,
+                         unsigned short);
+void rose_transmit_clear_request(struct rose_neigh *, unsigned int,
+                                unsigned char, unsigned char);
+void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
 
 /* rose_loopback.c */
-extern void rose_loopback_init(void);
-extern void rose_loopback_clear(void);
-extern int  rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+void rose_loopback_init(void);
+void rose_loopback_clear(void);
+int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
 
 /* rose_out.c */
-extern void rose_kick(struct sock *);
-extern void rose_enquiry_response(struct sock *);
+void rose_kick(struct sock *);
+void rose_enquiry_response(struct sock *);
 
 /* rose_route.c */
 extern struct rose_neigh *rose_loopback_neigh;
@@ -199,43 +203,45 @@ extern const struct file_operations rose_neigh_fops;
 extern const struct file_operations rose_nodes_fops;
 extern const struct file_operations rose_routes_fops;
 
-extern void rose_add_loopback_neigh(void);
-extern int __must_check rose_add_loopback_node(rose_address *);
-extern void rose_del_loopback_node(rose_address *);
-extern void rose_rt_device_down(struct net_device *);
-extern void rose_link_device_down(struct net_device *);
-extern struct net_device *rose_dev_first(void);
-extern struct net_device *rose_dev_get(rose_address *);
-extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
-extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
-extern int  rose_rt_ioctl(unsigned int, void __user *);
-extern void rose_link_failed(ax25_cb *, int);
-extern int  rose_route_frame(struct sk_buff *, ax25_cb *);
-extern void rose_rt_free(void);
+void rose_add_loopback_neigh(void);
+int __must_check rose_add_loopback_node(rose_address *);
+void rose_del_loopback_node(rose_address *);
+void rose_rt_device_down(struct net_device *);
+void rose_link_device_down(struct net_device *);
+struct net_device *rose_dev_first(void);
+struct net_device *rose_dev_get(rose_address *);
+struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *,
+                                 unsigned char *, int);
+int rose_rt_ioctl(unsigned int, void __user *);
+void rose_link_failed(ax25_cb *, int);
+int rose_route_frame(struct sk_buff *, ax25_cb *);
+void rose_rt_free(void);
 
 /* rose_subr.c */
-extern void rose_clear_queues(struct sock *);
-extern void rose_frames_acked(struct sock *, unsigned short);
-extern void rose_requeue_frames(struct sock *);
-extern int  rose_validate_nr(struct sock *, unsigned short);
-extern void rose_write_internal(struct sock *, int);
-extern int  rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
-extern int  rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
-extern void rose_disconnect(struct sock *, int, int, int);
+void rose_clear_queues(struct sock *);
+void rose_frames_acked(struct sock *, unsigned short);
+void rose_requeue_frames(struct sock *);
+int rose_validate_nr(struct sock *, unsigned short);
+void rose_write_internal(struct sock *, int);
+int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+int rose_parse_facilities(unsigned char *, unsigned int,
+                         struct rose_facilities_struct *);
+void rose_disconnect(struct sock *, int, int, int);
 
 /* rose_timer.c */
-extern void rose_start_heartbeat(struct sock *);
-extern void rose_start_t1timer(struct sock *);
-extern void rose_start_t2timer(struct sock *);
-extern void rose_start_t3timer(struct sock *);
-extern void rose_start_hbtimer(struct sock *);
-extern void rose_start_idletimer(struct sock *);
-extern void rose_stop_heartbeat(struct sock *);
-extern void rose_stop_timer(struct sock *);
-extern void rose_stop_idletimer(struct sock *);
+void rose_start_heartbeat(struct sock *);
+void rose_start_t1timer(struct sock *);
+void rose_start_t2timer(struct sock *);
+void rose_start_t3timer(struct sock *);
+void rose_start_hbtimer(struct sock *);
+void rose_start_idletimer(struct sock *);
+void rose_stop_heartbeat(struct sock *);
+void rose_stop_timer(struct sock *);
+void rose_stop_idletimer(struct sock *);
 
 /* sysctl_net_rose.c */
-extern void rose_register_sysctl(void);
-extern void rose_unregister_sysctl(void);
+void rose_register_sysctl(void);
+void rose_unregister_sysctl(void);
 
 #endif
index afdeeb5bec251ac773f03d8c7f0b830c9fa137e2..dd4ae0029fd802936b6628af9544cb51f77616ff 100644 (file)
@@ -39,6 +39,7 @@
 #define RTO_ONLINK     0x01
 
 #define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk,tos)   (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
 
 struct fib_nh;
 struct fib_info;
@@ -87,34 +88,28 @@ struct ip_rt_acct {
 };
 
 struct rt_cache_stat {
-        unsigned int in_hit;
         unsigned int in_slow_tot;
         unsigned int in_slow_mc;
         unsigned int in_no_route;
         unsigned int in_brd;
         unsigned int in_martian_dst;
         unsigned int in_martian_src;
-        unsigned int out_hit;
         unsigned int out_slow_tot;
         unsigned int out_slow_mc;
-        unsigned int gc_total;
-        unsigned int gc_ignored;
-        unsigned int gc_goal_miss;
-        unsigned int gc_dst_overflow;
-        unsigned int in_hlist_search;
-        unsigned int out_hlist_search;
 };
 
 extern struct ip_rt_acct __percpu *ip_rt_acct;
 
 struct in_device;
-extern int             ip_rt_init(void);
-extern void            rt_cache_flush(struct net *net);
-extern void            rt_flush_dev(struct net_device *dev);
-extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
-extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
-                                          struct sock *sk);
-extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
+
+int ip_rt_init(void);
+void rt_cache_flush(struct net *net);
+void rt_flush_dev(struct net_device *dev);
+struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+                                   struct sock *sk);
+struct dst_entry *ipv4_blackhole_route(struct net *net,
+                                      struct dst_entry *dst_orig);
 
 static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
 {
@@ -162,8 +157,8 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
        return ip_route_output_key(net, fl4);
 }
 
-extern int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
-                               u8 tos, struct net_device *devin);
+int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+                        u8 tos, struct net_device *devin);
 
 static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
                                 u8 tos, struct net_device *devin)
@@ -179,24 +174,25 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
        return err;
 }
 
-extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
-                            int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
-extern void ipv4_redirect(struct sk_buff *skb, struct net *net,
-                         int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
-extern void ip_rt_send_redirect(struct sk_buff *skb);
-
-extern unsigned int            inet_addr_type(struct net *net, __be32 addr);
-extern unsigned int            inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
-extern void            ip_rt_multicast_event(struct in_device *);
-extern int             ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
-extern void            ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
-extern int             ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
+                     u32 mark, u8 protocol, int flow_flags);
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+                  u8 protocol, int flow_flags);
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip_rt_send_redirect(struct sk_buff *skb);
+
+unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
+                               __be32 addr);
+void ip_rt_multicast_event(struct in_device *);
+int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);
 
 struct in_ifaddr;
-extern void fib_add_ifaddr(struct in_ifaddr *);
-extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void fib_add_ifaddr(struct in_ifaddr *);
+void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
index 702664833a53d65c24e3b60316bbf37804e013c3..bb13a182fba6ed076851f58590ab662e429bf01a 100644 (file)
@@ -8,14 +8,12 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *);
 typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
 typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
 
-extern int     __rtnl_register(int protocol, int msgtype,
-                               rtnl_doit_func, rtnl_dumpit_func,
-                               rtnl_calcit_func);
-extern void    rtnl_register(int protocol, int msgtype,
-                             rtnl_doit_func, rtnl_dumpit_func,
-                             rtnl_calcit_func);
-extern int     rtnl_unregister(int protocol, int msgtype);
-extern void    rtnl_unregister_all(int protocol);
+int __rtnl_register(int protocol, int msgtype,
+                   rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+void rtnl_register(int protocol, int msgtype,
+                  rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+int rtnl_unregister(int protocol, int msgtype);
+void rtnl_unregister_all(int protocol);
 
 static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
 {
@@ -83,11 +81,11 @@ struct rtnl_link_ops {
        unsigned int            (*get_num_rx_queues)(void);
 };
 
-extern int     __rtnl_link_register(struct rtnl_link_ops *ops);
-extern void    __rtnl_link_unregister(struct rtnl_link_ops *ops);
+int __rtnl_link_register(struct rtnl_link_ops *ops);
+void __rtnl_link_unregister(struct rtnl_link_ops *ops);
 
-extern int     rtnl_link_register(struct rtnl_link_ops *ops);
-extern void    rtnl_link_unregister(struct rtnl_link_ops *ops);
+int rtnl_link_register(struct rtnl_link_ops *ops);
+void rtnl_link_unregister(struct rtnl_link_ops *ops);
 
 /**
  *     struct rtnl_af_ops - rtnetlink address family operations
@@ -117,18 +115,18 @@ struct rtnl_af_ops {
                                               const struct nlattr *attr);
 };
 
-extern int     __rtnl_af_register(struct rtnl_af_ops *ops);
-extern void    __rtnl_af_unregister(struct rtnl_af_ops *ops);
+int __rtnl_af_register(struct rtnl_af_ops *ops);
+void __rtnl_af_unregister(struct rtnl_af_ops *ops);
 
-extern int     rtnl_af_register(struct rtnl_af_ops *ops);
-extern void    rtnl_af_unregister(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_unregister(struct rtnl_af_ops *ops);
 
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+                                   const struct rtnl_link_ops *ops,
+                                   struct nlattr *tb[]);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
-extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-extern struct net_device *rtnl_create_link(struct net *net,
-       char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
-extern int rtnl_configure_link(struct net_device *dev,
-                              const struct ifinfomsg *ifm);
 extern const struct nla_policy ifla_policy[IFLA_MAX+1];
 
 #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
index f4eb365f7dcdadbab70e60940c519e3f83ea3253..d0a6321c302ef2316628b747da27f37cada0b920 100644 (file)
@@ -702,13 +702,20 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
 }
 
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
-                              const struct tc_ratespec *conf);
+                              const struct tc_ratespec *conf,
+                              u64 rate64);
 
 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
 {
        memset(res, 0, sizeof(*res));
-       res->rate = r->rate_bytes_ps;
+
+       /* legacy struct tc_ratespec has a 32bit @rate field;
+        * Qdiscs using 64bit rates should add new attributes
+        * in order to maintain compatibility.
+        */
+       res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
index 8de2d37d2077f2df685baec887b1f836a8fa6514..262532d111f51e3a91a06af785b4721e8fab9d56 100644 (file)
@@ -33,11 +33,11 @@ struct scm_cookie {
 #endif
 };
 
-extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
-extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
-extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
-extern void __scm_destroy(struct scm_cookie *scm);
-extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+void __scm_destroy(struct scm_cookie *scm);
+struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);
 
 #ifdef CONFIG_SECURITY_NETWORK
 static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
index 3794c5ad20fef72a960c34323dab275d6f26bac7..c5fe80697f8d442d9c52cb4aabc9b4edfff85223 100644 (file)
 /*
  * sctp/protocol.c
  */
-extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
-                                    sctp_scope_t, gfp_t gfp,
-                                    int flags);
-extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
-extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
+                             sctp_scope_t, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -110,7 +109,7 @@ void sctp_sock_rfree(struct sk_buff *skb);
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
 extern struct percpu_counter sctp_sockets_allocated;
-extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 
 /*
  * sctp/primitive.c
index 6ca975bebd37deea98470a75053f399671092498..f257486f17be4bed528826544359c569e6a4b2dd 100644 (file)
@@ -3,19 +3,18 @@
 
 #include <linux/types.h>
 
-extern void net_secret_init(void);
-extern __u32 secure_ip_id(__be32 daddr);
-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
-                                     __be16 dport);
-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
-                                       __be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
-                                         __be16 sport, __be16 dport);
-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
-                                      __be16 sport, __be16 dport);
-extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
-                                        __be16 sport, __be16 dport);
+__u32 secure_ip_id(__be32 daddr);
+__u32 secure_ipv6_id(const __be32 daddr[4]);
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+                              __be16 dport);
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+                                __be16 sport, __be16 dport);
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+                                  __be16 sport, __be16 dport);
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+                               __be16 sport, __be16 dport);
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+                                 __be16 sport, __be16 dport);
 
 #endif /* _NET_SECURE_SEQ */
index 6ba2e7b0e2b1300f3983ef4d3c845e257d46c579..e3a18ff0c38b58aec20413489bf7237c54a14a77 100644 (file)
@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
  */
 struct sock_common {
        /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
-        * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
+        * address on 64bit arches : cf INET_MATCH()
         */
        union {
                __addrpair      skc_addrpair;
@@ -191,6 +191,12 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
        struct net              *skc_net;
 #endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr         skc_v6_daddr;
+       struct in6_addr         skc_v6_rcv_saddr;
+#endif
+
        /*
         * fields between dontcopy_begin/dontcopy_end
         * are not copied in sock_copy()
@@ -218,7 +224,7 @@ struct cg_proto;
   *    @sk_lock:       synchronizer
   *    @sk_rcvbuf: size of receive buffer in bytes
   *    @sk_wq: sock wait queue and async head
-  *    @sk_rx_dst: receive input route used by early tcp demux
+  *    @sk_rx_dst: receive input route used by early demux
   *    @sk_dst_cache: destination cache
   *    @sk_dst_lock: destination cache lock
   *    @sk_policy: flow policy
@@ -233,6 +239,7 @@ struct cg_proto;
   *    @sk_ll_usec: usecs to busypoll when there is no data
   *    @sk_allocation: allocation mode
   *    @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+  *    @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
   *    @sk_sndbuf: size of send buffer in bytes
   *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
   *               %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +306,12 @@ struct sock {
 #define sk_dontcopy_begin      __sk_common.skc_dontcopy_begin
 #define sk_dontcopy_end                __sk_common.skc_dontcopy_end
 #define sk_hash                        __sk_common.skc_hash
+#define sk_portpair            __sk_common.skc_portpair
+#define sk_num                 __sk_common.skc_num
+#define sk_dport               __sk_common.skc_dport
+#define sk_addrpair            __sk_common.skc_addrpair
+#define sk_daddr               __sk_common.skc_daddr
+#define sk_rcv_saddr           __sk_common.skc_rcv_saddr
 #define sk_family              __sk_common.skc_family
 #define sk_state               __sk_common.skc_state
 #define sk_reuse               __sk_common.skc_reuse
@@ -307,6 +320,9 @@ struct sock {
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
 #define sk_net                 __sk_common.skc_net
+#define sk_v6_daddr            __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr        __sk_common.skc_v6_rcv_saddr
+
        socket_lock_t           sk_lock;
        struct sk_buff_head     sk_receive_queue;
        /*
@@ -363,6 +379,7 @@ struct sock {
        int                     sk_wmem_queued;
        gfp_t                   sk_allocation;
        u32                     sk_pacing_rate; /* bytes per second */
+       u32                     sk_max_pacing_rate;
        netdev_features_t       sk_route_caps;
        netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
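/* Illustrative sketch (editor's addition, not part of this commit): the new
 * sk_max_pacing_rate field backs the SO_MAX_PACING_RATE socket option noted
 * in the kerneldoc above, so a userspace sender could cap its pacing roughly
 * as below.  Availability of SO_MAX_PACING_RATE in <sys/socket.h> is an
 * assumption of this sketch.
 */
#include <sys/socket.h>

static int cap_pacing_rate(int fd)
{
	unsigned int rate = 1000000;	/* bytes per second, ~1 MB/s */

	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &rate, sizeof(rate));
}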
@@ -409,6 +426,11 @@ struct sock {
        void                    (*sk_destruct)(struct sock *sk);
 };
 
+#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+
+#define rcu_dereference_sk_user_data(sk)       rcu_dereference(__sk_user_data((sk)))
+#define rcu_assign_sk_user_data(sk, ptr)       rcu_assign_pointer(__sk_user_data((sk)), ptr)
+
 /*
  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
  * or not whether his port will be reused by someone else. SK_FORCE_REUSE
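/* Illustrative sketch (editor's addition, not part of this commit): typical
 * use of the new RCU-aware sk_user_data accessors by a protocol module.
 * The structure and function names are hypothetical.
 */
struct my_sk_priv {
	u32 counter;
};

static void my_sk_priv_attach(struct sock *sk, struct my_sk_priv *priv)
{
	rcu_assign_sk_user_data(sk, priv);	/* publish pointer with release semantics */
}

static u32 my_sk_priv_counter(struct sock *sk)
{
	struct my_sk_priv *priv;
	u32 val = 0;

	rcu_read_lock();
	priv = rcu_dereference_sk_user_data(sk);	/* RCU-protected read */
	if (priv)
		val = priv->counter;
	rcu_read_unlock();

	return val;
}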
@@ -746,7 +768,7 @@ static inline int sk_stream_wspace(const struct sock *sk)
        return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
-extern void sk_stream_write_space(struct sock *sk);
+void sk_stream_write_space(struct sock *sk);
 
 /* OOB backlog add */
 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -788,7 +810,7 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
        return 0;
 }
 
-extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
@@ -853,15 +875,15 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
                __rc;                                                   \
        })
 
-extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
-extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
-extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
-extern int sk_stream_error(struct sock *sk, int flags, int err);
-extern void sk_stream_kill_queues(struct sock *sk);
-extern void sk_set_memalloc(struct sock *sk);
-extern void sk_clear_memalloc(struct sock *sk);
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
 
-extern int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo);
 
 struct request_sock_ops;
 struct timewait_sock_ops;
@@ -1014,10 +1036,10 @@ enum cg_proto_flags {
 
 struct cg_proto {
        void                    (*enter_memory_pressure)(struct sock *sk);
-       struct res_counter      *memory_allocated;      /* Current allocated memory. */
-       struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
-       int                     *memory_pressure;
-       long                    *sysctl_mem;
+       struct res_counter      memory_allocated;       /* Current allocated memory. */
+       struct percpu_counter   sockets_allocated;      /* Current number of sockets. */
+       int                     memory_pressure;
+       long                    sysctl_mem[3];
        unsigned long           flags;
        /*
         * memcg field is used to find which memcg we belong directly
@@ -1031,8 +1053,8 @@ struct cg_proto {
        struct mem_cgroup       *memcg;
 };
 
-extern int proto_register(struct proto *prot, int alloc_slab);
-extern void proto_unregister(struct proto *prot);
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
 
 static inline bool memcg_proto_active(struct cg_proto *cg_proto)
 {
@@ -1113,7 +1135,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
                return false;
 
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-               return !!*sk->sk_cgrp->memory_pressure;
+               return !!sk->sk_cgrp->memory_pressure;
 
        return !!*sk->sk_prot->memory_pressure;
 }
@@ -1133,8 +1155,8 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
                struct proto *prot = sk->sk_prot;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       if (*cg_proto->memory_pressure)
-                               *cg_proto->memory_pressure = 0;
+                       if (cg_proto->memory_pressure)
+                               cg_proto->memory_pressure = 0;
        }
 
 }
@@ -1170,7 +1192,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
        struct res_counter *fail;
        int ret;
 
-       ret = res_counter_charge_nofail(prot->memory_allocated,
+       ret = res_counter_charge_nofail(&prot->memory_allocated,
                                        amt << PAGE_SHIFT, &fail);
        if (ret < 0)
                *parent_status = OVER_LIMIT;
@@ -1179,13 +1201,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
                                              unsigned long amt)
 {
-       res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+       res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
 }
 
 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
 {
        u64 ret;
-       ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+       ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
        return ret >> PAGE_SHIFT;
 }
 
@@ -1233,7 +1255,7 @@ static inline void sk_sockets_allocated_dec(struct sock *sk)
                struct cg_proto *cg_proto = sk->sk_cgrp;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       percpu_counter_dec(cg_proto->sockets_allocated);
+                       percpu_counter_dec(&cg_proto->sockets_allocated);
        }
 
        percpu_counter_dec(prot->sockets_allocated);
@@ -1247,7 +1269,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
                struct cg_proto *cg_proto = sk->sk_cgrp;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       percpu_counter_inc(cg_proto->sockets_allocated);
+                       percpu_counter_inc(&cg_proto->sockets_allocated);
        }
 
        percpu_counter_inc(prot->sockets_allocated);
@@ -1259,7 +1281,7 @@ sk_sockets_allocated_read_positive(struct sock *sk)
        struct proto *prot = sk->sk_prot;
 
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-               return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+               return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
 
        return percpu_counter_read_positive(prot->sockets_allocated);
 }
@@ -1287,8 +1309,8 @@ proto_memory_pressure(struct proto *prot)
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
                int inc)
@@ -1364,8 +1386,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 /*
  * Functions for memory accounting
  */
-extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reclaim(struct sock *sk);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1473,14 +1495,14 @@ do {                                                                    \
        lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
 } while (0)
 
-extern void lock_sock_nested(struct sock *sk, int subclass);
+void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
 {
        lock_sock_nested(sk, 0);
 }
 
-extern void release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)     spin_lock(&((__sk)->sk_lock.slock))
@@ -1489,7 +1511,7 @@ extern void release_sock(struct sock *sk);
                                SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)   spin_unlock(&((__sk)->sk_lock.slock))
 
-extern bool lock_sock_fast(struct sock *sk);
+bool lock_sock_fast(struct sock *sk);
 /**
  * unlock_sock_fast - complement of lock_sock_fast
  * @sk: socket
@@ -1507,108 +1529,84 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 }
 
 
-extern struct sock             *sk_alloc(struct net *net, int family,
-                                         gfp_t priority,
-                                         struct proto *prot);
-extern void                    sk_free(struct sock *sk);
-extern void                    sk_release_kernel(struct sock *sk);
-extern struct sock             *sk_clone_lock(const struct sock *sk,
-                                              const gfp_t priority);
-
-extern struct sk_buff          *sock_wmalloc(struct sock *sk,
-                                             unsigned long size, int force,
-                                             gfp_t priority);
-extern struct sk_buff          *sock_rmalloc(struct sock *sk,
-                                             unsigned long size, int force,
-                                             gfp_t priority);
-extern void                    sock_wfree(struct sk_buff *skb);
-extern void                    skb_orphan_partial(struct sk_buff *skb);
-extern void                    sock_rfree(struct sk_buff *skb);
-extern void                    sock_edemux(struct sk_buff *skb);
-
-extern int                     sock_setsockopt(struct socket *sock, int level,
-                                               int op, char __user *optval,
-                                               unsigned int optlen);
-
-extern int                     sock_getsockopt(struct socket *sock, int level,
-                                               int op, char __user *optval,
-                                               int __user *optlen);
-extern struct sk_buff          *sock_alloc_send_skb(struct sock *sk,
-                                                    unsigned long size,
-                                                    int noblock,
-                                                    int *errcode);
-extern struct sk_buff          *sock_alloc_send_pskb(struct sock *sk,
-                                                     unsigned long header_len,
-                                                     unsigned long data_len,
-                                                     int noblock,
-                                                     int *errcode,
-                                                     int max_page_order);
-extern void *sock_kmalloc(struct sock *sk, int size,
-                         gfp_t priority);
-extern void sock_kfree_s(struct sock *sk, void *mem, int size);
-extern void sk_send_sigurg(struct sock *sk);
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+                     struct proto *prot);
+void sk_free(struct sock *sk);
+void sk_release_kernel(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+                            gfp_t priority);
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+                            gfp_t priority);
+void sock_wfree(struct sk_buff *skb);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_edemux(struct sk_buff *skb);
+
+int sock_setsockopt(struct socket *sock, int level, int op,
+                   char __user *optval, unsigned int optlen);
+
+int sock_getsockopt(struct socket *sock, int level, int op,
+                   char __user *optval, int __user *optlen);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+                                   int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+                                    unsigned long data_len, int noblock,
+                                    int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
  */
-extern int                      sock_no_bind(struct socket *,
-                                            struct sockaddr *, int);
-extern int                      sock_no_connect(struct socket *,
-                                               struct sockaddr *, int, int);
-extern int                      sock_no_socketpair(struct socket *,
-                                                  struct socket *);
-extern int                      sock_no_accept(struct socket *,
-                                              struct socket *, int);
-extern int                      sock_no_getname(struct socket *,
-                                               struct sockaddr *, int *, int);
-extern unsigned int             sock_no_poll(struct file *, struct socket *,
-                                            struct poll_table_struct *);
-extern int                      sock_no_ioctl(struct socket *, unsigned int,
-                                             unsigned long);
-extern int                     sock_no_listen(struct socket *, int);
-extern int                      sock_no_shutdown(struct socket *, int);
-extern int                     sock_no_getsockopt(struct socket *, int , int,
-                                                  char __user *, int __user *);
-extern int                     sock_no_setsockopt(struct socket *, int, int,
-                                                  char __user *, unsigned int);
-extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
-                                               struct msghdr *, size_t);
-extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
-                                               struct msghdr *, size_t, int);
-extern int                     sock_no_mmap(struct file *file,
-                                            struct socket *sock,
-                                            struct vm_area_struct *vma);
-extern ssize_t                 sock_no_sendpage(struct socket *sock,
-                                               struct page *page,
-                                               int offset, size_t size,
-                                               int flags);
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
+unsigned int sock_no_poll(struct file *, struct socket *,
+                         struct poll_table_struct *);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+                   int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+                struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+                        size_t size, int flags);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * uses the inet style.
  */
-extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen);
-extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size, int flags);
-extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen);
-extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+int compat_sock_common_getsockopt(struct socket *sock, int level,
                int optname, char __user *optval, int __user *optlen);
-extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+int compat_sock_common_setsockopt(struct socket *sock, int level,
                int optname, char __user *optval, unsigned int optlen);
 
-extern void sk_common_release(struct sock *sk);
+void sk_common_release(struct sock *sk);
 
 /*
  *     Default socket callbacks and setup code
  */
 
 /* Initialise core socket variables */
-extern void sock_init_data(struct socket *sock, struct sock *sk);
+void sock_init_data(struct socket *sock, struct sock *sk);
 
-extern void sk_filter_release_rcu(struct rcu_head *rcu);
+void sk_filter_release_rcu(struct rcu_head *rcu);
 
 /**
  *     sk_filter_release - release a socket filter
@@ -1625,16 +1623,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-       unsigned int size = sk_filter_len(fp);
-
-       atomic_sub(size, &sk->sk_omem_alloc);
+       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
        sk_filter_release(fp);
 }
 
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
        atomic_inc(&fp->refcnt);
-       atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 }
 
 /*
@@ -1668,9 +1664,12 @@ static inline void sock_put(struct sock *sk)
        if (atomic_dec_and_test(&sk->sk_refcnt))
                sk_free(sk);
 }
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
 
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
-                         const int nested);
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
@@ -1724,8 +1723,8 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
-extern kuid_t sock_i_uid(struct sock *sk);
-extern unsigned long sock_i_ino(struct sock *sk);
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
 
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
@@ -1747,8 +1746,6 @@ sk_dst_get(struct sock *sk)
        return dst;
 }
 
-extern void sk_reset_txq(struct sock *sk);
-
 static inline void dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1758,7 +1755,7 @@ static inline void dst_negative_advice(struct sock *sk)
 
                if (ndst != dst) {
                        rcu_assign_pointer(sk->sk_dst_cache, ndst);
-                       sk_reset_txq(sk);
+                       sk_tx_queue_clear(sk);
                }
        }
 }
@@ -1800,16 +1797,16 @@ sk_dst_reset(struct sock *sk)
        spin_unlock(&sk->sk_dst_lock);
 }
 
-extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
-extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
 static inline bool sk_can_gso(const struct sock *sk)
 {
        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
 
-extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
@@ -2022,14 +2019,14 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
-extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
-                          unsigned long expires);
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+                   unsigned long expires);
 
-extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
-extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  *     Recover an error report and clear atomically
@@ -2097,7 +2094,7 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
        return &sk->sk_frag;
 }
 
-extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
 /*
  *     Default write policy as shown to user space via poll/select/SIGIO
@@ -2135,10 +2132,10 @@ static inline int sock_intr_errno(long timeo)
        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
-extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
-       struct sk_buff *skb);
-extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
-       struct sk_buff *skb);
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+                          struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+                            struct sk_buff *skb);
 
 static inline void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -2171,8 +2168,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
                __sock_recv_wifi_status(msg, sk, skb);
 }
 
-extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
-                                    struct sk_buff *skb);
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+                             struct sk_buff *skb);
 
 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
                                          struct sk_buff *skb)
@@ -2197,7 +2194,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
  *
  * Currently only depends on SOCK_TIMESTAMPING* flags.
  */
-extern void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
 
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
@@ -2261,11 +2258,11 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
        return NULL;
 }
 
-extern void sock_enable_timestamp(struct sock *sk, int flag);
-extern int sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int sock_get_timestampns(struct sock *, struct timespec __user *);
-extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
-                             int level, int type);
+void sock_enable_timestamp(struct sock *sk, int flag);
+int sock_get_timestamp(struct sock *, struct timeval __user *);
+int sock_get_timestampns(struct sock *, struct timespec __user *);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+                      int type);
 
 /*
  *     Enable debug/info messages
index ad447f105417def47daff3e65f93de65d26f0917..3af174d70d9e8ca988122b75358703b99c0b4d4a 100644 (file)
@@ -8,7 +8,7 @@ struct stp_proto {
        void            *data;
 };
 
-extern int stp_proto_register(const struct stp_proto *proto);
-extern void stp_proto_unregister(const struct stp_proto *proto);
+int stp_proto_register(const struct stp_proto *proto);
+void stp_proto_unregister(const struct stp_proto *proto);
 
 #endif /* _NET_STP_H */
index b1aa324c5e6512791cbf3340aca34dacc73bdfdb..2d7b4bdc972ff38ac5a0a0f357c8c081c7d09014 100644 (file)
@@ -50,7 +50,7 @@
 extern struct inet_hashinfo tcp_hashinfo;
 
 extern struct percpu_counter tcp_orphan_count;
-extern void tcp_time_wait(struct sock *sk, int state, int timeo);
+void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER (128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
@@ -259,6 +259,7 @@ extern int sysctl_tcp_max_orphans;
 extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_dsack;
+extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -325,7 +326,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
        return false;
 }
 
-extern bool tcp_check_oom(struct sock *sk, int shift);
+bool tcp_check_oom(struct sock *sk, int shift);
 
 /* syncookies: remember time of last synqueue overflow */
 static inline void tcp_synq_overflow(struct sock *sk)
@@ -348,38 +349,36 @@ extern struct proto tcp_prot;
 #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
-extern void tcp_init_mem(struct net *net);
-
-extern void tcp_tasklet_init(void);
-
-extern void tcp_v4_err(struct sk_buff *skb, u32);
-
-extern void tcp_shutdown (struct sock *sk, int how);
-
-extern void tcp_v4_early_demux(struct sk_buff *skb);
-extern int tcp_v4_rcv(struct sk_buff *skb);
-
-extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t size);
-extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
-                       size_t size, int flags);
-extern void tcp_release_cb(struct sock *sk);
-extern void tcp_wfree(struct sk_buff *skb);
-extern void tcp_write_timer_handler(struct sock *sk);
-extern void tcp_delack_timer_handler(struct sock *sk);
-extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-                                const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-                               const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_space_adjust(struct sock *sk);
-extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
-extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
-extern void tcp_twsk_destructor(struct sock *sk);
-extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
-                              struct pipe_inode_info *pipe, size_t len,
-                              unsigned int flags);
+void tcp_tasklet_init(void);
+
+void tcp_v4_err(struct sk_buff *skb, u32);
+
+void tcp_shutdown(struct sock *sk, int how);
+
+void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_rcv(struct sk_buff *skb);
+
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t size);
+int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+                int flags);
+void tcp_release_cb(struct sock *sk);
+void tcp_wfree(struct sk_buff *skb);
+void tcp_write_timer_handler(struct sock *sk);
+void tcp_delack_timer_handler(struct sock *sk);
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+                         const struct tcphdr *th, unsigned int len);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+                        const struct tcphdr *th, unsigned int len);
+void tcp_rcv_space_adjust(struct sock *sk);
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+void tcp_twsk_destructor(struct sock *sk);
+ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+                       struct pipe_inode_info *pipe, size_t len,
+                       unsigned int flags);
 
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
@@ -409,66 +408,65 @@ enum tcp_tw_status {
 };
 
 
-extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
-                                                    struct sk_buff *skb,
-                                                    const struct tcphdr *th);
-extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
-                                  struct request_sock *req,
-                                  struct request_sock **prev,
-                                  bool fastopen);
-extern int tcp_child_process(struct sock *parent, struct sock *child,
-                            struct sk_buff *skb);
-extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_sock *tp);
-extern void tcp_update_metrics(struct sock *sk);
-extern void tcp_init_metrics(struct sock *sk);
-extern void tcp_metrics_init(void);
-extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
-extern bool tcp_remember_stamp(struct sock *sk);
-extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
-extern void tcp_disable_fack(struct tcp_sock *tp);
-extern void tcp_close(struct sock *sk, long timeout);
-extern void tcp_init_sock(struct sock *sk);
-extern unsigned int tcp_poll(struct file * file, struct socket *sock,
-                            struct poll_table_struct *wait);
-extern int tcp_getsockopt(struct sock *sk, int level, int optname,
+enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
+                                             struct sk_buff *skb,
+                                             const struct tcphdr *th);
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+                          struct request_sock *req, struct request_sock **prev,
+                          bool fastopen);
+int tcp_child_process(struct sock *parent, struct sock *child,
+                     struct sk_buff *skb);
+void tcp_enter_loss(struct sock *sk, int how);
+void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_metrics(struct sock *sk);
+void tcp_init_metrics(struct sock *sk);
+void tcp_metrics_init(void);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
+                       bool paws_check);
+bool tcp_remember_stamp(struct sock *sk);
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+void tcp_disable_fack(struct tcp_sock *tp);
+void tcp_close(struct sock *sk, long timeout);
+void tcp_init_sock(struct sock *sk);
+unsigned int tcp_poll(struct file *file, struct socket *sock,
+                     struct poll_table_struct *wait);
+int tcp_getsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, int __user *optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, unsigned int optlen);
+int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
-extern int tcp_setsockopt(struct sock *sk, int level, int optname,
+int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
-extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int __user *optlen);
-extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, unsigned int optlen);
-extern void tcp_set_keepalive(struct sock *sk, int val);
-extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(const struct sk_buff *skb,
-                             struct tcp_options_received *opt_rx,
-                             int estab, struct tcp_fastopen_cookie *foc);
-extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+void tcp_set_keepalive(struct sock *sk, int val);
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len, int nonblock, int flags, int *addr_len);
+void tcp_parse_options(const struct sk_buff *skb,
+                      struct tcp_options_received *opt_rx,
+                      int estab, struct tcp_fastopen_cookie *foc);
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
  *     TCP v4 functions exported for the inet6 API
  */
 
-extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-extern struct sock * tcp_create_openreq_child(struct sock *sk,
-                                             struct request_sock *req,
-                                             struct sk_buff *skb);
-extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
-                                         struct request_sock *req,
-                                         struct dst_entry *dst);
-extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
-                         int addr_len);
-extern int tcp_connect(struct sock *sk);
-extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
-                                       struct request_sock *req,
-                                       struct tcp_fastopen_cookie *foc);
-extern int tcp_disconnect(struct sock *sk, int flags);
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_create_openreq_child(struct sock *sk,
+                                     struct request_sock *req,
+                                     struct sk_buff *skb);
+struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+                                 struct request_sock *req,
+                                 struct dst_entry *dst);
+int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_connect(struct sock *sk);
+struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+                               struct request_sock *req,
+                               struct tcp_fastopen_cookie *foc);
+int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_connect_init(struct sock *sk);
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -476,16 +474,32 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 
 /* From syncookies.c */
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
-                            u32 cookie);
-extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
-                                   struct ip_options *opt);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+                     u32 cookie);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+                            struct ip_options *opt);
 #ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
-                                    const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
-                                    __u16 *mss);
+#include <linux/ktime.h>
+
+/* Syncookies use a monotonic timer which increments every 64 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value.  A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+ * i.e. a sent cookie is valid only at most for 128 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+#define MAX_SYNCOOKIE_AGE 2
+
+static inline u32 tcp_cookie_time(void)
+{
+       struct timespec now;
+       getnstimeofday(&now);
+       return now.tv_sec >> 6; /* 64 seconds granularity */
+}
+
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+                             u16 *mssp);
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
 #else
 static inline __u32 cookie_v4_init_sequence(struct sock *sk,
                                            struct sk_buff *skb,
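/* Illustrative sketch (editor's addition, not part of this commit): given the
 * 64-second counter above and MAX_SYNCOOKIE_AGE == 2, a freshness test on the
 * counter value recovered from a cookie would look roughly like this, giving
 * an acceptance window of at most 2 * 64 = 128 seconds.  The helper name is
 * hypothetical.
 */
static inline bool cookie_counter_is_fresh(u32 cookie_count)
{
	return (tcp_cookie_time() - cookie_count) < MAX_SYNCOOKIE_AGE;
}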
@@ -495,19 +509,19 @@ static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 }
 #endif
 
-extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern bool cookie_check_timestamp(struct tcp_options_received *opt,
-                               struct net *net, bool *ecn_ok);
+__u32 cookie_init_timestamp(struct request_sock *req);
+bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
+                           bool *ecn_ok);
 
 /* From net/ipv6/syncookies.c */
-extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
-                            u32 cookie);
-extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+                     u32 cookie);
+struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
-                                    const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
-                                    __u16 *mss);
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+                             const struct tcphdr *th, u16 *mssp);
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
+                             __u16 *mss);
 #else
 static inline __u32 cookie_v6_init_sequence(struct sock *sk,
                                            struct sk_buff *skb,
@@ -518,47 +532,46 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 #endif
 /* tcp_output.c */
 
-extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
-                                     int nonagle);
-extern bool tcp_may_send_now(struct sock *sk);
-extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_retransmit_timer(struct sock *sk);
-extern void tcp_xmit_retransmit_queue(struct sock *);
-extern void tcp_simple_retransmit(struct sock *);
-extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
-
-extern void tcp_send_probe0(struct sock *);
-extern void tcp_send_partial(struct sock *);
-extern int tcp_write_wakeup(struct sock *);
-extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
-extern int tcp_send_synack(struct sock *);
-extern bool tcp_syn_flood_action(struct sock *sk,
-                                const struct sk_buff *skb,
-                                const char *proto);
-extern void tcp_push_one(struct sock *, unsigned int mss_now);
-extern void tcp_send_ack(struct sock *sk);
-extern void tcp_send_delayed_ack(struct sock *sk);
-extern void tcp_send_loss_probe(struct sock *sk);
-extern bool tcp_schedule_loss_probe(struct sock *sk);
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+                              int nonagle);
+bool tcp_may_send_now(struct sock *sk);
+int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+void tcp_retransmit_timer(struct sock *sk);
+void tcp_xmit_retransmit_queue(struct sock *);
+void tcp_simple_retransmit(struct sock *);
+int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+
+void tcp_send_probe0(struct sock *);
+void tcp_send_partial(struct sock *);
+int tcp_write_wakeup(struct sock *);
+void tcp_send_fin(struct sock *sk);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+int tcp_send_synack(struct sock *);
+bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
+                         const char *proto);
+void tcp_push_one(struct sock *, unsigned int mss_now);
+void tcp_send_ack(struct sock *sk);
+void tcp_send_delayed_ack(struct sock *sk);
+void tcp_send_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk);
 
 /* tcp_input.c */
-extern void tcp_cwnd_application_limited(struct sock *sk);
-extern void tcp_resume_early_retransmit(struct sock *sk);
-extern void tcp_rearm_rto(struct sock *sk);
-extern void tcp_reset(struct sock *sk);
+void tcp_cwnd_application_limited(struct sock *sk);
+void tcp_resume_early_retransmit(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
-extern void tcp_init_xmit_timers(struct sock *);
+void tcp_init_xmit_timers(struct sock *);
 static inline void tcp_clear_xmit_timers(struct sock *sk)
 {
        inet_csk_clear_xmit_timers(sk);
 }
 
-extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk);
+unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int tcp_current_mss(struct sock *sk);
 
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -584,20 +597,20 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 }
 
 /* tcp.c */
-extern void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(const struct sock *, struct tcp_info *);
 
 /* Read 'sendfile()'-style from a TCP socket */
 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
                                unsigned int, size_t);
-extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
-                        sk_read_actor_t recv_actor);
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+                 sk_read_actor_t recv_actor);
 
-extern void tcp_initialize_rcv_mss(struct sock *sk);
+void tcp_initialize_rcv_mss(struct sock *sk);
 
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
-extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_init_buffer_space(struct sock *sk);
+int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+int tcp_mss_to_mtu(struct sock *sk, int mss);
+void tcp_mtup_init(struct sock *sk);
+void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
@@ -610,7 +623,7 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
        return (tp->srtt >> 3) + tp->rttvar;
 }
 
-extern void tcp_set_rto(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
 
 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 {
@@ -663,7 +676,7 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp)
  * scaling applied to the result.  The caller does these things
  * if necessary.  This is a "raw" window selection.
  */
-extern u32 __tcp_select_window(struct sock *sk);
+u32 __tcp_select_window(struct sock *sk);
 
 void tcp_send_window_probe(struct sock *sk);
 
@@ -800,24 +813,24 @@ struct tcp_congestion_ops {
        struct module   *owner;
 };
 
-extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 
-extern void tcp_init_congestion_control(struct sock *sk);
-extern void tcp_cleanup_congestion_control(struct sock *sk);
-extern int tcp_set_default_congestion_control(const char *name);
-extern void tcp_get_default_congestion_control(char *name);
-extern void tcp_get_available_congestion_control(char *buf, size_t len);
-extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
-extern int tcp_set_allowed_congestion_control(char *allowed);
-extern int tcp_set_congestion_control(struct sock *sk, const char *name);
-extern void tcp_slow_start(struct tcp_sock *tp);
-extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+void tcp_init_congestion_control(struct sock *sk);
+void tcp_cleanup_congestion_control(struct sock *sk);
+int tcp_set_default_congestion_control(const char *name);
+void tcp_get_default_congestion_control(char *name);
+void tcp_get_available_congestion_control(char *buf, size_t len);
+void tcp_get_allowed_congestion_control(char *buf, size_t len);
+int tcp_set_allowed_congestion_control(char *allowed);
+int tcp_set_congestion_control(struct sock *sk, const char *name);
+void tcp_slow_start(struct tcp_sock *tp);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
-extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
-extern u32 tcp_reno_min_cwnd(const struct sock *sk);
+u32 tcp_reno_ssthresh(struct sock *sk);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
+u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -936,8 +949,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)        WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* The maximum number of MSS of available cwnd for which TSO defers
  * sending if not using sysctl_tcp_tso_win_divisor.
@@ -963,7 +976,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
        return tp->snd_una + tp->snd_wnd;
 }
-extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
                                       const struct sk_buff *skb)
@@ -1028,7 +1041,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 #endif
 }
 
-extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE
 
@@ -1039,9 +1052,9 @@ static const char *statename[]={
        "Close Wait","Last ACK","Listen","Closing"
 };
 #endif
-extern void tcp_set_state(struct sock *sk, int state);
+void tcp_set_state(struct sock *sk, int state);
 
-extern void tcp_done(struct sock *sk);
+void tcp_done(struct sock *sk);
 
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
@@ -1049,13 +1062,12 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
        rx_opt->num_sacks = 0;
 }
 
-extern u32 tcp_default_init_rwnd(u32 mss);
+u32 tcp_default_init_rwnd(u32 mss);
 
 /* Determine a window scaling and initial window to offer. */
-extern void tcp_select_initial_window(int __space, __u32 mss,
-                                     __u32 *rcv_wnd, __u32 *window_clamp,
-                                     int wscale_ok, __u8 *rcv_wscale,
-                                     __u32 init_rcv_wnd);
+void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+                              __u32 *window_clamp, int wscale_ok,
+                              __u8 *rcv_wscale, __u32 init_rcv_wnd);
 
 static inline int tcp_win_from_space(int space)
 {
@@ -1095,11 +1107,11 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->wscale_ok = rx_opt->wscale_ok;
        ireq->acked = 0;
        ireq->ecn_ok = 0;
-       ireq->rmt_port = tcp_hdr(skb)->source;
-       ireq->loc_port = tcp_hdr(skb)->dest;
+       ireq->ir_rmt_port = tcp_hdr(skb)->source;
+       ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 }
 
-extern void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
@@ -1252,21 +1264,20 @@ struct tcp_md5sig_pool {
 };
 
 /* - functions */
-extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-                              const struct sock *sk,
-                              const struct request_sock *req,
-                              const struct sk_buff *skb);
-extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                         int family, const u8 *newkey,
-                         u8 newkeylen, gfp_t gfp);
-extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-                         int family);
-extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+                       const struct sock *sk, const struct request_sock *req,
+                       const struct sk_buff *skb);
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+                  int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
+                  int family);
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                         struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
-extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
-                       const union tcp_md5_addr *addr, int family);
+struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+                                        const union tcp_md5_addr *addr,
+                                        int family);
 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
 #else
 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1278,27 +1289,26 @@ static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 #define tcp_twsk_md5_key(twsk) NULL
 #endif
 
-extern bool tcp_alloc_md5sig_pool(void);
+bool tcp_alloc_md5sig_pool(void);
 
-extern struct tcp_md5sig_pool  *tcp_get_md5sig_pool(void);
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
 static inline void tcp_put_md5sig_pool(void)
 {
        local_bh_enable();
 }
 
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
-                                unsigned int header_len);
-extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
-                           const struct tcp_md5sig_key *key);
+int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
+                         unsigned int header_len);
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+                    const struct tcp_md5sig_key *key);
 
 /* From tcp_fastopen.c */
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  bool syn_lost);
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+                           struct tcp_fastopen_cookie *cookie, int *syn_loss,
+                           unsigned long *last_syn_loss);
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+                           struct tcp_fastopen_cookie *cookie, bool syn_lost);
 struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie      cookie;
@@ -1309,9 +1319,9 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-                                   struct tcp_fastopen_cookie *foc);
-
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+                            struct tcp_fastopen_cookie *foc);
+void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
 /* Fastopen key context */
@@ -1507,7 +1517,6 @@ enum tcp_seq_states {
        TCP_SEQ_STATE_LISTENING,
        TCP_SEQ_STATE_OPENREQ,
        TCP_SEQ_STATE_ESTABLISHED,
-       TCP_SEQ_STATE_TIME_WAIT,
 };
 
 int tcp_seq_open(struct inode *inode, struct file *file);
@@ -1529,22 +1538,20 @@ struct tcp_iter_state {
        loff_t                  last_pos;
 };
 
-extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
-extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
+int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
+void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
 
 extern struct request_sock_ops tcp_request_sock_ops;
 extern struct request_sock_ops tcp6_request_sock_ops;
 
-extern void tcp_v4_destroy_sock(struct sock *sk);
+void tcp_v4_destroy_sock(struct sock *sk);
 
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
-                                      netdev_features_t features);
-extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb);
-extern int tcp_gro_complete(struct sk_buff *skb);
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+                               netdev_features_t features);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int tcp_gro_complete(struct sk_buff *skb);
 
-extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
-                               __be32 daddr);
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
@@ -1560,8 +1567,8 @@ static inline bool tcp_stream_memory_free(const struct sock *sk)
 }
 
 #ifdef CONFIG_PROC_FS
-extern int tcp4_proc_init(void);
-extern void tcp4_proc_exit(void);
+int tcp4_proc_init(void);
+void tcp4_proc_exit(void);
 #endif
 
 /* TCP af-specific functions */
@@ -1592,9 +1599,9 @@ struct tcp_request_sock_ops {
 #endif
 };
 
-extern int tcpv4_offload_init(void);
+int tcpv4_offload_init(void);
 
-extern void tcp_v4_init(void);
-extern void tcp_init(void);
+void tcp_v4_init(void);
+void tcp_init(void);
 
 #endif /* _TCP_H */
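The header changes above follow a single pattern: the redundant `extern` keyword is dropped from function prototypes. In C, a function declaration at file scope has external linkage by default, so both forms below declare the same symbol; only the spelling and line wrapping change. A minimal sketch (the helper name is hypothetical, not taken from the tree):

	extern int example_lookup(int key);	/* old form: extern spelled out */
	int example_lookup(int key);		/* equivalent declaration, preferred after this cleanup */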
index 7df18bc43a97fccef8478436f8f43f3a5553b997..05b94d9453de8b036f1c275caa268b6f8113eac7 100644 (file)
@@ -1,19 +1,7 @@
 #ifndef _TCP_MEMCG_H
 #define _TCP_MEMCG_H
 
-struct tcp_memcontrol {
-       struct cg_proto cg_proto;
-       /* per-cgroup tcp memory pressure knobs */
-       struct res_counter tcp_memory_allocated;
-       struct percpu_counter tcp_sockets_allocated;
-       /* those two are read-mostly, leave them at the end */
-       long tcp_prot_mem[3];
-       int tcp_memory_pressure;
-};
-
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg);
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
 #endif /* _TCP_MEMCG_H */
index ef2e0b7843a0036c1b31d8e67e78b624bf05f44f..fe4ba9f32429ab7c2fc327cafd29bafadeb3561b 100644 (file)
@@ -79,7 +79,7 @@ struct udp_table {
        unsigned int            log;
 };
 extern struct udp_table udp_table;
-extern void udp_table_init(struct udp_table *, const char *);
+void udp_table_init(struct udp_table *, const char *);
 static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
                                             struct net *net, unsigned int num)
 {
@@ -162,52 +162,53 @@ static inline void udp_lib_hash(struct sock *sk)
        BUG();
 }
 
-extern void udp_lib_unhash(struct sock *sk);
-extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_unhash(struct sock *sk);
+void udp_lib_rehash(struct sock *sk, u16 new_hash);
 
 static inline void udp_lib_close(struct sock *sk, long timeout)
 {
        sk_common_release(sk);
 }
 
-extern int udp_lib_get_port(struct sock *sk, unsigned short snum,
-                           int (*)(const struct sock *,const struct sock *),
-                           unsigned int hash2_nulladdr);
+int udp_lib_get_port(struct sock *sk, unsigned short snum,
+                    int (*)(const struct sock *, const struct sock *),
+                    unsigned int hash2_nulladdr);
 
 /* net/ipv4/udp.c */
-extern int udp_get_port(struct sock *sk, unsigned short snum,
-                       int (*saddr_cmp)(const struct sock *,
-                                        const struct sock *));
-extern void udp_err(struct sk_buff *, u32);
-extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                           struct msghdr *msg, size_t len);
-extern int udp_push_pending_frames(struct sock *sk);
-extern void udp_flush_pending_frames(struct sock *sk);
-extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
-extern int udp_rcv(struct sk_buff *skb);
-extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int udp_disconnect(struct sock *sk, int flags);
-extern unsigned int udp_poll(struct file *file, struct socket *sock,
-                            poll_table *wait);
-extern struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-                                             netdev_features_t features);
-extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
-                             char __user *optval, int __user *optlen);
-extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
-                             char __user *optval, unsigned int optlen,
-                             int (*push_pending_frames)(struct sock *));
-extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
-                                   __be32 daddr, __be16 dport,
-                                   int dif);
-extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
-                                   __be32 daddr, __be16 dport,
-                                   int dif, struct udp_table *tbl);
-extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
-                                   const struct in6_addr *daddr, __be16 dport,
-                                   int dif);
-extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
-                                   const struct in6_addr *daddr, __be16 dport,
-                                   int dif, struct udp_table *tbl);
+void udp_v4_early_demux(struct sk_buff *skb);
+int udp_get_port(struct sock *sk, unsigned short snum,
+                int (*saddr_cmp)(const struct sock *,
+                                 const struct sock *));
+void udp_err(struct sk_buff *, u32);
+int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len);
+int udp_push_pending_frames(struct sock *sk);
+void udp_flush_pending_frames(struct sock *sk);
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
+int udp_rcv(struct sk_buff *skb);
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_disconnect(struct sock *sk, int flags);
+unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                      netdev_features_t features);
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+                      char __user *optval, int __user *optlen);
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+                      char __user *optval, unsigned int optlen,
+                      int (*push_pending_frames)(struct sock *));
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                            __be32 daddr, __be16 dport, int dif);
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                              __be32 daddr, __be16 dport, int dif,
+                              struct udp_table *tbl);
+struct sock *udp6_lib_lookup(struct net *net,
+                            const struct in6_addr *saddr, __be16 sport,
+                            const struct in6_addr *daddr, __be16 dport,
+                            int dif);
+struct sock *__udp6_lib_lookup(struct net *net,
+                              const struct in6_addr *saddr, __be16 sport,
+                              const struct in6_addr *daddr, __be16 dport,
+                              int dif, struct udp_table *tbl);
 
 /*
  *     SNMP statistics for UDP and UDP-Lite
@@ -259,19 +260,19 @@ struct udp_iter_state {
 };
 
 #ifdef CONFIG_PROC_FS
-extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
-extern void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
+int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
+void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
 
-extern int udp4_proc_init(void);
-extern void udp4_proc_exit(void);
+int udp4_proc_init(void);
+void udp4_proc_exit(void);
 #endif
 
-extern int udpv4_offload_init(void);
+int udpv4_offload_init(void);
 
-extern void udp_init(void);
+void udp_init(void);
 
-extern void udp_encap_enable(void);
+void udp_encap_enable(void);
 #if IS_ENABLED(CONFIG_IPV6)
-extern void udpv6_encap_enable(void);
+void udpv6_encap_enable(void);
 #endif
 #endif /* _UDP_H */
index 71375459a8843da6cbbf7062a0e71bf90a6ee716..2caadabcd07baf6552098082fcb78672f772dcb5 100644 (file)
@@ -126,7 +126,7 @@ static inline __wsum udplite_csum(struct sk_buff *skb)
        return skb_checksum(skb, off, len, 0);
 }
 
-extern void    udplite4_register(void);
-extern int     udplite_get_port(struct sock *sk, unsigned short snum,
-                       int (*scmp)(const struct sock *, const struct sock *));
+void udplite4_register(void);
+int udplite_get_port(struct sock *sk, unsigned short snum,
+                    int (*scmp)(const struct sock *, const struct sock *));
 #endif /* _UDPLITE_H */
index 4f6e7423174cd55890d7845326c8352f66146f04..345911965dbb8f53289d63535828969454ae3333 100644 (file)
@@ -6,13 +6,13 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
-                            void __user *arg);
-extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
-                                   unsigned long arg);
+int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+                     void __user *arg);
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+                            unsigned long arg);
 
-extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
-extern int call_commit_handler(struct net_device *dev);
+struct iw_statistics *get_wireless_stats(struct net_device *dev);
+int call_commit_handler(struct net_device *dev);
 #else
 static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
                                    void __user *arg)
@@ -27,8 +27,8 @@ static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 #endif
 
 #ifdef CONFIG_WEXT_PROC
-extern int wext_proc_init(struct net *net);
-extern void wext_proc_exit(struct net *net);
+int wext_proc_init(struct net *net);
+void wext_proc_exit(struct net *net);
 #else
 static inline int wext_proc_init(struct net *net)
 {
index bbb74f990cab784a5e9ffb4dc6b391977538cf49..98498e1daa060524af05c639fe74a951f2d307e8 100644 (file)
@@ -438,9 +438,9 @@ struct wimax_dev {
  *
  * These functions are not exported to user space.
  */
-extern void wimax_dev_init(struct wimax_dev *);
-extern int wimax_dev_add(struct wimax_dev *, struct net_device *);
-extern void wimax_dev_rm(struct wimax_dev *);
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
 
 static inline
 struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
@@ -454,8 +454,8 @@ struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
        return wimax_dev->net_dev->dev.parent;
 }
 
-extern void wimax_state_change(struct wimax_dev *, enum wimax_st);
-extern enum wimax_st wimax_state_get(struct wimax_dev *);
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
 
 /*
  * Radio Switch state reporting.
@@ -463,8 +463,8 @@ extern enum wimax_st wimax_state_get(struct wimax_dev *);
  * enum wimax_rf_state is declared in linux/wimax.h so the exports
  * to user space can use it.
  */
-extern void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
 
 
 /*
@@ -490,15 +490,14 @@ extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
  * send diagnostics information that a device-specific diagnostics
  * tool would be interested in.
  */
-extern struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *,
-                                      const void *, size_t, gfp_t);
-extern int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-extern int wimax_msg(struct wimax_dev *, const char *,
-                    const void *, size_t, gfp_t);
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+                               size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
 
-extern const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-extern const void *wimax_msg_data(struct sk_buff *);
-extern ssize_t wimax_msg_len(struct sk_buff *);
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
 
 
 /*
@@ -513,7 +512,7 @@ extern ssize_t wimax_msg_len(struct sk_buff *);
  * device's control structure and (as such) the 'struct wimax_dev' is
  * referenced by the caller.
  */
-extern int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-extern int wimax_reset(struct wimax_dev *);
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
 
 #endif /* #ifndef __NET__WIMAX_H__ */
index b4a8a8923128fbc68a9aa0d6dd2fb5dd5524de32..c383aa4edbf0c27980ef7aad22ff160c72b41bbd 100644 (file)
@@ -187,57 +187,57 @@ extern int  sysctl_x25_clear_request_timeout;
 extern int  sysctl_x25_ack_holdback_timeout;
 extern int  sysctl_x25_forward;
 
-extern int x25_parse_address_block(struct sk_buff *skb,
-               struct x25_address *called_addr,
-               struct x25_address *calling_addr);
-
-extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
-                         struct x25_address *);
-extern int  x25_addr_aton(unsigned char *, struct x25_address *,
-                         struct x25_address *);
-extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
-extern void x25_destroy_socket_from_timer(struct sock *);
-extern int  x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
-extern void x25_kill_by_neigh(struct x25_neigh *);
+int x25_parse_address_block(struct sk_buff *skb,
+                           struct x25_address *called_addr,
+                           struct x25_address *calling_addr);
+
+int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *);
+int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *);
+struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+void x25_destroy_socket_from_timer(struct sock *);
+int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+void x25_kill_by_neigh(struct x25_neigh *);
 
 /* x25_dev.c */
-extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
-extern int  x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-extern void x25_establish_link(struct x25_neigh *);
-extern void x25_terminate_link(struct x25_neigh *);
+void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
+                          struct packet_type *, struct net_device *);
+void x25_establish_link(struct x25_neigh *);
+void x25_terminate_link(struct x25_neigh *);
 
 /* x25_facilities.c */
-extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
-                               struct x25_dte_facilities *, unsigned long *);
-extern int x25_create_facilities(unsigned char *, struct x25_facilities *,
-                               struct x25_dte_facilities *, unsigned long);
-extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
-                               struct x25_facilities *,
-                               struct x25_dte_facilities *);
-extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
+                        struct x25_dte_facilities *, unsigned long *);
+int x25_create_facilities(unsigned char *, struct x25_facilities *,
+                         struct x25_dte_facilities *, unsigned long);
+int x25_negotiate_facilities(struct sk_buff *, struct sock *,
+                            struct x25_facilities *,
+                            struct x25_dte_facilities *);
+void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
 
 /* x25_forward.c */
-extern void x25_clear_forward_by_lci(unsigned int lci);
-extern void x25_clear_forward_by_dev(struct net_device *);
-extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
-extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
-                               struct sk_buff *, int);
+void x25_clear_forward_by_lci(unsigned int lci);
+void x25_clear_forward_by_dev(struct net_device *);
+int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *,
+                    int);
 
 /* x25_in.c */
-extern int  x25_process_rx_frame(struct sock *, struct sk_buff *);
-extern int  x25_backlog_rcv(struct sock *, struct sk_buff *);
+int x25_process_rx_frame(struct sock *, struct sk_buff *);
+int x25_backlog_rcv(struct sock *, struct sk_buff *);
 
 /* x25_link.c */
-extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
-extern void x25_link_device_up(struct net_device *);
-extern void x25_link_device_down(struct net_device *);
-extern void x25_link_established(struct x25_neigh *);
-extern void x25_link_terminated(struct x25_neigh *);
-extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char);
-extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
-extern int  x25_subscr_ioctl(unsigned int, void __user *);
-extern struct x25_neigh *x25_get_neigh(struct net_device *);
-extern void x25_link_free(void);
+void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+void x25_link_device_up(struct net_device *);
+void x25_link_device_down(struct net_device *);
+void x25_link_established(struct x25_neigh *);
+void x25_link_terminated(struct x25_neigh *);
+void x25_transmit_clear_request(struct x25_neigh *, unsigned int,
+                               unsigned char);
+void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+int x25_subscr_ioctl(unsigned int, void __user *);
+struct x25_neigh *x25_get_neigh(struct net_device *);
+void x25_link_free(void);
 
 /* x25_neigh.c */
 static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
@@ -252,16 +252,16 @@ static __inline__ void x25_neigh_put(struct x25_neigh *nb)
 }
 
 /* x25_out.c */
-extern  int x25_output(struct sock *, struct sk_buff *);
-extern void x25_kick(struct sock *);
-extern void x25_enquiry_response(struct sock *);
+int x25_output(struct sock *, struct sk_buff *);
+void x25_kick(struct sock *);
+void x25_enquiry_response(struct sock *);
 
 /* x25_route.c */
-extern struct x25_route *x25_get_route(struct x25_address *addr);
-extern struct net_device *x25_dev_get(char *);
-extern void x25_route_device_down(struct net_device *dev);
-extern int  x25_route_ioctl(unsigned int, void __user *);
-extern void x25_route_free(void);
+struct x25_route *x25_get_route(struct x25_address *addr);
+struct net_device *x25_dev_get(char *);
+void x25_route_device_down(struct net_device *dev);
+int x25_route_ioctl(unsigned int, void __user *);
+void x25_route_free(void);
 
 static __inline__ void x25_route_hold(struct x25_route *rt)
 {
@@ -275,30 +275,31 @@ static __inline__ void x25_route_put(struct x25_route *rt)
 }
 
 /* x25_subr.c */
-extern void x25_clear_queues(struct sock *);
-extern void x25_frames_acked(struct sock *, unsigned short);
-extern void x25_requeue_frames(struct sock *);
-extern int  x25_validate_nr(struct sock *, unsigned short);
-extern void x25_write_internal(struct sock *, int);
-extern int  x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
-extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+void x25_clear_queues(struct sock *);
+void x25_frames_acked(struct sock *, unsigned short);
+void x25_requeue_frames(struct sock *);
+int x25_validate_nr(struct sock *, unsigned short);
+void x25_write_internal(struct sock *, int);
+int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *,
+              int *);
+void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
 
 /* x25_timer.c */
-extern void x25_init_timers(struct sock *sk);
-extern void x25_start_heartbeat(struct sock *);
-extern void x25_start_t2timer(struct sock *);
-extern void x25_start_t21timer(struct sock *);
-extern void x25_start_t22timer(struct sock *);
-extern void x25_start_t23timer(struct sock *);
-extern void x25_stop_heartbeat(struct sock *);
-extern void x25_stop_timer(struct sock *);
-extern unsigned long x25_display_timer(struct sock *);
-extern void x25_check_rbuf(struct sock *);
+void x25_init_timers(struct sock *sk);
+void x25_start_heartbeat(struct sock *);
+void x25_start_t2timer(struct sock *);
+void x25_start_t21timer(struct sock *);
+void x25_start_t22timer(struct sock *);
+void x25_start_t23timer(struct sock *);
+void x25_stop_heartbeat(struct sock *);
+void x25_stop_timer(struct sock *);
+unsigned long x25_display_timer(struct sock *);
+void x25_check_rbuf(struct sock *);
 
 /* sysctl_net_x25.c */
 #ifdef CONFIG_SYSCTL
-extern void x25_register_sysctl(void);
-extern void x25_unregister_sysctl(void);
+void x25_register_sysctl(void);
+void x25_unregister_sysctl(void);
 #else
 static inline void x25_register_sysctl(void) {};
 static inline void x25_unregister_sysctl(void) {};
@@ -318,6 +319,6 @@ extern rwlock_t x25_forward_list_lock;
 extern struct list_head x25_neigh_list;
 extern rwlock_t x25_neigh_list_lock;
 
-extern int x25_proc_init(void);
-extern void x25_proc_exit(void);
+int x25_proc_init(void);
+void x25_proc_exit(void);
 #endif
index e253bf0cc7ef005721de3b7e8729cfabce52854f..6b82fdf4ba716898ea53fb0cf2e66690a66d1479 100644 (file)
@@ -307,15 +307,17 @@ struct xfrm_policy_afinfo {
        struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
 };
 
-extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
+int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+void km_policy_notify(struct xfrm_policy *xp, int dir,
+                     const struct km_event *c);
+void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
-extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
-extern int __xfrm_state_delete(struct xfrm_state *x);
+int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
+            struct xfrm_policy *pol);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
+int __xfrm_state_delete(struct xfrm_state *x);
 
 struct xfrm_state_afinfo {
        unsigned int            family;
@@ -344,12 +346,12 @@ struct xfrm_state_afinfo {
        void                    (*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
-extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
-extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
-extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
-extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
 struct xfrm_type {
        char                    *description;
@@ -372,8 +374,8 @@ struct xfrm_type {
        u32                     (*get_mtu)(struct xfrm_state *, int size);
 };
 
-extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
-extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
 
 struct xfrm_mode {
        /*
@@ -434,8 +436,8 @@ enum {
        XFRM_MODE_FLAG_TUNNEL = 1,
 };
 
-extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
-extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
+int xfrm_register_mode(struct xfrm_mode *mode, int family);
+int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
 
 static inline int xfrm_af2proto(unsigned int family)
 {
@@ -595,8 +597,8 @@ struct xfrm_mgr {
                                           const struct xfrm_kmaddress *k);
 };
 
-extern int xfrm_register_km(struct xfrm_mgr *km);
-extern int xfrm_unregister_km(struct xfrm_mgr *km);
+int xfrm_register_km(struct xfrm_mgr *km);
+int xfrm_unregister_km(struct xfrm_mgr *km);
 
 /*
  * This structure is used for the duration where packets are being
@@ -713,23 +715,23 @@ static inline void xfrm_audit_helper_usrinfo(kuid_t auid, u32 ses, u32 secid,
                audit_log_task_context(audit_buf);
 }
 
-extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
-                                            struct sk_buff *skb);
-extern void xfrm_audit_state_replay(struct xfrm_state *x,
-                                   struct sk_buff *skb, __be32 net_seq);
-extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
-extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
-                                     __be32 net_spi, __be32 net_seq);
-extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
-                                    struct sk_buff *skb, u8 proto);
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid,
+                          u32 ses, u32 secid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
+                             u32 ses, u32 secid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
+                         u32 ses, u32 secid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
+                            u32 ses, u32 secid);
+void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+                                     struct sk_buff *skb);
+void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
+                            __be32 net_seq);
+void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
+void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
+                              __be32 net_seq);
+void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
+                             u8 proto);
 #else
 
 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
@@ -784,7 +786,7 @@ static inline void xfrm_pol_hold(struct xfrm_policy *policy)
                atomic_inc(&policy->refcnt);
 }
 
-extern void xfrm_policy_destroy(struct xfrm_policy *policy);
+void xfrm_policy_destroy(struct xfrm_policy *policy);
 
 static inline void xfrm_pol_put(struct xfrm_policy *policy)
 {
@@ -799,7 +801,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
                xfrm_pol_put(pols[i]);
 }
 
-extern void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *);
 
 static inline void __xfrm_state_put(struct xfrm_state *x)
 {
@@ -903,9 +905,8 @@ __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
        return port;
 }
 
-extern bool xfrm_selector_match(const struct xfrm_selector *sel,
-                               const struct flowi *fl,
-                               unsigned short family);
+bool xfrm_selector_match(const struct xfrm_selector *sel,
+                        const struct flowi *fl, unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 /*     If neither has a context --> match
@@ -975,7 +976,7 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 }
 #endif
 
-extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
 struct sec_path {
        atomic_t                refcnt;
@@ -1000,7 +1001,7 @@ secpath_get(struct sec_path *sp)
        return sp;
 }
 
-extern void __secpath_destroy(struct sec_path *sp);
+void __secpath_destroy(struct sec_path *sp);
 
 static inline void
 secpath_put(struct sec_path *sp)
@@ -1009,7 +1010,7 @@ secpath_put(struct sec_path *sp)
                __secpath_destroy(sp);
 }
 
-extern struct sec_path *secpath_dup(struct sec_path *src);
+struct sec_path *secpath_dup(struct sec_path *src);
 
 static inline void
 secpath_reset(struct sk_buff *skb)
@@ -1059,7 +1060,8 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
 }
 
 #ifdef CONFIG_XFRM
-extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
+int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+                       unsigned short family);
 
 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                                       struct sk_buff *skb,
@@ -1103,8 +1105,8 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
        return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
 }
 
-extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
-                                unsigned int family, int reverse);
+int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+                         unsigned int family, int reverse);
 
 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
                                      unsigned int family)
@@ -1119,7 +1121,7 @@ static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
        return __xfrm_decode_session(skb, fl, family, 1);
 }
 
-extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
 
 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 {
@@ -1140,7 +1142,7 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
        return xfrm_route_forward(skb, AF_INET6);
 }
 
-extern int __xfrm_sk_clone_policy(struct sock *sk);
+int __xfrm_sk_clone_policy(struct sock *sk);
 
 static inline int xfrm_sk_clone_policy(struct sock *sk)
 {
@@ -1149,7 +1151,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
        return 0;
 }
 
-extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
 
 static inline void xfrm_sk_free_policy(struct sock *sk)
 {
@@ -1163,7 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
        }
 }
 
-extern void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect(struct net *net);
 
 #else
 
@@ -1355,6 +1357,12 @@ struct xfrm_tunnel {
        int priority;
 };
 
+struct xfrm_tunnel_notifier {
+       int (*handler)(struct sk_buff *skb);
+       struct xfrm_tunnel_notifier __rcu *next;
+       int priority;
+};
+
 struct xfrm6_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -1363,16 +1371,16 @@ struct xfrm6_tunnel {
        int priority;
 };
 
-extern void xfrm_init(void);
-extern void xfrm4_init(void);
-extern int xfrm_state_init(struct net *net);
-extern void xfrm_state_fini(struct net *net);
-extern void xfrm4_state_init(void);
+void xfrm_init(void);
+void xfrm4_init(void);
+int xfrm_state_init(struct net *net);
+void xfrm_state_fini(struct net *net);
+void xfrm4_state_init(void);
 #ifdef CONFIG_XFRM
-extern int xfrm6_init(void);
-extern void xfrm6_fini(void);
-extern int xfrm6_state_init(void);
-extern void xfrm6_state_fini(void);
+int xfrm6_init(void);
+void xfrm6_fini(void);
+int xfrm6_state_init(void);
+void xfrm6_state_fini(void);
 #else
 static inline int xfrm6_init(void)
 {
@@ -1385,52 +1393,52 @@ static inline void xfrm6_fini(void)
 #endif
 
 #ifdef CONFIG_XFRM_STATISTICS
-extern int xfrm_proc_init(struct net *net);
-extern void xfrm_proc_fini(struct net *net);
+int xfrm_proc_init(struct net *net);
+void xfrm_proc_fini(struct net *net);
 #endif
 
-extern int xfrm_sysctl_init(struct net *net);
+int xfrm_sysctl_init(struct net *net);
 #ifdef CONFIG_SYSCTL
-extern void xfrm_sysctl_fini(struct net *net);
+void xfrm_sysctl_fini(struct net *net);
 #else
 static inline void xfrm_sysctl_fini(struct net *net)
 {
 }
 #endif
 
-extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
-extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
-                          int (*func)(struct xfrm_state *, int, void*), void *);
-extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
-extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
-                                         const xfrm_address_t *saddr,
-                                         const struct flowi *fl,
-                                         struct xfrm_tmpl *tmpl,
-                                         struct xfrm_policy *pol, int *err,
-                                         unsigned short family);
-extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
-                                              xfrm_address_t *daddr,
-                                              xfrm_address_t *saddr,
-                                              unsigned short family,
-                                              u8 mode, u8 proto, u32 reqid);
-extern int xfrm_state_check_expire(struct xfrm_state *x);
-extern void xfrm_state_insert(struct xfrm_state *x);
-extern int xfrm_state_add(struct xfrm_state *x);
-extern int xfrm_state_update(struct xfrm_state *x);
-extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
-                                           const xfrm_address_t *daddr, __be32 spi,
-                                           u8 proto, unsigned short family);
-extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-                                                  const xfrm_address_t *daddr,
-                                                  const xfrm_address_t *saddr,
-                                                  u8 proto,
-                                                  unsigned short family);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
+int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
+                   int (*func)(struct xfrm_state *, int, void*), void *);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk);
+struct xfrm_state *xfrm_state_alloc(struct net *net);
+struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+                                  const xfrm_address_t *saddr,
+                                  const struct flowi *fl,
+                                  struct xfrm_tmpl *tmpl,
+                                  struct xfrm_policy *pol, int *err,
+                                  unsigned short family);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+                                      xfrm_address_t *daddr,
+                                      xfrm_address_t *saddr,
+                                      unsigned short family,
+                                      u8 mode, u8 proto, u32 reqid);
+int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_insert(struct xfrm_state *x);
+int xfrm_state_add(struct xfrm_state *x);
+int xfrm_state_update(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+                                    const xfrm_address_t *daddr, __be32 spi,
+                                    u8 proto, unsigned short family);
+struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+                                           const xfrm_address_t *daddr,
+                                           const xfrm_address_t *saddr,
+                                           u8 proto,
+                                           unsigned short family);
 #ifdef CONFIG_XFRM_SUB_POLICY
-extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
-                         int n, unsigned short family);
-extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
-                          int n, unsigned short family);
+int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
+                  unsigned short family);
+int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+                   unsigned short family);
 #else
 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
                                 int n, unsigned short family)
@@ -1462,68 +1470,69 @@ struct xfrmk_spdinfo {
        u32 spdhmcnt;
 };
 
-extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
-                                             u32 seq);
-extern int xfrm_state_delete(struct xfrm_state *x);
-extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-extern int xfrm_init_replay(struct xfrm_state *x);
-extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
-extern int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
-extern int xfrm_init_state(struct xfrm_state *x);
-extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-                     int encap_type);
-extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
-extern int xfrm_output_resume(struct sk_buff *skb, int err);
-extern int xfrm_output(struct sk_buff *skb);
-extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern void xfrm_local_error(struct sk_buff *skb, int mtu);
-extern int xfrm4_extract_header(struct sk_buff *skb);
-extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
-                          int encap_type);
-extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm4_rcv(struct sk_buff *skb);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+int xfrm_state_delete(struct xfrm_state *x);
+int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
+int xfrm_init_state(struct xfrm_state *x);
+int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output(struct sk_buff *skb);
+int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+void xfrm_local_error(struct sk_buff *skb, int mtu);
+int xfrm4_extract_header(struct sk_buff *skb);
+int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+                   int encap_type);
+int xfrm4_transport_finish(struct sk_buff *skb, int async);
+int xfrm4_rcv(struct sk_buff *skb);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
        return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
 }
 
-extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_output(struct sk_buff *skb);
-extern int xfrm4_output_finish(struct sk_buff *skb);
-extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
-extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
-extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-extern int xfrm6_extract_header(struct sk_buff *skb);
-extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
-extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm6_rcv(struct sk_buff *skb);
-extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
-                           xfrm_address_t *saddr, u8 proto);
-extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
-extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
-extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
-extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
-extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_output(struct sk_buff *skb);
-extern int xfrm6_output_finish(struct sk_buff *skb);
-extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
-                                u8 **prevhdr);
-extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_output(struct sk_buff *skb);
+int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
+int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_extract_header(struct sk_buff *skb);
+int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv(struct sk_buff *skb);
+int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
+                    xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
+int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_output(struct sk_buff *skb);
+int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
+                         u8 **prevhdr);
 
 #ifdef CONFIG_XFRM
-extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
+int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname,
+                    u8 __user *optval, int optlen);
 #else
 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
 {
@@ -1540,59 +1549,62 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
 
 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
 
-extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
-extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
-       int (*func)(struct xfrm_policy *, int, int, void*), void *);
-extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+                    int (*func)(struct xfrm_policy *, int, int, void*),
+                    void *);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
                                          u8 type, int dir,
                                          struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete,
                                          int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
+                                    u32 id, int delete, int *err);
 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
-extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
                                 u8 mode, u32 reqid, u8 proto,
                                 const xfrm_address_t *daddr,
                                 const xfrm_address_t *saddr, int create,
                                 unsigned short family);
-extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 
 #ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
-                     const struct xfrm_migrate *m, int num_bundles,
-                     const struct xfrm_kmaddress *k);
-extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
-extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
-                                             struct xfrm_migrate *m);
-extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
-                       struct xfrm_migrate *m, int num_bundles,
-                       struct xfrm_kmaddress *k);
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+              const struct xfrm_migrate *m, int num_bundles,
+              const struct xfrm_kmaddress *k);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m);
+struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+                                     struct xfrm_migrate *m);
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                struct xfrm_migrate *m, int num_bundles,
+                struct xfrm_kmaddress *k);
 #endif
 
-extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
-extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
-
-extern void xfrm_input_init(void);
-extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
-
-extern void xfrm_probe_algs(void);
-extern int xfrm_count_pfkey_auth_supported(void);
-extern int xfrm_count_pfkey_enc_supported(void);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
-                                                  int probe);
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
+int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
+             xfrm_address_t *addr);
+
+void xfrm_input_init(void);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+
+void xfrm_probe_algs(void);
+int xfrm_count_pfkey_auth_supported(void);
+int xfrm_count_pfkey_enc_supported(void);
+struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
+                                           int probe);
 
 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
                                    const xfrm_address_t *b)
index fe66533e9b7a51ef143acd33ff398bee47f71398..fb0a312bcb810c93622028ef93934d3779189142 100644 (file)
@@ -68,6 +68,7 @@ struct rsnd_scu_platform_info {
  *
  * A : generation
  */
+#define RSND_GEN_MASK  (0xF << 0)
 #define RSND_GEN1      (1 << 0) /* fixme */
 #define RSND_GEN2      (2 << 0) /* fixme */
 
index 60ae7c3db912de7e068452de1a1c1978cad0a662..4c2301d2ef1aa979ea0d6594ad1b6404368b920b 100644 (file)
@@ -618,6 +618,7 @@ TRACE_EVENT(block_rq_remap,
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
+               __field( unsigned int,  nr_bios         )
                __array( char,          rwbs,   RWBS_LEN)
        ),
 
@@ -627,15 +628,16 @@ TRACE_EVENT(block_rq_remap,
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
+               __entry->nr_bios        = blk_rq_count_bios(rq);
                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),
 
-       TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+       TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-                 (unsigned long long)__entry->old_sector)
+                 (unsigned long long)__entry->old_sector, __entry->nr_bios)
 );
 
 #endif /* _TRACE_BLOCK_H */
index 45702c3c3837f316709438a462d4a6517e0ba168..f18b3b76e01e22e00c00ee133b2ebdc1013bcc62 100644 (file)
@@ -42,6 +42,7 @@ struct extent_buffer;
                { BTRFS_TREE_LOG_OBJECTID,      "TREE_LOG"      },      \
                { BTRFS_QUOTA_TREE_OBJECTID,    "QUOTA_TREE"    },      \
                { BTRFS_TREE_RELOC_OBJECTID,    "TREE_RELOC"    },      \
+               { BTRFS_UUID_TREE_OBJECTID,     "UUID_RELOC"    },      \
                { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj)                                            \
diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h
new file mode 100644 (file)
index 0000000..f76dd4d
--- /dev/null
@@ -0,0 +1,67 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_CPU_MIGRATE_H
+
+#include <linux/tracepoint.h>
+
+#define __cpu_migrate_proto                    \
+       TP_PROTO(u64 timestamp,                 \
+                u32 cpu_hwid)
+#define __cpu_migrate_args                     \
+       TP_ARGS(timestamp,                      \
+               cpu_hwid)
+
+DECLARE_EVENT_CLASS(cpu_migrate,
+
+       __cpu_migrate_proto,
+       __cpu_migrate_args,
+
+       TP_STRUCT__entry(
+               __field(u64,    timestamp               )
+               __field(u32,    cpu_hwid                )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->cpu_hwid = cpu_hwid;
+       ),
+
+       TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
+               (unsigned long long)__entry->timestamp,
+               (unsigned long)__entry->cpu_hwid
+       )
+);
+
+#define __define_cpu_migrate_event(name)               \
+       DEFINE_EVENT(cpu_migrate, cpu_migrate_##name,   \
+               __cpu_migrate_proto,                    \
+               __cpu_migrate_args                      \
+       )
+
+__define_cpu_migrate_event(begin);
+__define_cpu_migrate_event(finish);
+__define_cpu_migrate_event(current);
+
+#undef __define_cpu_migrate
+#undef __cpu_migrate_proto
+#undef __cpu_migrate_args
+
+/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
+#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+
+/*
+ * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
+ * a whole-cluster migration:
+ */
+#define CPU_MIGRATE_ALL_CPUS 0x80000000U
+#endif
+
+#endif /* _TRACE_POWER_CPU_MIGRATE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE power_cpu_migrate
+#include <trace/define_trace.h>
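The DEFINE_EVENT() lines in this new header generate callable tracepoints named trace_cpu_migrate_begin(), trace_cpu_migrate_finish() and trace_cpu_migrate_current(). A rough sketch of how a migration path might emit them follows; the function name, arguments and timestamp source are assumptions for illustration, not call sites from this merge:

	#define CREATE_TRACE_POINTS
	#include <trace/events/power_cpu_migrate.h>

	static void example_switch_cluster(u64 now, u32 old_hwid, u32 new_hwid)
	{
		trace_cpu_migrate_begin(now, old_hwid);		/* entering the migration */
		/* ... move the task/cluster state here ... */
		trace_cpu_migrate_finish(now, new_hwid);	/* migration complete on the new CPU */
	}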
index f04b69b6abf251d6f0ba720db25822b11b953489..38f14d0264c3aa78f3b62b93cba405c655e23ae6 100644 (file)
@@ -78,4 +78,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* __ASM_GENERIC_SOCKET_H */
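
SO_MAX_PACING_RATE caps the rate, in bytes per second, at which the kernel paces transmissions on a socket. A minimal userspace sketch, assuming the asm-generic value of 47 shown above (per-architecture headers may use a different number):

	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_MAX_PACING_RATE
	#define SO_MAX_PACING_RATE 47	/* asm-generic value from this patch */
	#endif

	static int limit_pacing(int fd, unsigned int bytes_per_sec)
	{
		/* Cap the pacing rate of this socket; the default is unlimited. */
		if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			       &bytes_per_sec, sizeof(bytes_per_sec)) < 0) {
			perror("setsockopt(SO_MAX_PACING_RATE)");
			return -1;
		}
		return 0;
	}
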
index 550811712f78c71aad43b4a531b07e1df536b64f..28acbaf4a81ec091a54740c54adfd57ec2f9ad59 100644 (file)
@@ -223,6 +223,8 @@ struct drm_mode_get_connector {
        __u32 connection;
        __u32 mm_width, mm_height; /**< HxW in millimeters */
        __u32 subpixel;
+
+       __u32 pad;
 };
 
 #define DRM_MODE_PROP_PENDING  (1<<0)
index fa8b3adf9ffbbc478a7aae0f83ffbe3bd584a91b..46d41e8b0dccec30ec5b52f6dc772bf9e3088dc6 100644 (file)
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
 #define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA      3
 #define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA      2
 
+#define CIK_TILE_MODE_DEPTH_STENCIL_1D         5
+
 #endif
index 75cef3fd97add201693b6bf21f4e2f1754c96f43..db0b825b48109f2e8cc011f211749043757a33e5 100644 (file)
@@ -329,7 +329,6 @@ enum {
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
 #define AUDIT_ARCH_CRIS                (EM_CRIS|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_FRV         (EM_FRV)
-#define AUDIT_ARCH_H8300       (EM_H8_300)
 #define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_IA64                (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_M32R                (EM_M32R)
index 3ebe387fea4d809790a26bbfc9aa3cea02f20cfb..382251a1d21403acd817577d83c21f47d0389865 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_BCM_H
index 7b7148bded711b1346a2274a09af5b505e7af72e..b632045453202074ada263866052bc2a806e85bc 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_ERROR_H
index 4e27c82b564a13a6a4b55860c3cb78b51f86dc7e..844c8964bdfee3a3f4a7308bf0fd832e82754a89 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2011 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_GW_H
index 14966ddb7df1c5b056578ff622c77cd6400e8094..df944ed206a8e4bd23d9f9a3ec9e08c9e0ad6e3b 100644 (file)
@@ -5,6 +5,14 @@
  *
  * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */
 
 #ifndef CAN_NETLINK_H
index a814062b07191819c80812b843a46bd80870344b..c7d8c334e0ce26838c7cc611bd3ad1eb5a31a6c4 100644 (file)
@@ -8,6 +8,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_RAW_H
index 59c17a2d38ad4ce95be09055423907beed49b9a2..01529bd964387b4d571bbf689bf2bbf98328692d 100644 (file)
@@ -31,7 +31,6 @@
 #define EM_CRIS                76      /* Axis Communications 32-bit embedded processor */
 #define EM_V850                87      /* NEC v850 */
 #define EM_M32R                88      /* Renesas M32R */
-#define EM_H8_300      46      /* Renesas H8/300,300H,H8S */
 #define EM_MN10300     89      /* Panasonic/MEI MN10300, AM33 */
 #define EM_BLACKFIN     106     /* ADI Blackfin Processor */
 #define EM_TI_C6000    140     /* TI C6X DSPs */
index a17edda8a7816c2ba92e7c1b7aee3cce23b93f8f..9635a62f6f89c781a9086af47df6382c59fea161 100644 (file)
@@ -91,6 +91,8 @@
 #define BOND_XMIT_POLICY_LAYER2                0 /* layer 2 (MAC only), default */
 #define BOND_XMIT_POLICY_LAYER34       1 /* layer 3+4 (IP ^ (TCP || UDP)) */
 #define BOND_XMIT_POLICY_LAYER23       2 /* layer 2+3 (IP ^ MAC) */
+#define BOND_XMIT_POLICY_ENCAP23       3 /* encapsulated layer 2+3 */
+#define BOND_XMIT_POLICY_ENCAP34       4 /* encapsulated layer 3+4 */
 
 typedef struct ifbond {
        __s32 bond_mode;
index 80394e8dc3a348ed6ac7779a410c0bcda80ac284..8a1e346243b7cdcb0e9ab676e3ddcf086284d90f 100644 (file)
@@ -325,6 +325,17 @@ struct ifla_vxlan_port_range {
        __be16  high;
 };
 
+/* Bonding section */
+
+enum {
+       IFLA_BOND_UNSPEC,
+       IFLA_BOND_MODE,
+       IFLA_BOND_ACTIVE_SLAVE,
+       __IFLA_BOND_MAX,
+};
+
+#define IFLA_BOND_MAX  (__IFLA_BOND_MAX - 1)
+
 /* SR-IOV virtual function management section */
 
 enum {
index e0cecd2eabdc5f2289a1940bf3d00ad8ebc70ee8..6edc6b68badd5385d2fdea6e5f524714c4f3cb90 100644 (file)
@@ -21,6 +21,7 @@ enum {
        LO_FLAGS_READ_ONLY      = 1,
        LO_FLAGS_AUTOCLEAR      = 4,
        LO_FLAGS_PARTSCAN       = 8,
+       LO_FLAGS_USE_AIO        = 16,
 };
 
 #include <asm/posix_types.h>   /* for __kernel_old_dev_t */
index 174915420d3fe8231dc151519ceac551aceb148e..17c3af2c4bb958b636ec8553056709769dd9d73b 100644 (file)
@@ -5,6 +5,8 @@ header-y += nf_conntrack_ftp.h
 header-y += nf_conntrack_sctp.h
 header-y += nf_conntrack_tcp.h
 header-y += nf_conntrack_tuple_common.h
+header-y += nf_tables.h
+header-y += nf_tables_compat.h
 header-y += nf_nat.h
 header-y += nfnetlink.h
 header-y += nfnetlink_acct.h
index 8024cdf13b700560e9bd0c1f3df1bacbdd90b900..25d3b2f79c022e92cfd8f851e0f9ae8a2f7b731a 100644 (file)
 #ifndef _UAPI_IP_SET_H
 #define _UAPI_IP_SET_H
 
-
 #include <linux/types.h>
 
 /* The protocol version */
 #define IPSET_PROTOCOL         6
 
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE 255
+
 /* The max length of strings including NUL: set and type identifiers */
 #define IPSET_MAXNAMELEN       32
 
@@ -110,6 +112,7 @@ enum {
        IPSET_ATTR_IFACE,
        IPSET_ATTR_BYTES,
        IPSET_ATTR_PACKETS,
+       IPSET_ATTR_COMMENT,
        __IPSET_ATTR_ADT_MAX,
 };
 #define IPSET_ATTR_ADT_MAX     (__IPSET_ATTR_ADT_MAX - 1)
@@ -140,6 +143,7 @@ enum ipset_errno {
        IPSET_ERR_IPADDR_IPV4,
        IPSET_ERR_IPADDR_IPV6,
        IPSET_ERR_COUNTER,
+       IPSET_ERR_COMMENT,
 
        /* Type specific error codes */
        IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -176,6 +180,8 @@ enum ipset_cadt_flags {
        IPSET_FLAG_NOMATCH      = (1 << IPSET_FLAG_BIT_NOMATCH),
        IPSET_FLAG_BIT_WITH_COUNTERS = 3,
        IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS),
+       IPSET_FLAG_BIT_WITH_COMMENT = 4,
+       IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
        IPSET_FLAG_CADT_MAX     = 15,
 };
 
@@ -250,6 +256,14 @@ struct ip_set_req_get_set {
 #define IP_SET_OP_GET_BYINDEX  0x00000007      /* Get set name by index */
 /* Uses ip_set_req_get_set */
 
+#define IP_SET_OP_GET_FNAME    0x00000008      /* Get set index and family */
+struct ip_set_req_get_set_family {
+       unsigned int op;
+       unsigned int version;
+       unsigned int family;
+       union ip_set_name_index set;
+};
+
 #define IP_SET_OP_VERSION      0x00000100      /* Ask kernel version */
 struct ip_set_req_version {
        unsigned int op;
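
A hedged sketch of how userspace might issue the new IP_SET_OP_GET_FNAME request; the raw-socket getsockopt control path with SOL_IP/SO_IP_SET is an assumption carried over from how the existing ip_set_req_get_set queries are made, not something introduced by this hunk:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/netfilter/ipset/ip_set.h>

	static int get_set_family(int sockfd, const char *setname)
	{
		struct ip_set_req_get_set_family req;
		socklen_t size = sizeof(req);

		memset(&req, 0, sizeof(req));
		req.op = IP_SET_OP_GET_FNAME;
		req.version = IPSET_PROTOCOL;
		strncpy(req.set.name, setname, IPSET_MAXNAMELEN - 1);

		if (getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size) < 0)
			return -1;
		/* On success req.family holds the set's address family
		 * and req.set.index its numeric index. */
		return req.family;
	}
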
index 8dd803818ebe34c63db7792dadf4614632bdd93e..319f47128db8c117563efa26d4cd54e1c3ac2cc2 100644 (file)
@@ -25,6 +25,10 @@ enum ip_conntrack_info {
        IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
 };
 
+#define NF_CT_STATE_INVALID_BIT                        (1 << 0)
+#define NF_CT_STATE_BIT(ctinfo)                        (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
+#define NF_CT_STATE_UNTRACKED_BIT              (1 << (IP_CT_NUMBER + 1))
+
 /* Bitset representing status of connection. */
 enum ip_conntrack_status {
        /* It's an expected connection: bit 0 set.  This bit never changed */
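
The new helpers can be expanded by hand from the enum above (IP_CT_IS_REPLY == 3, IP_CT_NUMBER == 5); note that the original and reply directions of the same state map to the same bit. A stand-alone check:

	#include <stdio.h>
	#include <linux/netfilter/nf_conntrack_common.h>

	int main(void)
	{
		/* Both directions of ESTABLISHED share bit 1 (0x2). */
		printf("%#x %#x\n", NF_CT_STATE_BIT(IP_CT_ESTABLISHED),
		       NF_CT_STATE_BIT(IP_CT_ESTABLISHED_REPLY)); /* 0x2 0x2 */
		printf("%#x\n", NF_CT_STATE_BIT(IP_CT_NEW));       /* 0x8  */
		printf("%#x\n", NF_CT_STATE_UNTRACKED_BIT);        /* 0x40 */
		return 0;
	}
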
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
new file mode 100644 (file)
index 0000000..fbfd229
--- /dev/null
@@ -0,0 +1,718 @@
+#ifndef _LINUX_NF_TABLES_H
+#define _LINUX_NF_TABLES_H
+
+#define NFT_CHAIN_MAXNAMELEN 32
+
+enum nft_registers {
+       NFT_REG_VERDICT,
+       NFT_REG_1,
+       NFT_REG_2,
+       NFT_REG_3,
+       NFT_REG_4,
+       __NFT_REG_MAX
+};
+#define NFT_REG_MAX    (__NFT_REG_MAX - 1)
+
+/**
+ * enum nft_verdicts - nf_tables internal verdicts
+ *
+ * @NFT_CONTINUE: continue evaluation of the current rule
+ * @NFT_BREAK: terminate evaluation of the current rule
+ * @NFT_JUMP: push the current chain on the jump stack and jump to a chain
+ * @NFT_GOTO: jump to a chain without pushing the current chain on the jump stack
+ * @NFT_RETURN: return to the topmost chain on the jump stack
+ *
+ * The nf_tables verdicts share their numeric space with the netfilter verdicts.
+ */
+enum nft_verdicts {
+       NFT_CONTINUE    = -1,
+       NFT_BREAK       = -2,
+       NFT_JUMP        = -3,
+       NFT_GOTO        = -4,
+       NFT_RETURN      = -5,
+};
+
+/**
+ * enum nf_tables_msg_types - nf_tables netlink message types
+ *
+ * @NFT_MSG_NEWTABLE: create a new table (enum nft_table_attributes)
+ * @NFT_MSG_GETTABLE: get a table (enum nft_table_attributes)
+ * @NFT_MSG_DELTABLE: delete a table (enum nft_table_attributes)
+ * @NFT_MSG_NEWCHAIN: create a new chain (enum nft_chain_attributes)
+ * @NFT_MSG_GETCHAIN: get a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DELCHAIN: delete a chain (enum nft_chain_attributes)
+ * @NFT_MSG_NEWRULE: create a new rule (enum nft_rule_attributes)
+ * @NFT_MSG_GETRULE: get a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DELRULE: delete a rule (enum nft_rule_attributes)
+ * @NFT_MSG_NEWSET: create a new set (enum nft_set_attributes)
+ * @NFT_MSG_GETSET: get a set (enum nft_set_attributes)
+ * @NFT_MSG_DELSET: delete a set (enum nft_set_attributes)
+ * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
+ */
+enum nf_tables_msg_types {
+       NFT_MSG_NEWTABLE,
+       NFT_MSG_GETTABLE,
+       NFT_MSG_DELTABLE,
+       NFT_MSG_NEWCHAIN,
+       NFT_MSG_GETCHAIN,
+       NFT_MSG_DELCHAIN,
+       NFT_MSG_NEWRULE,
+       NFT_MSG_GETRULE,
+       NFT_MSG_DELRULE,
+       NFT_MSG_NEWSET,
+       NFT_MSG_GETSET,
+       NFT_MSG_DELSET,
+       NFT_MSG_NEWSETELEM,
+       NFT_MSG_GETSETELEM,
+       NFT_MSG_DELSETELEM,
+       NFT_MSG_MAX,
+};
+
+/**
+ * enum nft_list_attributes - nf_tables generic list netlink attributes
+ *
+ * @NFTA_LIST_ELEM: list element (NLA_NESTED)
+ */
+enum nft_list_attributes {
+       NFTA_LIST_UNPEC,
+       NFTA_LIST_ELEM,
+       __NFTA_LIST_MAX
+};
+#define NFTA_LIST_MAX          (__NFTA_LIST_MAX - 1)
+
+/**
+ * enum nft_hook_attributes - nf_tables netfilter hook netlink attributes
+ *
+ * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ */
+enum nft_hook_attributes {
+       NFTA_HOOK_UNSPEC,
+       NFTA_HOOK_HOOKNUM,
+       NFTA_HOOK_PRIORITY,
+       __NFTA_HOOK_MAX
+};
+#define NFTA_HOOK_MAX          (__NFTA_HOOK_MAX - 1)
+
+/**
+ * enum nft_table_flags - nf_tables table flags
+ *
+ * @NFT_TABLE_F_DORMANT: this table is not active
+ */
+enum nft_table_flags {
+       NFT_TABLE_F_DORMANT     = 0x1,
+};
+
+/**
+ * enum nft_table_attributes - nf_tables table netlink attributes
+ *
+ * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
+ * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
+ */
+enum nft_table_attributes {
+       NFTA_TABLE_UNSPEC,
+       NFTA_TABLE_NAME,
+       NFTA_TABLE_FLAGS,
+       __NFTA_TABLE_MAX
+};
+#define NFTA_TABLE_MAX         (__NFTA_TABLE_MAX - 1)
+
+/**
+ * enum nft_chain_attributes - nf_tables chain netlink attributes
+ *
+ * @NFTA_CHAIN_TABLE: name of the table containing the chain (NLA_STRING)
+ * @NFTA_CHAIN_HANDLE: numeric handle of the chain (NLA_U64)
+ * @NFTA_CHAIN_NAME: name of the chain (NLA_STRING)
+ * @NFTA_CHAIN_HOOK: hook specification for basechains (NLA_NESTED: nft_hook_attributes)
+ * @NFTA_CHAIN_POLICY: numeric policy of the chain (NLA_U32)
+ * @NFTA_CHAIN_USE: number of references to this chain (NLA_U32)
+ * @NFTA_CHAIN_TYPE: type name of the chain (NLA_NUL_STRING)
+ * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
+ */
+enum nft_chain_attributes {
+       NFTA_CHAIN_UNSPEC,
+       NFTA_CHAIN_TABLE,
+       NFTA_CHAIN_HANDLE,
+       NFTA_CHAIN_NAME,
+       NFTA_CHAIN_HOOK,
+       NFTA_CHAIN_POLICY,
+       NFTA_CHAIN_USE,
+       NFTA_CHAIN_TYPE,
+       NFTA_CHAIN_COUNTERS,
+       __NFTA_CHAIN_MAX
+};
+#define NFTA_CHAIN_MAX         (__NFTA_CHAIN_MAX - 1)
+
+/**
+ * enum nft_rule_attributes - nf_tables rule netlink attributes
+ *
+ * @NFTA_RULE_TABLE: name of the table containing the rule (NLA_STRING)
+ * @NFTA_RULE_CHAIN: name of the chain containing the rule (NLA_STRING)
+ * @NFTA_RULE_HANDLE: numeric handle of the rule (NLA_U64)
+ * @NFTA_RULE_EXPRESSIONS: list of expressions (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes)
+ * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
+ */
+enum nft_rule_attributes {
+       NFTA_RULE_UNSPEC,
+       NFTA_RULE_TABLE,
+       NFTA_RULE_CHAIN,
+       NFTA_RULE_HANDLE,
+       NFTA_RULE_EXPRESSIONS,
+       NFTA_RULE_COMPAT,
+       NFTA_RULE_POSITION,
+       __NFTA_RULE_MAX
+};
+#define NFTA_RULE_MAX          (__NFTA_RULE_MAX - 1)
+
+/**
+ * enum nft_rule_compat_flags - nf_tables rule compat flags
+ *
+ * @NFT_RULE_COMPAT_F_INV: invert the check result
+ */
+enum nft_rule_compat_flags {
+       NFT_RULE_COMPAT_F_INV   = (1 << 1),
+       NFT_RULE_COMPAT_F_MASK  = NFT_RULE_COMPAT_F_INV,
+};
+
+/**
+ * enum nft_rule_compat_attributes - nf_tables rule compat attributes
+ *
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of the handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
+ */
+enum nft_rule_compat_attributes {
+       NFTA_RULE_COMPAT_UNSPEC,
+       NFTA_RULE_COMPAT_PROTO,
+       NFTA_RULE_COMPAT_FLAGS,
+       __NFTA_RULE_COMPAT_MAX
+};
+#define NFTA_RULE_COMPAT_MAX   (__NFTA_RULE_COMPAT_MAX - 1)
+
+/**
+ * enum nft_set_flags - nf_tables set flags
+ *
+ * @NFT_SET_ANONYMOUS: name allocation, automatic cleanup on unlink
+ * @NFT_SET_CONSTANT: set contents may not change while bound
+ * @NFT_SET_INTERVAL: set contains intervals
+ * @NFT_SET_MAP: set is used as a dictionary
+ */
+enum nft_set_flags {
+       NFT_SET_ANONYMOUS               = 0x1,
+       NFT_SET_CONSTANT                = 0x2,
+       NFT_SET_INTERVAL                = 0x4,
+       NFT_SET_MAP                     = 0x8,
+};
+
+/**
+ * enum nft_set_attributes - nf_tables set netlink attributes
+ *
+ * @NFTA_SET_TABLE: table name (NLA_STRING)
+ * @NFTA_SET_NAME: set name (NLA_STRING)
+ * @NFTA_SET_FLAGS: bitmask of enum nft_set_flags (NLA_U32)
+ * @NFTA_SET_KEY_TYPE: key data type, informational purpose only (NLA_U32)
+ * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
+ * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
+ * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
+ */
+enum nft_set_attributes {
+       NFTA_SET_UNSPEC,
+       NFTA_SET_TABLE,
+       NFTA_SET_NAME,
+       NFTA_SET_FLAGS,
+       NFTA_SET_KEY_TYPE,
+       NFTA_SET_KEY_LEN,
+       NFTA_SET_DATA_TYPE,
+       NFTA_SET_DATA_LEN,
+       __NFTA_SET_MAX
+};
+#define NFTA_SET_MAX           (__NFTA_SET_MAX - 1)
+
+/**
+ * enum nft_set_elem_flags - nf_tables set element flags
+ *
+ * @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval
+ */
+enum nft_set_elem_flags {
+       NFT_SET_ELEM_INTERVAL_END       = 0x1,
+};
+
+/**
+ * enum nft_set_elem_attributes - nf_tables set element netlink attributes
+ *
+ * @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
+ * @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
+ * @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ */
+enum nft_set_elem_attributes {
+       NFTA_SET_ELEM_UNSPEC,
+       NFTA_SET_ELEM_KEY,
+       NFTA_SET_ELEM_DATA,
+       NFTA_SET_ELEM_FLAGS,
+       __NFTA_SET_ELEM_MAX
+};
+#define NFTA_SET_ELEM_MAX      (__NFTA_SET_ELEM_MAX - 1)
+
+/**
+ * enum nft_set_elem_list_attributes - nf_tables set element list netlink attributes
+ *
+ * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
+ */
+enum nft_set_elem_list_attributes {
+       NFTA_SET_ELEM_LIST_UNSPEC,
+       NFTA_SET_ELEM_LIST_TABLE,
+       NFTA_SET_ELEM_LIST_SET,
+       NFTA_SET_ELEM_LIST_ELEMENTS,
+       __NFTA_SET_ELEM_LIST_MAX
+};
+#define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1)
+
+/**
+ * enum nft_data_types - nf_tables data types
+ *
+ * @NFT_DATA_VALUE: generic data
+ * @NFT_DATA_VERDICT: netfilter verdict
+ *
+ * The type of data is usually determined by the kernel directly and is not
+ * explicitly specified by userspace. The only difference are sets, where
+ * userspace specifies the key and mapping data types.
+ *
+ * The values 0xffffff00-0xffffffff are reserved for internally used types.
+ * The remaining range can be freely used by userspace to encode types, all
+ * values are equivalent to NFT_DATA_VALUE.
+ */
+enum nft_data_types {
+       NFT_DATA_VALUE,
+       NFT_DATA_VERDICT        = 0xffffff00U,
+};
+
+#define NFT_DATA_RESERVED_MASK 0xffffff00U
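
Since the mask keeps the top 24 bits, a type falls in the reserved 0xffffff00-0xffffffff range exactly when masking leaves those bits all set. A small classification sketch (illustration only, not a function from this patch):

	#include <linux/netfilter/nf_tables.h>

	/* True for kernel-internal types such as NFT_DATA_VERDICT (0xffffff00);
	 * everything below the reserved range behaves like NFT_DATA_VALUE. */
	static inline int nft_type_is_reserved(unsigned int type)
	{
		return (type & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK;
	}
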
+
+/**
+ * enum nft_data_attributes - nf_tables data netlink attributes
+ *
+ * @NFTA_DATA_VALUE: generic data (NLA_BINARY)
+ * @NFTA_DATA_VERDICT: nf_tables verdict (NLA_NESTED: nft_verdict_attributes)
+ */
+enum nft_data_attributes {
+       NFTA_DATA_UNSPEC,
+       NFTA_DATA_VALUE,
+       NFTA_DATA_VERDICT,
+       __NFTA_DATA_MAX
+};
+#define NFTA_DATA_MAX          (__NFTA_DATA_MAX - 1)
+
+/**
+ * enum nft_verdict_attributes - nf_tables verdict netlink attributes
+ *
+ * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
+ * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ */
+enum nft_verdict_attributes {
+       NFTA_VERDICT_UNSPEC,
+       NFTA_VERDICT_CODE,
+       NFTA_VERDICT_CHAIN,
+       __NFTA_VERDICT_MAX
+};
+#define NFTA_VERDICT_MAX       (__NFTA_VERDICT_MAX - 1)
+
+/**
+ * enum nft_expr_attributes - nf_tables expression netlink attributes
+ *
+ * @NFTA_EXPR_NAME: name of the expression type (NLA_STRING)
+ * @NFTA_EXPR_DATA: type specific data (NLA_NESTED)
+ */
+enum nft_expr_attributes {
+       NFTA_EXPR_UNSPEC,
+       NFTA_EXPR_NAME,
+       NFTA_EXPR_DATA,
+       __NFTA_EXPR_MAX
+};
+#define NFTA_EXPR_MAX          (__NFTA_EXPR_MAX - 1)
+
+/**
+ * enum nft_immediate_attributes - nf_tables immediate expression netlink attributes
+ *
+ * @NFTA_IMMEDIATE_DREG: destination register to load data into (NLA_U32)
+ * @NFTA_IMMEDIATE_DATA: data to load (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_immediate_attributes {
+       NFTA_IMMEDIATE_UNSPEC,
+       NFTA_IMMEDIATE_DREG,
+       NFTA_IMMEDIATE_DATA,
+       __NFTA_IMMEDIATE_MAX
+};
+#define NFTA_IMMEDIATE_MAX     (__NFTA_IMMEDIATE_MAX - 1)
+
+/**
+ * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
+ *
+ * @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_LEN: length of operands (NLA_U32)
+ * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ *
+ * The bitwise expression performs the following operation:
+ *
+ * dreg = (sreg & mask) ^ xor
+ *
+ * which allows all basic bitwise operations to be expressed:
+ *
+ *             mask    xor
+ * NOT:                1       1
+ * OR:         0       x
+ * XOR:                1       x
+ * AND:                x       0
+ */
+enum nft_bitwise_attributes {
+       NFTA_BITWISE_UNSPEC,
+       NFTA_BITWISE_SREG,
+       NFTA_BITWISE_DREG,
+       NFTA_BITWISE_LEN,
+       NFTA_BITWISE_MASK,
+       NFTA_BITWISE_XOR,
+       __NFTA_BITWISE_MAX
+};
+#define NFTA_BITWISE_MAX       (__NFTA_BITWISE_MAX - 1)
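
The mask/xor table can be checked directly: each basic operation against a constant c reduces to dreg = (sreg & mask) ^ xor with the listed operands. A stand-alone check in plain C (not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t b = 0x12345678, c = 0x00ff00ff;

		assert(((b & ~c)  ^ c)    == (b | c));	/* OR:  mask = ~c, xor = c  */
		assert(((b & c)   ^ 0)    == (b & c));	/* AND: mask = c,  xor = 0  */
		assert(((b & ~0u) ^ c)    == (b ^ c));	/* XOR: mask = ~0, xor = c  */
		assert(((b & ~0u) ^ ~0u)  == ~b);	/* NOT: mask = ~0, xor = ~0 */
		return 0;
	}
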
+
+/**
+ * enum nft_byteorder_ops - nf_tables byteorder operators
+ *
+ * @NFT_BYTEORDER_NTOH: network to host operator
+ * @NFT_BYTEORDER_HTON: host to network operator
+ */
+enum nft_byteorder_ops {
+       NFT_BYTEORDER_NTOH,
+       NFT_BYTEORDER_HTON,
+};
+
+/**
+ * enum nft_byteorder_attributes - nf_tables byteorder expression netlink attributes
+ *
+ * @NFTA_BYTEORDER_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_OP: operator (NLA_U32: enum nft_byteorder_ops)
+ * @NFTA_BYTEORDER_LEN: length of the data (NLA_U32)
+ * @NFTA_BYTEORDER_SIZE: data size in bytes (NLA_U32: 2 or 4)
+ */
+enum nft_byteorder_attributes {
+       NFTA_BYTEORDER_UNSPEC,
+       NFTA_BYTEORDER_SREG,
+       NFTA_BYTEORDER_DREG,
+       NFTA_BYTEORDER_OP,
+       NFTA_BYTEORDER_LEN,
+       NFTA_BYTEORDER_SIZE,
+       __NFTA_BYTEORDER_MAX
+};
+#define NFTA_BYTEORDER_MAX     (__NFTA_BYTEORDER_MAX - 1)
+
+/**
+ * enum nft_cmp_ops - nf_tables relational operator
+ *
+ * @NFT_CMP_EQ: equal
+ * @NFT_CMP_NEQ: not equal
+ * @NFT_CMP_LT: less than
+ * @NFT_CMP_LTE: less than or equal to
+ * @NFT_CMP_GT: greater than
+ * @NFT_CMP_GTE: greater than or equal to
+ */
+enum nft_cmp_ops {
+       NFT_CMP_EQ,
+       NFT_CMP_NEQ,
+       NFT_CMP_LT,
+       NFT_CMP_LTE,
+       NFT_CMP_GT,
+       NFT_CMP_GTE,
+};
+
+/**
+ * enum nft_cmp_attributes - nf_tables cmp expression netlink attributes
+ *
+ * @NFTA_CMP_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_CMP_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_CMP_DATA: data to compare against (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_cmp_attributes {
+       NFTA_CMP_UNSPEC,
+       NFTA_CMP_SREG,
+       NFTA_CMP_OP,
+       NFTA_CMP_DATA,
+       __NFTA_CMP_MAX
+};
+#define NFTA_CMP_MAX           (__NFTA_CMP_MAX - 1)
+
+/**
+ * enum nft_lookup_attributes - nf_tables set lookup expression netlink attributes
+ *
+ * @NFTA_LOOKUP_SET: name of the set to look up in (NLA_STRING)
+ * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
+ * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
+ */
+enum nft_lookup_attributes {
+       NFTA_LOOKUP_UNSPEC,
+       NFTA_LOOKUP_SET,
+       NFTA_LOOKUP_SREG,
+       NFTA_LOOKUP_DREG,
+       __NFTA_LOOKUP_MAX
+};
+#define NFTA_LOOKUP_MAX                (__NFTA_LOOKUP_MAX - 1)
+
+/**
+ * enum nft_payload_bases - nf_tables payload expression offset bases
+ *
+ * @NFT_PAYLOAD_LL_HEADER: link layer header
+ * @NFT_PAYLOAD_NETWORK_HEADER: network header
+ * @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ */
+enum nft_payload_bases {
+       NFT_PAYLOAD_LL_HEADER,
+       NFT_PAYLOAD_NETWORK_HEADER,
+       NFT_PAYLOAD_TRANSPORT_HEADER,
+};
+
+/**
+ * enum nft_payload_attributes - nf_tables payload expression netlink attributes
+ *
+ * @NFTA_PAYLOAD_DREG: destination register to load data into (NLA_U32: nft_registers)
+ * @NFTA_PAYLOAD_BASE: payload base (NLA_U32: nft_payload_bases)
+ * @NFTA_PAYLOAD_OFFSET: payload offset relative to base (NLA_U32)
+ * @NFTA_PAYLOAD_LEN: payload length (NLA_U32)
+ */
+enum nft_payload_attributes {
+       NFTA_PAYLOAD_UNSPEC,
+       NFTA_PAYLOAD_DREG,
+       NFTA_PAYLOAD_BASE,
+       NFTA_PAYLOAD_OFFSET,
+       NFTA_PAYLOAD_LEN,
+       __NFTA_PAYLOAD_MAX
+};
+#define NFTA_PAYLOAD_MAX       (__NFTA_PAYLOAD_MAX - 1)
+
+/**
+ * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes
+ *
+ * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8)
+ * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
+ * @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
+ */
+enum nft_exthdr_attributes {
+       NFTA_EXTHDR_UNSPEC,
+       NFTA_EXTHDR_DREG,
+       NFTA_EXTHDR_TYPE,
+       NFTA_EXTHDR_OFFSET,
+       NFTA_EXTHDR_LEN,
+       __NFTA_EXTHDR_MAX
+};
+#define NFTA_EXTHDR_MAX                (__NFTA_EXTHDR_MAX - 1)
+
+/**
+ * enum nft_meta_keys - nf_tables meta expression keys
+ *
+ * @NFT_META_LEN: packet length (skb->len)
+ * @NFT_META_PROTOCOL: packet ethertype protocol (skb->protocol), invalid in OUTPUT
+ * @NFT_META_PRIORITY: packet priority (skb->priority)
+ * @NFT_META_MARK: packet mark (skb->mark)
+ * @NFT_META_IIF: packet input interface index (dev->ifindex)
+ * @NFT_META_OIF: packet output interface index (dev->ifindex)
+ * @NFT_META_IIFNAME: packet input interface name (dev->name)
+ * @NFT_META_OIFNAME: packet output interface name (dev->name)
+ * @NFT_META_IIFTYPE: packet input interface type (dev->type)
+ * @NFT_META_OIFTYPE: packet output interface type (dev->type)
+ * @NFT_META_SKUID: originating socket UID (fsuid)
+ * @NFT_META_SKGID: originating socket GID (fsgid)
+ * @NFT_META_NFTRACE: packet nftrace bit
+ * @NFT_META_RTCLASSID: realm value of packet's route (skb->dst->tclassid)
+ * @NFT_META_SECMARK: packet secmark (skb->secmark)
+ */
+enum nft_meta_keys {
+       NFT_META_LEN,
+       NFT_META_PROTOCOL,
+       NFT_META_PRIORITY,
+       NFT_META_MARK,
+       NFT_META_IIF,
+       NFT_META_OIF,
+       NFT_META_IIFNAME,
+       NFT_META_OIFNAME,
+       NFT_META_IIFTYPE,
+       NFT_META_OIFTYPE,
+       NFT_META_SKUID,
+       NFT_META_SKGID,
+       NFT_META_NFTRACE,
+       NFT_META_RTCLASSID,
+       NFT_META_SECMARK,
+};
+
+/**
+ * enum nft_meta_attributes - nf_tables meta expression netlink attributes
+ *
+ * @NFTA_META_DREG: destination register (NLA_U32)
+ * @NFTA_META_KEY: meta data item to load (NLA_U32: nft_meta_keys)
+ */
+enum nft_meta_attributes {
+       NFTA_META_UNSPEC,
+       NFTA_META_DREG,
+       NFTA_META_KEY,
+       __NFTA_META_MAX
+};
+#define NFTA_META_MAX          (__NFTA_META_MAX - 1)
+
+/**
+ * enum nft_ct_keys - nf_tables ct expression keys
+ *
+ * @NFT_CT_STATE: conntrack state (bitmask of enum ip_conntrack_info)
+ * @NFT_CT_DIRECTION: conntrack direction (enum ip_conntrack_dir)
+ * @NFT_CT_STATUS: conntrack status (bitmask of enum ip_conntrack_status)
+ * @NFT_CT_MARK: conntrack mark value
+ * @NFT_CT_SECMARK: conntrack secmark value
+ * @NFT_CT_EXPIRATION: relative conntrack expiration time in ms
+ * @NFT_CT_HELPER: connection tracking helper assigned to conntrack
+ * @NFT_CT_L3PROTOCOL: conntrack layer 3 protocol
+ * @NFT_CT_SRC: conntrack layer 3 protocol source (IPv4/IPv6 address)
+ * @NFT_CT_DST: conntrack layer 3 protocol destination (IPv4/IPv6 address)
+ * @NFT_CT_PROTOCOL: conntrack layer 4 protocol
+ * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
+ * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
+ */
+enum nft_ct_keys {
+       NFT_CT_STATE,
+       NFT_CT_DIRECTION,
+       NFT_CT_STATUS,
+       NFT_CT_MARK,
+       NFT_CT_SECMARK,
+       NFT_CT_EXPIRATION,
+       NFT_CT_HELPER,
+       NFT_CT_L3PROTOCOL,
+       NFT_CT_SRC,
+       NFT_CT_DST,
+       NFT_CT_PROTOCOL,
+       NFT_CT_PROTO_SRC,
+       NFT_CT_PROTO_DST,
+};
+
+/**
+ * enum nft_ct_attributes - nf_tables ct expression netlink attributes
+ *
+ * @NFTA_CT_DREG: destination register (NLA_U32)
+ * @NFTA_CT_KEY: conntrack data item to load (NLA_U32: nft_ct_keys)
+ * @NFTA_CT_DIRECTION: direction in case of directional keys (NLA_U8)
+ */
+enum nft_ct_attributes {
+       NFTA_CT_UNSPEC,
+       NFTA_CT_DREG,
+       NFTA_CT_KEY,
+       NFTA_CT_DIRECTION,
+       __NFTA_CT_MAX
+};
+#define NFTA_CT_MAX            (__NFTA_CT_MAX - 1)
+
+/**
+ * enum nft_limit_attributes - nf_tables limit expression netlink attributes
+ *
+ * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
+ * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ */
+enum nft_limit_attributes {
+       NFTA_LIMIT_UNSPEC,
+       NFTA_LIMIT_RATE,
+       NFTA_LIMIT_UNIT,
+       __NFTA_LIMIT_MAX
+};
+#define NFTA_LIMIT_MAX         (__NFTA_LIMIT_MAX - 1)
+
+/**
+ * enum nft_counter_attributes - nf_tables counter expression netlink attributes
+ *
+ * @NFTA_COUNTER_BYTES: number of bytes (NLA_U64)
+ * @NFTA_COUNTER_PACKETS: number of packets (NLA_U64)
+ */
+enum nft_counter_attributes {
+       NFTA_COUNTER_UNSPEC,
+       NFTA_COUNTER_BYTES,
+       NFTA_COUNTER_PACKETS,
+       __NFTA_COUNTER_MAX
+};
+#define NFTA_COUNTER_MAX       (__NFTA_COUNTER_MAX - 1)
+
+/**
+ * enum nft_log_attributes - nf_tables log expression netlink attributes
+ *
+ * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
+ * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
+ * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
+ * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ */
+enum nft_log_attributes {
+       NFTA_LOG_UNSPEC,
+       NFTA_LOG_GROUP,
+       NFTA_LOG_PREFIX,
+       NFTA_LOG_SNAPLEN,
+       NFTA_LOG_QTHRESHOLD,
+       __NFTA_LOG_MAX
+};
+#define NFTA_LOG_MAX           (__NFTA_LOG_MAX - 1)
+
+/**
+ * enum nft_reject_types - nf_tables reject expression reject types
+ *
+ * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
+ * @NFT_REJECT_TCP_RST: reject using TCP RST
+ */
+enum nft_reject_types {
+       NFT_REJECT_ICMP_UNREACH,
+       NFT_REJECT_TCP_RST,
+};
+
+/**
+ * enum nft_reject_attributes - nf_tables reject expression netlink attributes
+ *
+ * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
+ * @NFTA_REJECT_ICMP_CODE: ICMP code to use (NLA_U8)
+ */
+enum nft_reject_attributes {
+       NFTA_REJECT_UNSPEC,
+       NFTA_REJECT_TYPE,
+       NFTA_REJECT_ICMP_CODE,
+       __NFTA_REJECT_MAX
+};
+#define NFTA_REJECT_MAX                (__NFTA_REJECT_MAX - 1)
+
+/**
+ * enum nft_nat_types - nf_tables nat expression NAT types
+ *
+ * @NFT_NAT_SNAT: source NAT
+ * @NFT_NAT_DNAT: destination NAT
+ */
+enum nft_nat_types {
+       NFT_NAT_SNAT,
+       NFT_NAT_DNAT,
+};
+
+/**
+ * enum nft_nat_attributes - nf_tables nat expression netlink attributes
+ *
+ * @NFTA_NAT_TYPE: NAT type (NLA_U32: nft_nat_types)
+ * @NFTA_NAT_FAMILY: NAT family (NLA_U32)
+ * @NFTA_NAT_REG_ADDR_MIN: source register of address range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ */
+enum nft_nat_attributes {
+       NFTA_NAT_UNSPEC,
+       NFTA_NAT_TYPE,
+       NFTA_NAT_FAMILY,
+       NFTA_NAT_REG_ADDR_MIN,
+       NFTA_NAT_REG_ADDR_MAX,
+       NFTA_NAT_REG_PROTO_MIN,
+       NFTA_NAT_REG_PROTO_MAX,
+       __NFTA_NAT_MAX
+};
+#define NFTA_NAT_MAX           (__NFTA_NAT_MAX - 1)
+
+#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nf_tables_compat.h b/include/uapi/linux/netfilter/nf_tables_compat.h
new file mode 100644 (file)
index 0000000..8310f5f
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef _NFT_COMPAT_NFNETLINK_H_
+#define _NFT_COMPAT_NFNETLINK_H_
+
+enum nft_target_attributes {
+       NFTA_TARGET_UNSPEC,
+       NFTA_TARGET_NAME,
+       NFTA_TARGET_REV,
+       NFTA_TARGET_INFO,
+       __NFTA_TARGET_MAX
+};
+#define NFTA_TARGET_MAX                (__NFTA_TARGET_MAX - 1)
+
+enum nft_match_attributes {
+       NFTA_MATCH_UNSPEC,
+       NFTA_MATCH_NAME,
+       NFTA_MATCH_REV,
+       NFTA_MATCH_INFO,
+       __NFTA_MATCH_MAX
+};
+#define NFTA_MATCH_MAX         (__NFTA_MATCH_MAX - 1)
+
+#define NFT_COMPAT_NAME_MAX    32
+
+enum {
+       NFNL_MSG_COMPAT_GET,
+       NFNL_MSG_COMPAT_MAX
+};
+
+enum {
+       NFTA_COMPAT_UNSPEC = 0,
+       NFTA_COMPAT_NAME,
+       NFTA_COMPAT_REV,
+       NFTA_COMPAT_TYPE,
+       __NFTA_COMPAT_MAX,
+};
+#define NFTA_COMPAT_MAX (__NFTA_COMPAT_MAX - 1)
+
+#endif
index 4a4efafad5f46664e1f60914eb52d8eb00805b84..596ddd45253c02b1f611c0655b649e651109485a 100644 (file)
@@ -18,6 +18,8 @@ enum nfnetlink_groups {
 #define NFNLGRP_CONNTRACK_EXP_UPDATE   NFNLGRP_CONNTRACK_EXP_UPDATE
        NFNLGRP_CONNTRACK_EXP_DESTROY,
 #define NFNLGRP_CONNTRACK_EXP_DESTROY  NFNLGRP_CONNTRACK_EXP_DESTROY
+       NFNLGRP_NFTABLES,
+#define NFNLGRP_NFTABLES                NFNLGRP_NFTABLES
        __NFNLGRP_MAX,
 };
 #define NFNLGRP_MAX    (__NFNLGRP_MAX - 1)
@@ -51,6 +53,12 @@ struct nfgenmsg {
 #define NFNL_SUBSYS_ACCT               7
 #define NFNL_SUBSYS_CTNETLINK_TIMEOUT  8
 #define NFNL_SUBSYS_CTHELPER           9
-#define NFNL_SUBSYS_COUNT              10
+#define NFNL_SUBSYS_NFTABLES           10
+#define NFNL_SUBSYS_NFT_COMPAT         11
+#define NFNL_SUBSYS_COUNT              12
+
+/* Reserved control nfnetlink messages */
+#define NFNL_MSG_BATCH_BEGIN           NLMSG_MIN_TYPE
+#define NFNL_MSG_BATCH_END             NLMSG_MIN_TYPE+1
 
 #endif /* _UAPI_NFNETLINK_H */
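
The two reserved control types sit directly above the netlink control range (NLMSG_MIN_TYPE is 0x10), so a transaction is framed as BATCH_BEGIN (0x10), the nf_tables messages, then BATCH_END (0x11). A hedged sketch of building the begin header; naming the target subsystem through res_id is an assumption about the batching convention, not spelled out in this hunk:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/netfilter/nfnetlink.h>

	static size_t put_batch_begin(char *buf, uint32_t seq)
	{
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		struct nfgenmsg *nfg;

		memset(buf, 0, NLMSG_SPACE(sizeof(*nfg)));
		nlh->nlmsg_len   = NLMSG_LENGTH(sizeof(*nfg));
		nlh->nlmsg_type  = NFNL_MSG_BATCH_BEGIN;
		nlh->nlmsg_flags = NLM_F_REQUEST;
		nlh->nlmsg_seq   = seq;

		nfg = (struct nfgenmsg *)NLMSG_DATA(nlh);
		nfg->nfgen_family = AF_UNSPEC;
		nfg->version      = NFNETLINK_V0;
		nfg->res_id       = htons(NFNL_SUBSYS_NFTABLES); /* assumed convention */
		return NLMSG_SPACE(sizeof(*nfg));
	}
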
index a2810a7c5e3002d63c9651e25830431a39d51abb..1ab0b97b3a1e68336485b2679045024e86ff5444 100644 (file)
@@ -6,6 +6,8 @@ enum ctnl_timeout_msg_types {
        IPCTNL_MSG_TIMEOUT_NEW,
        IPCTNL_MSG_TIMEOUT_GET,
        IPCTNL_MSG_TIMEOUT_DELETE,
+       IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+       IPCTNL_MSG_TIMEOUT_DEFAULT_GET,
 
        IPCTNL_MSG_TIMEOUT_MAX
 };
index baa7852468ef6eced239446f81b7b88b765e083e..0890556f779e489c16aa57490f3d4fef97e7e86e 100644 (file)
 #define PCI_MSIX_PBA           8       /* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR      0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET   0xfffffff8 /* Offset into specified BAR */
-#define  PCI_MSIX_FLAGS_BIRMASK        (7 << 0)   /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF    12      /* size of MSIX registers */
 
 /* MSI-X entry's format */
 #define  PCI_EXP_DEVCAP2_OBFF_MSG      0x00040000 /* New message signaling */
 #define  PCI_EXP_DEVCAP2_OBFF_WAKE     0x00080000 /* Re-use WAKE# for OBFF */
 #define PCI_EXP_DEVCTL2                40      /* Device Control 2 */
-#define  PCI_EXP_DEVCTL2_ARI           0x20    /* Alternative Routing-ID */
+#define  PCI_EXP_DEVCTL2_COMP_TIMEOUT  0x000f  /* Completion Timeout Value */
+#define  PCI_EXP_DEVCTL2_ARI           0x0020  /* Alternative Routing-ID */
 #define  PCI_EXP_DEVCTL2_IDO_REQ_EN    0x0100  /* Allow IDO for requests */
 #define  PCI_EXP_DEVCTL2_IDO_CMP_EN    0x0200  /* Allow IDO for completions */
 #define  PCI_EXP_DEVCTL2_LTR_EN                0x0400  /* Enable LTR mechanism */
index 40a1fb8073961425249d50110a6de04f856feac9..009a655a5d354c51e20fb34cb12ca653e94f9b71 100644 (file)
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
        union {
                __u64   capabilities;
                struct {
-                       __u64   cap_usr_time            : 1,
-                               cap_usr_rdpmc           : 1,
-                               cap_usr_time_zero       : 1,
-                               cap_____res             : 61;
+                       __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+                               cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */
+
+                               cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
+                               cap_user_time           : 1, /* The time_* fields are used */
+                               cap_user_time_zero      : 1, /* The time_zero field is used */
+                               cap_____res             : 59;
                };
        };
 
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;
+       __u32   size;                   /* Header size up to __reserved[] fields. */
 
                /*
                 * Hole for extension of the self monitor capabilities
                 */
 
-       __u64   __reserved[119];        /* align to 1k */
+       __u8    __reserved[118*8+4];    /* align to 1k. */
 
        /*
         * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,
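
With the renamed capability bits, a userspace reader can turn a raw cycle count into a perf timestamp from time_zero/time_mult/time_shift exactly as in the formula quoted above. A minimal sketch (it omits the seqcount/lock retry loop a real reader needs):

	#include <stdint.h>
	#include <linux/perf_event.h>

	static uint64_t cycles_to_perf_time(const struct perf_event_mmap_page *pc,
					    uint64_t cyc)
	{
		uint64_t quot, rem;

		if (!pc->cap_user_time_zero)
			return 0;	/* kernel does not offer the conversion */

		quot = cyc >> pc->time_shift;
		rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);
		return pc->time_zero + quot * pc->time_mult +
		       ((rem * pc->time_mult) >> pc->time_shift);
	}
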
index 9b829134d42243bdfc6816f9272b75ea82c9b934..f2624b549e6187155c2f6677d308b2bfc3e60acd 100644 (file)
@@ -357,6 +357,8 @@ enum {
        TCA_HTB_CTAB,
        TCA_HTB_RTAB,
        TCA_HTB_DIRECT_QLEN,
+       TCA_HTB_RATE64,
+       TCA_HTB_CEIL64,
        __TCA_HTB_MAX,
 };
 
index 0623ec4e728f09550fd8c5e0e464f4a771265bdb..56f121605c998c28e8a173067e94e24ec7d9c653 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += tc_csum.h
+header-y += tc_defact.h
 header-y += tc_gact.h
 header-y += tc_ipt.h
 header-y += tc_mirred.h
similarity index 75%
rename from include/linux/tc_act/tc_defact.h
rename to include/uapi/linux/tc_act/tc_defact.h
index 6f65d07c7ce25cf343a2c80ecd6ba70562331bfc..17dddb40f74043e40ac09e36e15c925c5aa13df0 100644 (file)
@@ -6,7 +6,7 @@
 struct tc_defact {
        tc_gen;
 };
-                                                                                
+
 enum {
        TCA_DEF_UNSPEC,
        TCA_DEF_TM,
index 0b233c56b0e402ef75f691b5dcf6c6f1cc87a01b..e3ddd86c90a68610a20625729a99cf8effea8f31 100644 (file)
@@ -87,8 +87,10 @@ enum {
        IB_USER_VERBS_CMD_CLOSE_XRCD,
        IB_USER_VERBS_CMD_CREATE_XSRQ,
        IB_USER_VERBS_CMD_OPEN_QP,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_CMD_DESTROY_FLOW
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 
 /*
@@ -126,6 +128,7 @@ struct ib_uverbs_cmd_hdr {
        __u16 out_words;
 };
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 struct ib_uverbs_cmd_hdr_ex {
        __u32 command;
        __u16 in_words;
@@ -134,6 +137,7 @@ struct ib_uverbs_cmd_hdr_ex {
        __u16 provider_out_words;
        __u32 cmd_hdr_reserved;
 };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 struct ib_uverbs_get_context {
        __u64 response;
@@ -696,6 +700,7 @@ struct ib_uverbs_detach_mcast {
        __u64 driver_data[0];
 };
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 struct ib_kern_eth_filter {
        __u8  dst_mac[6];
        __u8  src_mac[6];
@@ -780,6 +785,7 @@ struct ib_uverbs_destroy_flow  {
        __u32 comp_mask;
        __u32 flow_handle;
 };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 struct ib_uverbs_create_srq {
        __u64 response;
index eb262e3324d2fca44da8c88bf3127fd4da65b118..c50061db609893a924160556203f7dcf73e24700 100644 (file)
  * node as before.
  */
 
+/*
+ * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
+ * offload off or on. If it is missing then the feature is assumed to be on.
+ * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
+ * offload on or off. If it is missing then the feature is assumed to be off.
+ */
+
+/*
+ * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
+ * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
+ * frontends nor backends are assumed to be capable unless the flags are
+ * present.
+ */
+
 /*
  * This is the 'wire' format for packets:
  *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags)
@@ -95,8 +109,10 @@ struct xen_netif_tx_request {
 #define _XEN_NETIF_EXTRA_FLAG_MORE     (0)
 #define  XEN_NETIF_EXTRA_FLAG_MORE     (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
 
-/* GSO types - only TCPv4 currently supported. */
+/* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE                (0)
 #define XEN_NETIF_GSO_TYPE_TCPV4       (1)
+#define XEN_NETIF_GSO_TYPE_TCPV6       (2)
 
 /*
  * This structure needs to fit within both netif_tx_request and
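
A hedged frontend-side sketch of tagging a large IPv6 packet with the new GSO type; the xen_netif_extra_info layout (u.gso.size/type/pad/features) is assumed from the rest of this header rather than shown in the hunk:

	#include <xen/interface/io/netif.h>

	static void fill_gso_extra(struct xen_netif_extra_info *extra,
				   unsigned int mss, int ipv6)
	{
		extra->type  = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;
		extra->u.gso.size     = mss;	/* MSS of the packet */
		extra->u.gso.type     = ipv6 ? XEN_NETIF_GSO_TYPE_TCPV6
					     : XEN_NETIF_GSO_TYPE_TCPV4;
		extra->u.gso.pad      = 0;
		extra->u.gso.features = 0;
	}
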
index 3ecd8a1178f102d832cdf3b4af0a908997ea648b..d9887456007a83b212eb41dabf95f7d8c781c6b4 100644 (file)
@@ -284,7 +284,7 @@ config AUDIT
 
 config AUDITSYSCALL
        bool "Enable system-call auditing support"
-       depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
+       depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
        default y if SECURITY_SELINUX
        help
          Enable low-overhead system-call auditing infrastructure that
index af310afbef2867e987973cc341a7b418f7413068..edee99f735746f4bb3b6a8fb17794919469f4888 100644 (file)
@@ -76,6 +76,7 @@
 #include <linux/elevator.h>
 #include <linux/sched_clock.h>
 #include <linux/context_tracking.h>
+#include <linux/random.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -135,6 +136,13 @@ static char *static_command_line;
 static char *execute_command;
 static char *ramdisk_execute_command;
 
+/*
+ * Used to generate warnings if static_key manipulation functions are used
+ * before jump_label_init is called.
+ */
+bool static_key_initialized __read_mostly = false;
+EXPORT_SYMBOL_GPL(static_key_initialized);
+
 /*
  * If set, this is an indication to the drivers that reset the underlying
  * device before going ahead with the initialization otherwise driver might
@@ -780,6 +788,7 @@ static void __init do_basic_setup(void)
        do_ctors();
        usermodehelper_enable();
        do_initcalls();
+       random_int_secret_init();
 }
 
 static void __init do_pre_smp_initcalls(void)
index b0d541d426771caec217961b031e3de191135ced..558aa91186b6ced1a27b1e05b65c5ee129e0a175 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
        ipc_rmid(&msg_ids(ns), &s->q_perm);
 }
 
+static void msg_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct msg_queue *msq = ipc_rcu_to_struct(p);
+
+       security_msg_queue_free(msq);
+       ipc_rcu_free(head);
+}
+
 /**
  * newque - Create a new msg queue
  * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, ipc_rcu_free);
                return retval;
        }
 
        /* ipc_addid() locks msq upon success. */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id < 0) {
-               security_msg_queue_free(msq);
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, msg_rcu_free);
                return id;
        }
 
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-       security_msg_queue_free(msq);
-       ipc_rcu_putref(msq);
+       ipc_rcu_putref(msq, msg_rcu_free);
 }
 
 /*
@@ -688,6 +695,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                if (ipcperms(ns, &msq->q_perm, S_IWUGO))
                        goto out_unlock0;
 
+               /* raced with RMID? */
+               if (msq->q_perm.deleted) {
+                       err = -EIDRM;
+                       goto out_unlock0;
+               }
+
                err = security_msg_queue_msgsnd(msq, msg, msgflg);
                if (err)
                        goto out_unlock0;
@@ -717,7 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                rcu_read_lock();
                ipc_lock_object(&msq->q_perm);
 
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, ipc_rcu_free);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock0;
@@ -894,6 +907,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
                        goto out_unlock1;
 
                ipc_lock_object(&msq->q_perm);
+
+               /* raced with RMID? */
+               if (msq->q_perm.deleted) {
+                       msg = ERR_PTR(-EIDRM);
+                       goto out_unlock0;
+               }
+
                msg = find_msg(msq, &msgtyp, mode);
                if (!IS_ERR(msg)) {
                        /*
index 69b6a21f38441aa437a8f79cb53ae4c4a6a520a1..db9d241af133d770cb0a95a22cacf257f79b1215 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,71 +243,122 @@ static void merge_queues(struct sem_array *sma)
        }
 }
 
+static void sem_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct sem_array *sma = ipc_rcu_to_struct(p);
+
+       security_sem_free(sma);
+       ipc_rcu_free(head);
+}
+
+/*
+ * Wait until all currently ongoing simple ops have completed.
+ * Caller must own sem_perm.lock.
+ * New simple ops cannot start, because simple ops first check
+ * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+static void sem_wait_array(struct sem_array *sma)
+{
+       int i;
+       struct sem *sem;
+
+       if (sma->complex_count)  {
+               /* The thread that increased sma->complex_count waited on
+                * all sem->lock locks. Thus we don't need to wait again.
+                */
+               return;
+       }
+
+       for (i = 0; i < sma->sem_nsems; i++) {
+               sem = sma->sem_base + i;
+               spin_unlock_wait(&sem->lock);
+       }
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
  * Otherwise, lock the entire semaphore array, since we either have
  * multiple semaphores in our own semops, or we need to look at
  * semaphores from other pending complex operations.
- *
- * Carefully guard against sma->complex_count changing between zero
- * and non-zero while we are spinning for the lock. The value of
- * sma->complex_count cannot change while we are holding the lock,
- * so sem_unlock should be fine.
- *
- * The global lock path checks that all the local locks have been released,
- * checking each local lock once. This means that the local lock paths
- * cannot start their critical sections while the global lock is held.
  */
 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                              int nsops)
 {
-       int locknum;
- again:
-       if (nsops == 1 && !sma->complex_count) {
-               struct sem *sem = sma->sem_base + sops->sem_num;
+       struct sem *sem;
 
-               /* Lock just the semaphore we are interested in. */
-               spin_lock(&sem->lock);
+       if (nsops != 1) {
+               /* Complex operation - acquire a full lock */
+               ipc_lock_object(&sma->sem_perm);
 
-               /*
-                * If sma->complex_count was set while we were spinning,
-                * we may need to look at things we did not lock here.
+               /* And wait until all simple ops that are processed
+                * right now have dropped their locks.
                 */
-               if (unlikely(sma->complex_count)) {
-                       spin_unlock(&sem->lock);
-                       goto lock_array;
-               }
+               sem_wait_array(sma);
+               return -1;
+       }
+
+       /*
+        * Only one semaphore affected - try to optimize locking.
+        * The rules are:
+        * - optimized locking is possible if no complex operation
+        *   is either enqueued or processed right now.
+        * - The test for enqueued complex ops is simple:
+        *      sma->complex_count != 0
+        * - Testing for complex ops that are processed right now is
+        *   a bit more difficult. Complex ops acquire the full lock
+        *   and first wait until the running simple ops have completed.
+        *   (see above)
+        *   Thus: If we own a simple lock and the global lock is free
+        *      and complex_count is now 0, then it will stay 0 and
+        *      thus just locking sem->lock is sufficient.
+        */
+       sem = sma->sem_base + sops->sem_num;
 
+       if (sma->complex_count == 0) {
                /*
-                * Another process is holding the global lock on the
-                * sem_array; we cannot enter our critical section,
-                * but have to wait for the global lock to be released.
+                * It appears that no complex operation is around.
+                * Acquire the per-semaphore lock.
                 */
-               if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
-                       spin_unlock(&sem->lock);
-                       spin_unlock_wait(&sma->sem_perm.lock);
-                       goto again;
+               spin_lock(&sem->lock);
+
+               /* Then check that the global lock is free */
+               if (!spin_is_locked(&sma->sem_perm.lock)) {
+                       /* spin_is_locked() is not a memory barrier */
+                       smp_mb();
+
+                       /* Now repeat the test of complex_count:
+                        * It can't change anymore until we drop sem->lock.
+                        * Thus: if it is now 0, then it will stay 0.
+                        */
+                       if (sma->complex_count == 0) {
+                               /* fast path successful! */
+                               return sops->sem_num;
+                       }
                }
+               spin_unlock(&sem->lock);
+       }
 
-               locknum = sops->sem_num;
+       /* slow path: acquire the full lock */
+       ipc_lock_object(&sma->sem_perm);
+
+       if (sma->complex_count == 0) {
+               /* False alarm:
+                * There is no complex operation, thus we can switch
+                * back to the fast path.
+                */
+               spin_lock(&sem->lock);
+               ipc_unlock_object(&sma->sem_perm);
+               return sops->sem_num;
        } else {
-               int i;
-               /*
-                * Lock the semaphore array, and wait for all of the
-                * individual semaphore locks to go away.  The code
-                * above ensures no new single-lock holders will enter
-                * their critical section while the array lock is held.
+               /* Not a false alarm, thus complete the sequence for a
+                * full lock.
                 */
- lock_array:
-               ipc_lock_object(&sma->sem_perm);
-               for (i = 0; i < sma->sem_nsems; i++) {
-                       struct sem *sem = sma->sem_base + i;
-                       spin_unlock_wait(&sem->lock);
-               }
-               locknum = -1;
+               sem_wait_array(sma);
+               return -1;
        }
-       return locknum;
 }
 
 static inline void sem_unlock(struct sem_array *sma, int locknum)
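The rewritten sem_lock() above is an instance of a per-object fast path guarded by a global slow path: a simple operation takes only its own semaphore's spinlock and falls back to the array-wide lock only when a complex operation is pending or in flight. The following compilable userspace sketch shows the same idea; the names (array_t, simple_lock, the fixed-size local[] array) are invented for illustration and it deliberately omits the kernel's memory-barrier fine print around spin_is_locked()/smp_mb():

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_bool locked; } spin_t;

    static void spin_lock_(spin_t *l)      { while (atomic_exchange(&l->locked, true)) ; }
    static void spin_unlock_(spin_t *l)    { atomic_store(&l->locked, false); }
    static bool spin_is_locked_(spin_t *l) { return atomic_load(&l->locked); }

    typedef struct {
            spin_t global;                  /* plays the role of sem_perm.lock */
            spin_t local[64];               /* plays the role of sem->lock     */
            atomic_int complex_count;
    } array_t;

    /* Returns the index whose local lock is held, or -1 if the global lock is held. */
    static int simple_lock(array_t *a, int idx)
    {
            spin_lock_(&a->local[idx]);                     /* optimistic fast path */
            if (!spin_is_locked_(&a->global) &&
                atomic_load(&a->complex_count) == 0)
                    return idx;                             /* fast path succeeded */
            spin_unlock_(&a->local[idx]);

            spin_lock_(&a->global);                         /* slow path */
            if (atomic_load(&a->complex_count) == 0) {
                    spin_lock_(&a->local[idx]);             /* false alarm: drop back */
                    spin_unlock_(&a->global);
                    return idx;
            }
            for (int i = 0; i < 64; i++)                    /* like sem_wait_array() */
                    while (spin_is_locked_(&a->local[i]))
                            ;
            return -1;
    }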
@@ -374,12 +425,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
        sem_lock(sma, NULL, -1);
-       ipc_rcu_putref(sma);
-}
-
-static inline void sem_putref(struct sem_array *sma)
-{
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, ipc_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +504,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return retval;
        }
 
        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (id < 0) {
-               security_sem_free(sma);
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, sem_rcu_free);
                return id;
        }
        ns->used_sems += nsems;
@@ -872,6 +917,24 @@ again:
        return semop_completed;
 }
 
+/**
+ * set_semotime(sma, sops) - set sem_otime
+ * @sma: semaphore array
+ * @sops: operations that modified the array, may be NULL
+ *
+ * sem_otime is replicated to avoid cache line thrashing.
+ * This function sets one instance to the current time.
+ */
+static void set_semotime(struct sem_array *sma, struct sembuf *sops)
+{
+       if (sops == NULL) {
+               sma->sem_base[0].sem_otime = get_seconds();
+       } else {
+               sma->sem_base[sops[0].sem_num].sem_otime =
+                                                       get_seconds();
+       }
+}
+
 /**
  * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
  * @sma: semaphore array
@@ -922,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                        }
                }
        }
-       if (otime) {
-               if (sops == NULL) {
-                       sma->sem_base[0].sem_otime = get_seconds();
-               } else {
-                       sma->sem_base[sops[0].sem_num].sem_otime =
-                                                               get_seconds();
-               }
-       }
+       if (otime)
+               set_semotime(sma, sops);
 }
 
-
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -1047,8 +1103,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
        wake_up_sem_queue_do(&tasks);
        ns->used_sems -= sma->sem_nsems;
-       security_sem_free(sma);
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, sem_rcu_free);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1227,6 +1282,12 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 
        sem_lock(sma, NULL, -1);
 
+       if (sma->sem_perm.deleted) {
+               sem_unlock(sma, -1);
+               rcu_read_unlock();
+               return -EIDRM;
+       }
+
        curr = &sma->sem_base[semnum];
 
        ipc_assert_locked_object(&sma->sem_perm);
@@ -1281,28 +1342,28 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int i;
 
                sem_lock(sma, NULL, -1);
+               if (sma->sem_perm.deleted) {
+                       err = -EIDRM;
+                       goto out_unlock;
+               }
                if(nsems > SEMMSL_FAST) {
                        if (!ipc_rcu_getref(sma)) {
-                               sem_unlock(sma, -1);
-                               rcu_read_unlock();
                                err = -EIDRM;
-                               goto out_free;
+                               goto out_unlock;
                        }
                        sem_unlock(sma, -1);
                        rcu_read_unlock();
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
 
                        rcu_read_lock();
                        sem_lock_and_putref(sma);
                        if (sma->sem_perm.deleted) {
-                               sem_unlock(sma, -1);
-                               rcu_read_unlock();
                                err = -EIDRM;
-                               goto out_free;
+                               goto out_unlock;
                        }
                }
                for (i = 0; i < sma->sem_nsems; i++)
@@ -1320,28 +1381,28 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                struct sem_undo *un;
 
                if (!ipc_rcu_getref(sma)) {
-                       rcu_read_unlock();
-                       return -EIDRM;
+                       err = -EIDRM;
+                       goto out_rcu_wakeup;
                }
                rcu_read_unlock();
 
                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
                }
 
                if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
-                       sem_putref(sma);
+                       ipc_rcu_putref(sma, ipc_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }
 
                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
@@ -1349,10 +1410,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                rcu_read_lock();
                sem_lock_and_putref(sma);
                if (sma->sem_perm.deleted) {
-                       sem_unlock(sma, -1);
-                       rcu_read_unlock();
                        err = -EIDRM;
-                       goto out_free;
+                       goto out_unlock;
                }
 
                for (i = 0; i < nsems; i++)
@@ -1376,6 +1435,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                goto out_rcu_wakeup;
 
        sem_lock(sma, NULL, -1);
+       if (sma->sem_perm.deleted) {
+               err = -EIDRM;
+               goto out_unlock;
+       }
        curr = &sma->sem_base[semnum];
 
        switch (cmd) {
@@ -1629,7 +1692,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
        /* step 2: allocate new undo structure */
        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
-               sem_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1781,6 +1844,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        if (error)
                goto out_rcu_wakeup;
 
+       error = -EIDRM;
+       locknum = sem_lock(sma, sops, nsops);
+       if (sma->sem_perm.deleted)
+               goto out_unlock_free;
        /*
         * semid identifiers are not unique - find_alloc_undo may have
         * allocated an undo structure, it was invalidated by an RMID
@@ -1788,19 +1855,22 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
         * This case can be detected checking un->semid. The existence of
         * "un" itself is guaranteed by rcu.
         */
-       error = -EIDRM;
-       locknum = sem_lock(sma, sops, nsops);
        if (un && un->semid == -1)
                goto out_unlock_free;
 
        error = perform_atomic_semop(sma, sops, nsops, un,
                                        task_tgid_vnr(current));
-       if (error <= 0) {
-               if (alter && error == 0)
+       if (error == 0) {
+               /* If the operation was successful, then do
+                * the required updates.
+                */
+               if (alter)
                        do_smart_update(sma, sops, nsops, 1, &tasks);
-
-               goto out_unlock_free;
+               else
+                       set_semotime(sma, sops);
        }
+       if (error <= 0)
+               goto out_unlock_free;
 
        /* We need to sleep on this operation, so we put the current
         * task into the pending queue and go to sleep.
@@ -1997,6 +2067,12 @@ void exit_sem(struct task_struct *tsk)
                }
 
                sem_lock(sma, NULL, -1);
+               /* exit_sem raced with IPC_RMID, nothing to do */
+               if (sma->sem_perm.deleted) {
+                       sem_unlock(sma, -1);
+                       rcu_read_unlock();
+                       continue;
+               }
                un = __lookup_undo(ulp, semid);
                if (un == NULL) {
                        /* exit_sem raced with IPC_RMID+semget() that created
@@ -2059,6 +2135,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
        struct sem_array *sma = it;
        time_t sem_otime;
 
+       /*
+        * The proc interface isn't aware of sem_lock(); it calls
+        * ipc_lock_object() directly (in sysvipc_find_ipc).
+        * In order to stay compatible with sem_lock(), we must wait until
+        * all simple semop() calls have left their critical regions.
+        */
+       sem_wait_array(sma);
+
        sem_otime = get_semotime(sma);
 
        return seq_printf(s,
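get_semotime(), used just above, is the read-side counterpart of the set_semotime() helper introduced earlier in this file: because sem_otime is stored per semaphore so that writers touch separate cache lines, a reader has to combine the copies. get_semotime() itself is not part of this hunk, so the following is only an assumed sketch of its shape:

    /* Assumed shape of get_semotime(): fold the per-semaphore copies
     * back into a single timestamp. */
    static time_t get_semotime(struct sem_array *sma)
    {
            int i;
            time_t res = sma->sem_base[0].sem_otime;

            for (i = 1; i < sma->sem_nsems; i++) {
                    time_t to = sma->sem_base[i].sem_otime;

                    if (to > res)
                            res = to;
            }
            return res;
    }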
index 2821cdf93adb39ac83f8a604e6f487590bfe01f7..d69739610fd4384323004c46782116d54db6bbd5 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
        ipc_lock_object(&ipcp->shm_perm);
 }
 
+static void shm_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+       security_shm_free(shp);
+       ipc_rcu_free(head);
+}
+
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
                user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                                shp->mlock_user);
        fput (shp->shm_file);
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
 }
 
 /*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
-               ipc_rcu_putref(shp);
+               ipc_rcu_putref(shp, ipc_rcu_free);
                return error;
        }
 
@@ -566,8 +574,7 @@ no_id:
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
 no_file:
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
        return error;
 }
 
index e829da9ed01f3dbe2973fd232a4f2b889f401c9c..7684f41bce76a9b430f04b7e4e77810b48798a42 100644 (file)
  *            Pavel Emelianov <xemul@openvz.org>
  *
  * General sysv ipc locking scheme:
- *  when doing ipc id lookups, take the ids->rwsem
- *      rcu_read_lock()
- *          obtain the ipc object (kern_ipc_perm)
- *          perform security, capabilities, auditing and permission checks, etc.
- *          acquire the ipc lock (kern_ipc_perm.lock) throught ipc_lock_object()
- *             perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
+ *     rcu_read_lock()
+ *          obtain the ipc object (kern_ipc_perm) by looking up the id in an idr
+ *         tree.
+ *         - perform initial checks (capabilities, auditing and permission,
+ *           etc).
+ *         - perform read-only operations, such as STAT, INFO commands.
+ *           acquire the ipc lock (kern_ipc_perm.lock) through
+ *           ipc_lock_object()
+ *             - perform data updates, such as SET, RMID commands and
+ *               mechanism-specific operations (semop/semtimedop,
+ *               msgsnd/msgrcv, shmat/shmdt).
+ *         drop the ipc lock, through ipc_unlock_object().
+ *     rcu_read_unlock()
+ *
+ *  The ids->rwsem must be taken when:
+ *     - creating, removing and iterating the existing entries in ipc
+ *       identifier sets.
+ *     - iterating through files under /proc/sysvipc/
+ *
+ *  Note that sems have a special fast path that avoids kern_ipc_perm.lock -
+ *  see sem_lock().
  */
 
 #include <linux/mm.h>
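Expressed as code rather than prose, the locking scheme described in the comment above follows this rough shape at a typical call site (a condensed sketch, not an actual call site from this patch; error handling trimmed):

    rcu_read_lock();
    ipcp = ipc_obtain_object_check(ids, id);        /* idr lookup + checks */
    if (IS_ERR(ipcp))
            goto out_rcu;

    /* read-only work (STAT/INFO, permission and audit checks) runs here,
     * protected only by RCU */

    ipc_lock_object(ipcp);                          /* kern_ipc_perm.lock */
    /* data updates: SET, RMID, semop/msgsnd/shmat ... */
    ipc_unlock_object(ipcp);
out_rcu:
    rcu_read_unlock();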
@@ -474,11 +489,6 @@ void ipc_free(void* ptr, int size)
                kfree(ptr);
 }
 
-struct ipc_rcu {
-       struct rcu_head rcu;
-       atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
 /**
  *     ipc_rcu_alloc   -       allocate ipc and rcu space 
  *     @size: size desired
@@ -505,27 +515,24 @@ int ipc_rcu_getref(void *ptr)
        return atomic_inc_not_zero(&p->refcount);
 }
 
-/**
- * ipc_schedule_free - free ipc + rcu space
- * @head: RCU callback structure for queued work
- */
-static void ipc_schedule_free(struct rcu_head *head)
-{
-       vfree(container_of(head, struct ipc_rcu, rcu));
-}
-
-void ipc_rcu_putref(void *ptr)
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
 {
        struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
        if (!atomic_dec_and_test(&p->refcount))
                return;
 
-       if (is_vmalloc_addr(ptr)) {
-               call_rcu(&p->rcu, ipc_schedule_free);
-       } else {
-               kfree_rcu(p, rcu);
-       }
+       call_rcu(&p->rcu, func);
+}
+
+void ipc_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+
+       if (is_vmalloc_addr(p))
+               vfree(p);
+       else
+               kfree(p);
 }
 
 /**
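With ipc_rcu_putref() now taking the RCU callback explicitly, each IPC mechanism can defer its security hook to the same grace period as the memory free, which is what removes the separate security_*_free()/ipc_rcu_putref() pairs throughout this series. The per-mechanism callbacks (shm_rcu_free() above, sem_rcu_free() and msg's equivalent elsewhere) share one shape; foo is a placeholder name here, and security_foo_free() stands in for the real hook:

    /* Pattern of the per-mechanism RCU free callbacks. */
    static void foo_rcu_free(struct rcu_head *head)
    {
            struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
            struct foo_kernel *obj = ipc_rcu_to_struct(p);

            security_foo_free(obj);        /* hypothetical hook name */
            ipc_rcu_free(head);            /* vfree() or kfree() the allocation */
    }

    /* drop a reference; the callback runs after the RCU grace period */
    ipc_rcu_putref(obj, foo_rcu_free);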
index c5f3338ba1fa7913967c1bc7e9845e0561eeb027..f2f5036f2eeda9794bc545ac8045db3c56f7d425 100644 (file)
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
 
+struct ipc_rcu {
+       struct rcu_head rcu;
+       atomic_t refcount;
+} ____cacheline_aligned_in_smp;
+
+#define ipc_rcu_to_struct(p)  ((void *)(p+1))
+
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
  */
 void* ipc_rcu_alloc(int size);
 int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr);
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
index 91e53d04b6a9e8841e697dcb290f1206468da21a..7b0e23a740ce345987c33f9e012302c24de0f4db 100644 (file)
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                        sleep_time = timeout_start + audit_backlog_wait_time -
                                        jiffies;
-                       if ((long)sleep_time > 0)
+                       if ((long)sleep_time > 0) {
                                wait_for_auditd(sleep_time);
-                       continue;
+                               continue;
+                       }
                }
                if (audit_rate_check() && printk_ratelimit())
                        printk(KERN_WARNING
index 2418b6e71a854e187573ec12746481ce863e721a..8bd9cfdc70d7bc020dbc19a05813e2e49443b8e8 100644 (file)
@@ -2039,7 +2039,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
                /* @tsk either already exited or can't exit until the end */
                if (tsk->flags & PF_EXITING)
-                       continue;
+                       goto next;
 
                /* as per above, nr_threads may decrease, but not increase. */
                BUG_ON(i >= group_size);
@@ -2047,7 +2047,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                ent.cgrp = task_cgroup_from_root(tsk, root);
                /* nothing to do if this task is already in the cgroup */
                if (ent.cgrp == cgrp)
-                       continue;
+                       goto next;
                /*
                 * saying GFP_ATOMIC has no effect here because we did prealloc
                 * earlier, but it's good form to communicate our expectations.
@@ -2055,7 +2055,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                BUG_ON(retval != 0);
                i++;
-
+       next:
                if (!threadgroup)
                        break;
        } while_each_thread(leader, tsk);
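The continue to goto next change above matters because while_each_thread() closes a do { ... } while loop whose body ends with the 'if (!threadgroup) break;' test: a plain continue jumps straight to the loop condition and skips that trailing test. A tiny standalone C illustration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
            int i = 0;

            do {
                    i++;
                    if (i == 2)
                            continue;       /* skips everything below, including
                                             * any would-be break at the tail */
                    printf("tail ran for i=%d\n", i);
            } while (i < 4);

            return 0;                       /* prints the line for 1, 3 and 4 only */
    }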
@@ -3188,11 +3188,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* if first iteration, visit the leftmost descendant */
-       if (!pos) {
-               next = css_leftmost_descendant(root);
-               return next != root ? next : NULL;
-       }
+       /* if first iteration, visit leftmost descendant which may be @root */
+       if (!pos)
+               return css_leftmost_descendant(root);
 
        /* if we visited @root, we're done */
        if (pos == root)
index 247091bf0587a479594776be800db9164a10ae8b..859c8dfd78a1a5b296dd5c6c23d741e7a4c1e8cd 100644 (file)
@@ -50,6 +50,15 @@ void context_tracking_user_enter(void)
 {
        unsigned long flags;
 
+       /*
+        * Repeat the user_enter() check here because some archs may be calling
+        * this from asm and if no CPU needs context tracking, they shouldn't
+        * go further. Repeat the check here until they support the static key
+        * check.
+        */
+       if (!static_key_false(&context_tracking_enabled))
+               return;
+
        /*
         * Some contexts may involve an exception occurring in an irq,
         * leading to that nesting:
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
        unsigned long flags;
 
+       if (!static_key_false(&context_tracking_enabled))
+               return;
+
        if (in_interrupt())
                return;
 
index dd236b66ca3a8ef894386e7dcb8c7f02dbba34ae..d49a9d29334cc4d67c24bad9814221a0371a6350 100644 (file)
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
        *running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+       struct perf_event_mmap_page *userpg;
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       userpg = rb->user_page;
+
+       /* Allow new userspace to detect that bit 0 is deprecated */
+       userpg->cap_bit0_is_deprecated = 1;
+       userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+       rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
        ring_buffer_attach(event, rb);
        rcu_assign_pointer(event->rb, rb);
 
+       perf_event_init_userpage(event);
        perf_event_update_userpage(event);
 
 unlock:
@@ -7213,15 +7234,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
                perf_remove_from_context(event);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
-               list_add(&event->event_entry, &events);
+               list_add(&event->migrate_entry, &events);
        }
        mutex_unlock(&src_ctx->mutex);
 
        synchronize_rcu();
 
        mutex_lock(&dst_ctx->mutex);
-       list_for_each_entry_safe(event, tmp, &events, event_entry) {
-               list_del(&event->event_entry);
+       list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+               list_del(&event->migrate_entry);
                if (event->state >= PERF_EVENT_STATE_OFF)
                        event->state = PERF_EVENT_STATE_INACTIVE;
                account_event_cpu(event, dst_cpu);
index 297a9247a3b394f8538db111817e8a88d3c345ab..9019f15deab201127065e4d6987677fdcdd63676 100644 (file)
@@ -58,6 +58,7 @@ static void jump_label_update(struct static_key *key, int enable);
 
 void static_key_slow_inc(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        if (atomic_inc_not_zero(&key->enabled))
                return;
 
@@ -103,12 +104,14 @@ static void jump_label_update_timeout(struct work_struct *work)
 
 void static_key_slow_dec(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
+       STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
@@ -116,6 +119,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
+       STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
 }
@@ -212,6 +216,7 @@ void __init jump_label_init(void)
                key->next = NULL;
 #endif
        }
+       static_key_initialized = true;
        jump_label_unlock();
 }
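STATIC_KEY_CHECK_USE() itself is not part of this hunk; from the way it is used, and from static_key_initialized being set at the end of jump_label_init() above, it is presumably a guard that warns when a static key is toggled before the jump-label machinery is ready. Its assumed shape:

    /* Assumed definition, not shown in this diff. */
    #define STATIC_KEY_CHECK_USE()                                          \
            WARN(!static_key_initialized,                                   \
                 "%s(): static key used before jump_label_init()", __func__)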
 
index fb326365b69466158b03b726e53c4fe15448fa89..b086006c59e7c6957a51984a3ec101ea2db8525d 100644 (file)
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;
 
+       if (!sub_info->path) {
+               call_usermodehelper_freeinfo(sub_info);
+               return -EINVAL;
+       }
        helper_lock();
        if (!khelper_wq || usermodehelper_disabled) {
                retval = -EBUSY;
index 81c4e78c8f4cc0b79b89086c3255934079082380..c00d5b502aa487de0f09117c21dfa12eb8923e0c 100644 (file)
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
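The kstrtoul to kstrtol switch above restores negative values for the signed module parameter types: the kernel's unsigned parsers do not accept a leading '-', so something like passing param=-1 to a signed int parameter stopped working while these went through kstrtoul. Illustrative snippet, not from the patch:

    long sval;
    unsigned long uval;
    int ret;

    ret = kstrtol("-1", 0, &sval);   /* ret == 0, sval == -1 */
    ret = kstrtoul("-1", 0, &uval);  /* ret == -EINVAL: no sign allowed */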
index ebe5e80b10f8495a3d6459e0203821f987237df0..9b9a26698144e126486e162955e49561514b1ce8 100644 (file)
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
                         */
                        wake_up_process(ns->child_reaper);
                        break;
+               case PIDNS_HASH_ADDING:
+                       /* Handle a fork failure of the first process */
+                       WARN_ON(ns->child_reaper);
+                       ns->nr_hashed = 0;
+                       /* fall through */
                case 0:
                        schedule_work(&ns->proc_work);
                        break;
index d444c4e834f4526ec318f71b1076a02c909e34a8..2fac9cc79b3da6183dd56f5e3469a7de54d4167e 100644 (file)
@@ -178,6 +178,22 @@ config PM_SLEEP_DEBUG
        def_bool y
        depends on PM_DEBUG && PM_SLEEP
 
+config DPM_WATCHDOG
+       bool "Device suspend/resume watchdog"
+       depends on PM_DEBUG && PSTORE
+       ---help---
+         Sets up a watchdog timer to capture drivers that are
+         locked up attempting to suspend/resume a device.
+         A detected lockup causes a system panic, with the message
+         captured in the pstore device for inspection during the
+         subsequent boot session.
+
+config DPM_WATCHDOG_TIMEOUT
+       int "Watchdog timeout in seconds"
+       range 1 120
+       default 12
+       depends on DPM_WATCHDOG
+
 config PM_TRACE
        bool
        help
index a394297f8b2f94ea806a545c6e92e4b7221d2f7e..8dff9b48075af3f61eeab3531b3ad706b88718e4 100644 (file)
@@ -558,30 +558,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
        if (count == sizeof(s32)) {
                if (copy_from_user(&value, buf, sizeof(s32)))
                        return -EFAULT;
-       } else if (count <= 11) { /* ASCII perhaps? */
-               char ascii_value[11];
-               unsigned long int ulval;
+       } else {
                int ret;
 
-               if (copy_from_user(ascii_value, buf, count))
-                       return -EFAULT;
-
-               if (count > 10) {
-                       if (ascii_value[10] == '\n')
-                               ascii_value[10] = '\0';
-                       else
-                               return -EINVAL;
-               } else {
-                       ascii_value[count] = '\0';
-               }
-               ret = kstrtoul(ascii_value, 16, &ulval);
-               if (ret) {
-                       pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
-                       return -EINVAL;
-               }
-               value = (s32)lower_32_bits(ulval);
-       } else {
-               return -EINVAL;
+               ret = kstrtos32_from_user(buf, count, 16, &value);
+               if (ret)
+                       return ret;
        }
 
        req = filp->private_data;
index 358a146fd4dacda5462f5ef29a8f797c91868c71..98c3b34a4cffcedeae812de0ddc947f874cef839 100644 (file)
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
        struct memory_bitmap *bm1, *bm2;
        int error = 0;
 
-       BUG_ON(forbidden_pages_map || free_pages_map);
+       if (forbidden_pages_map && free_pages_map)
+               return 0;
+       else
+               BUG_ON(forbidden_pages_map || free_pages_map);
 
        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
index 72e8f4fd616dee0cec880d442e33127602601f6c..24850270c8024948d60c0f827457b18d2de2d9a7 100644 (file)
@@ -36,9 +36,10 @@ static struct snapshot_data {
        struct snapshot_handle handle;
        int swap;
        int mode;
-       char frozen;
-       char ready;
-       char platform_support;
+       bool frozen;
+       bool ready;
+       bool platform_support;
+       bool free_bitmaps;
 } snapshot_state;
 
 atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,15 +83,19 @@ static int snapshot_open(struct inode *inode, struct file *filp)
                data->swap = -1;
                data->mode = O_WRONLY;
                error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+               if (!error) {
+                       error = create_basic_memory_bitmaps();
+                       data->free_bitmaps = !error;
+               }
                if (error)
                        pm_notifier_call_chain(PM_POST_RESTORE);
        }
        if (error)
                atomic_inc(&snapshot_device_available);
 
-       data->frozen = 0;
-       data->ready = 0;
-       data->platform_support = 0;
+       data->frozen = false;
+       data->ready = false;
+       data->platform_support = false;
 
  Unlock:
        unlock_system_sleep();
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
                pm_restore_gfp_mask();
                free_basic_memory_bitmaps();
                thaw_processes();
+       } else if (data->free_bitmaps) {
+               free_basic_memory_bitmaps();
        }
        pm_notifier_call_chain(data->mode == O_RDONLY ?
                        PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -222,7 +229,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                if (error)
                        thaw_processes();
                else
-                       data->frozen = 1;
+                       data->frozen = true;
 
                break;
 
@@ -231,8 +238,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                        break;
                pm_restore_gfp_mask();
                free_basic_memory_bitmaps();
+               data->free_bitmaps = false;
                thaw_processes();
-               data->frozen = 0;
+               data->frozen = false;
                break;
 
        case SNAPSHOT_CREATE_IMAGE:
@@ -262,7 +270,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
        case SNAPSHOT_FREE:
                swsusp_free();
                memset(&data->handle, 0, sizeof(struct snapshot_handle));
-               data->ready = 0;
+               data->ready = false;
                /*
                 * It is necessary to thaw kernel threads here, because
                 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
@@ -326,7 +334,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 * PM_HIBERNATION_PREPARE
                 */
                error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-               data->ready = 0;
+               data->ready = false;
                break;
 
        case SNAPSHOT_PLATFORM_SUPPORT:
index 269ed9384cc4284e9cf6043bd00667ea582c4369..f813b3474646c5b320a19d9a8997349bdd14d68e 100644 (file)
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line).  This is needed so that we can
+ * suppress DMI scanning for reboot quirks.  Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
index 11cd13667359862c58872a9ce6091391a1d40b86..7c70201fbc61aef012d5b2536600d898602bab0a 100644 (file)
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        }
 
        if (!se) {
-               cfs_rq->h_load = rq->avg.load_avg_contrib;
+               cfs_rq->h_load = cfs_rq->runnable_load_avg;
                cfs_rq->last_h_load_update = now;
        }
 
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
                (busiest->load_per_task * SCHED_POWER_SCALE) /
                busiest->group_power;
 
-       if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-           (scaled_busy_load_per_task * imbn)) {
+       if (busiest->avg_load + scaled_busy_load_per_task >=
+           local->avg_load + (scaled_busy_load_per_task * imbn)) {
                env->imbalance = busiest->load_per_task;
                return;
        }
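The rewritten condition in fix_small_imbalance() compares the same quantities, but all of the loads are unsigned, so the old form could underflow when local->avg_load exceeds busiest->avg_load and then compare as an enormous value. Moving the subtraction to an addition on the other side avoids that. A small illustration of the failure mode, with imbn taken as 2:

    unsigned long busiest_load = 100, local_load = 300, scaled = 50;

    /* old form: 100 - 300 wraps to a huge unsigned number, so the
     * test is effectively always true */
    if (busiest_load - local_load + scaled >= scaled * 2)
            ;       /* taken even though busiest is far below local */

    /* new form: no subtraction, no wrap-around */
    if (busiest_load + scaled >= local_load + scaled * 2)
            ;       /* 150 >= 400 is false, as intended */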
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         * max load less than avg load(as we skip the groups at or below
         * its cpu_power, while calculating max_load..)
         */
-       if (busiest->avg_load < sds->avg_load) {
+       if (busiest->avg_load <= sds->avg_load ||
+           local->avg_load >= sds->avg_load) {
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }
index 53cc09ceb0b869cf407fdc9d8195f265cce4c011..d7d498d8cc4f8185954a220c66071cbc5761b917 100644 (file)
@@ -328,10 +328,19 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (!force_irqthreads)
-               __do_softirq();
-       else
+       if (!force_irqthreads) {
+               /*
+                * We can safely execute softirq on the current stack if
+                * it is the irq stack, because it should be near empty
+                * at this stage. But we have no way to know if the arch
+                * calls irq_exit() on the irq stack. So call softirq
+                * on its own stack to prevent any overrun on top
+                * of a potentially deep task stack.
+                */
+               do_softirq();
+       } else {
                wakeup_softirqd();
+       }
 }
 
 static inline void tick_irq_exit(void)
index 51c4f34d258ea397266e0dd1a96a16436415a38f..4431610f049ac77888adefe3a335d47d4d232939 100644 (file)
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
        .unpark                 = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+       struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+       int ret;
+
+       /*
+        * No need to cancel and restart hrtimer if it is currently executing
+        * because it will reprogram itself with the new period now.
+        * We should never see it unqueued here because we are running per-cpu
+        * with interrupts disabled.
+        */
+       ret = hrtimer_try_to_cancel(hrtimer);
+       if (ret == 1)
+               hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                               HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+       struct call_single_data data = {.func = restart_watchdog_hrtimer};
+       /*
+        * Make sure that the perf event counter will adapt to a new
+        * sampling period. Updating the sampling period directly would
+        * be much nicer but we do not have an API for that now so
+        * let's use a big hammer.
+        * Hrtimer will adopt the new period on the next tick but this
+        * might be late already so we have to restart the timer as well.
+        */
+       watchdog_nmi_disable(cpu);
+       __smp_call_function_single(cpu, &data, 1);
+       watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       preempt_disable();
+       for_each_online_cpu(cpu)
+               update_timers(cpu);
+       preempt_enable();
+       put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
        int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
+       } else if (sample_period_changed) {
+               update_timers_all_cpus();
        }
 
        return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int err, old_thresh, old_enabled;
+       static DEFINE_MUTEX(watchdog_proc_mutex);
 
+       mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
-               return err;
+               goto out;
 
        set_sample_period();
        /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
         * watchdog_*_all_cpus() function takes care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
-               err = watchdog_enable_all_cpus();
+               err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
-
+out:
+       mutex_unlock(&watchdog_proc_mutex);
        return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
        set_sample_period();
 
        if (watchdog_user_enabled)
-               watchdog_enable_all_cpus();
+               watchdog_enable_all_cpus(false);
 }
index 3f0494c9d57aaf24cbbea90231e010d1d06f3e04..8499c810909a58ae100b7db96a01ea74b5c14948 100644 (file)
@@ -14,6 +14,8 @@
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
 
 /**
  * hex_to_bin - convert a hex digit to its real value
index 962175134702dace0078edfd0ec7f092bc646536..084f7b18d0c0a722e215dce8d14dcda0e6ba78d8 100644 (file)
@@ -592,7 +592,7 @@ static void kobject_release(struct kref *kref)
 {
        struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-       pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+       pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
                 kobject_name(kobj), kobj, __func__, kobj->parent);
        INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
        schedule_delayed_work(&kobj->release, HZ);
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 
 bool kobj_ns_current_may_mount(enum kobj_ns_type type)
 {
-       bool may_mount = false;
-
-       if (type == KOBJ_NS_TYPE_NONE)
-               return true;
+       bool may_mount = true;
 
        spin_lock(&kobj_ns_type_lock);
        if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
index e2cd2c0a882126c58e04e47102fb4975c5247d3c..af6e95d0bed6122bf9fd1e4af2bd76a4e2be62b3 100644 (file)
@@ -3,6 +3,22 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
-               old.lock_count = cmpxchg(&lockref->lock_count,                  \
-                                        old.lock_count, new.lock_count);       \
+               old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
+                                                  old.lock_count,              \
+                                                  new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
-               cpu_relax();                                                    \
+               arch_mutex_cpu_relax();                                         \
        }                                                                       \
 } while (0)
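For readers unfamiliar with the macro, CMPXCHG_LOOP is the usual optimistic compare-and-swap retry loop; the hunk above only relaxes its ordering (cmpxchg64_relaxed) and its spin hint (arch_mutex_cpu_relax). Stripped of the macro plumbing, one pass of, say, an increment looks roughly like this sketch (a fragment, not a drop-in replacement for the macro):

    struct lockref old, new;

    old.lock_count = lockref->lock_count;                    /* snapshot */
    while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
            u64 expected = old.lock_count;

            new = old;
            new.count++;                                     /* the per-op CODE body */
            old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
                                               expected, new.lock_count);
            if (old.lock_count == expected)
                    return;                                  /* swap succeeded */
            arch_mutex_cpu_relax();                          /* lost the race, retry */
    }
    /* spinlock is held by someone: fall back to taking lockref->lock */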
 
@@ -136,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref)
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
 }
+EXPORT_SYMBOL(lockref_mark_dead);
 
 /**
  * lockref_get_not_dead - Increments count unless the ref is dead
index 7deeb6297a483d7272acb30c4f3b0c0d6b0e76b3..1a53d497a8c53ae460686af6107531c4161eae53 100644 (file)
@@ -53,6 +53,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
        ref->release = release;
        return 0;
 }
+EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
  * percpu_ref_cancel_init - cancel percpu_ref_init()
@@ -84,6 +85,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
                free_percpu(ref->pcpu_count);
        }
 }
+EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
@@ -156,3 +158,4 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 
        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
index 026771a9b0976068f64935b60c9da8526dbe7b63..394838f489ebae1b9d00660af54ceac857538653 100644 (file)
@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
 config MEMORY_HOTREMOVE
        bool "Allow for memory hot remove"
        select MEMORY_ISOLATION
-       select HAVE_BOOTMEM_INFO_NODE if X86_64
+       select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
        depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
        depends on MIGRATION
 
index c9f0a4339a7dafc2ba7295e49ad8fcdda8fa13de..5a7d58fb883bfa1c4917e48d251cd132c8d9baf9 100644 (file)
@@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        struct bio_vec *to, *from;
        unsigned i;
 
+       if (force)
+               goto bounce;
        bio_for_each_segment(from, *bio_orig, i)
                if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
                        goto bounce;
index c43789388cd8667536bfd9644a5bbe2cd0d35f3e..b5326b141a251905a96cd5cd995063a29ca20f91 100644 (file)
@@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone,
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
 
+               /*
+                * This can iterate a massively long zone without finding any
+                * suitable migration targets, so periodically check if we need
+                * to schedule.
+                */
+               cond_resched();
+
                if (!pfn_valid(pfn))
                        continue;
 
index 1e6aec4a2d2ebae29c0fea0405a7e81f422beeb4..ccb87cc8f07c08a5891c7cd40414c54da47449c1 100644 (file)
@@ -1315,44 +1315,6 @@ out:
        file_accessed(filp);
 }
 
-int file_read_actor(read_descriptor_t *desc, struct page *page,
-                       unsigned long offset, unsigned long size)
-{
-       char *kaddr;
-       unsigned long left, count = desc->count;
-
-       if (size > count)
-               size = count;
-
-       /*
-        * Faults on the destination of a read are common, so do it before
-        * taking the kmap.
-        */
-       if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page);
-               left = __copy_to_user_inatomic(desc->arg.buf,
-                                               kaddr + offset, size);
-               kunmap_atomic(kaddr);
-               if (left == 0)
-                       goto success;
-       }
-
-       /* Do it the slow way */
-       kaddr = kmap(page);
-       left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
-       kunmap(page);
-
-       if (left) {
-               size -= left;
-               desc->error = -EFAULT;
-       }
-success:
-       desc->count = count - size;
-       desc->written += size;
-       desc->arg.buf += size;
-       return size;
-}
-
 /*
  * Performs necessary checks before doing a write
  * @iov:       io vector request
@@ -1392,31 +1354,41 @@ int generic_segment_checks(const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_segment_checks);
 
+int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+                        unsigned long offset, unsigned long size)
+{
+       struct iov_iter *iter = desc->arg.data;
+       unsigned long copied = 0;
+
+       if (size > desc->count)
+               size = desc->count;
+
+       copied = __iov_iter_copy_to_user(page, iter, offset, size);
+       if (copied < size)
+               desc->error = -EFAULT;
+
+       iov_iter_advance(iter, copied);
+       desc->count -= copied;
+       desc->written += copied;
+
+       return copied;
+}
+
 /**
- * generic_file_aio_read - generic filesystem read routine
+ * generic_file_read_iter - generic filesystem read routine
  * @iocb:      kernel I/O control block
- * @iov:       io vector request
- * @nr_segs:   number of segments in the iovec
+ * @iter:      memory vector
  * @pos:       current file position
- *
- * This is the "read()" routine for all filesystems
- * that can use the page cache directly.
  */
 ssize_t
-generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg = 0;
-       size_t count;
+       read_descriptor_t desc;
+       ssize_t retval = 0;
+       size_t count = iov_iter_count(iter);
        loff_t *ppos = &iocb->ki_pos;
 
-       count = 0;
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t size;
@@ -1430,11 +1402,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                size = i_size_read(inode);
                if (pos < size) {
                        retval = filemap_write_and_wait_range(mapping, pos,
-                                       pos + iov_length(iov, nr_segs) - 1);
-                       if (!retval) {
+                                       pos + count - 1);
+                       if (!retval)
                                retval = mapping->a_ops->direct_IO(READ, iocb,
-                                                       iov, pos, nr_segs);
-                       }
+                                                                  iter, pos);
                        if (retval > 0) {
                                *ppos = pos + retval;
                                count -= retval;
@@ -1455,42 +1426,47 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       count = retval;
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-               loff_t offset = 0;
-
-               /*
-                * If we did a short DIO read we need to skip the section of the
-                * iov that we've already read data into.
-                */
-               if (count) {
-                       if (count > iov[seg].iov_len) {
-                               count -= iov[seg].iov_len;
-                               continue;
-                       }
-                       offset = count;
-                       count = 0;
-               }
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base + offset;
-               desc.count = iov[seg].iov_len - offset;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_generic_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
+       desc.written = 0;
+       desc.arg.data = iter;
+       desc.count = count;
+       desc.error = 0;
+       do_generic_file_read(filp, ppos, &desc, file_read_iter_actor);
+       if (desc.written)
+               retval = desc.written;
+       else
+               retval = desc.error;
 out:
        return retval;
 }
+EXPORT_SYMBOL(generic_file_read_iter);
+
+/**
+ * generic_file_aio_read - generic filesystem read routine
+ * @iocb:      kernel I/O control block
+ * @iov:       io vector request
+ * @nr_segs:   number of segments in the iovec
+ * @pos:       current file position
+ *
+ * This is the "read()" routine for all filesystems
+ * that can use the page cache directly.
+ */
+ssize_t
+generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
+{
+       struct iov_iter iter;
+       int ret;
+       size_t count;
+
+       count = 0;
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+       if (ret)
+               return ret;
+
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+       return generic_file_read_iter(iocb, &iter, pos);
+}
 EXPORT_SYMBOL(generic_file_aio_read);
 
 #ifdef CONFIG_MMU
@@ -1616,7 +1592,6 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = mapping->host;
        pgoff_t offset = vmf->pgoff;
        struct page *page;
-       bool memcg_oom;
        pgoff_t size;
        int ret = 0;
 
@@ -1625,11 +1600,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        /*
-        * Do we have something in the page cache already?  Either
-        * way, try readahead, but disable the memcg OOM killer for it
-        * as readahead is optional and no errors are propagated up
-        * the fault stack.  The OOM killer is enabled while trying to
-        * instantiate the faulting page individually below.
+        * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
        if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
@@ -1637,14 +1608,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
-               memcg_oom = mem_cgroup_toggle_oom(false);
                do_async_mmap_readahead(vma, ra, file, page, offset);
-               mem_cgroup_toggle_oom(memcg_oom);
        } else if (!page) {
                /* No page in the page cache at all */
-               memcg_oom = mem_cgroup_toggle_oom(false);
                do_sync_mmap_readahead(vma, ra, file, offset);
-               mem_cgroup_toggle_oom(memcg_oom);
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
@@ -1952,150 +1919,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-                       const struct iovec *iov, size_t base, size_t bytes)
-{
-       size_t copied = 0, left = 0;
-
-       while (bytes) {
-               char __user *buf = iov->iov_base + base;
-               int copy = min(bytes, iov->iov_len - base);
-
-               base = 0;
-               left = __copy_from_user_inatomic(vaddr, buf, copy);
-               copied += copy;
-               bytes -= copy;
-               vaddr += copy;
-               iov++;
-
-               if (unlikely(left))
-                       break;
-       }
-       return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap_atomic(kaddr);
-
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap(page);
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-       BUG_ON(i->count < bytes);
-
-       if (likely(i->nr_segs == 1)) {
-               i->iov_offset += bytes;
-               i->count -= bytes;
-       } else {
-               const struct iovec *iov = i->iov;
-               size_t base = i->iov_offset;
-               unsigned long nr_segs = i->nr_segs;
-
-               /*
-                * The !iov->iov_len check ensures we skip over unlikely
-                * zero-length segments (without overruning the iovec).
-                */
-               while (bytes || unlikely(i->count && !iov->iov_len)) {
-                       int copy;
-
-                       copy = min(bytes, iov->iov_len - base);
-                       BUG_ON(!i->count || i->count < copy);
-                       i->count -= copy;
-                       bytes -= copy;
-                       base += copy;
-                       if (iov->iov_len == base) {
-                               iov++;
-                               nr_segs--;
-                               base = 0;
-                       }
-               }
-               i->iov = iov;
-               i->iov_offset = base;
-               i->nr_segs = nr_segs;
-       }
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       char __user *buf = i->iov->iov_base + i->iov_offset;
-       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-       return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-       const struct iovec *iov = i->iov;
-       if (i->nr_segs == 1)
-               return i->count;
-       else
-               return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
@@ -2201,9 +2024,8 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
 EXPORT_SYMBOL(pagecache_write_end);
 
 ssize_t
-generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, size_t ocount)
+generic_file_direct_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos, loff_t *ppos, size_t count)
 {
        struct file     *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -2212,10 +2034,13 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        size_t          write_len;
        pgoff_t         end;
 
-       if (count != ocount)
-               *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
+       if (count != iov_iter_count(iter)) {
+               written = iov_iter_shorten(iter, count);
+               if (written)
+                       goto out;
+       }
 
-       write_len = iov_length(iov, *nr_segs);
+       write_len = count;
        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
 
        written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
@@ -2242,7 +2067,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+       written = mapping->a_ops->direct_IO(WRITE, iocb, iter, pos);
 
        /*
         * Finally, try again to invalidate clean pages which might have been
@@ -2268,6 +2093,23 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 out:
        return written;
 }
+EXPORT_SYMBOL(generic_file_direct_write_iter);
+
+ssize_t
+generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, size_t ocount)
+{
+       struct iov_iter iter;
+       ssize_t ret;
+
+       iov_iter_init(&iter, iov, *nr_segs, ocount, 0);
+       ret = generic_file_direct_write_iter(iocb, &iter, pos, ppos, count);
+       /* generic_file_direct_write_iter() might have shortened the vec */
+       if (*nr_segs != iter.nr_segs)
+               *nr_segs = iter.nr_segs;
+       return ret;
+}
 EXPORT_SYMBOL(generic_file_direct_write);
 
 /*
@@ -2401,16 +2243,19 @@ again:
 }
 
 ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, ssize_t written)
+generic_file_buffered_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos, loff_t *ppos, size_t count, ssize_t written)
 {
        struct file *file = iocb->ki_filp;
        ssize_t status;
-       struct iov_iter i;
 
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       status = generic_perform_write(file, &i, pos);
+       if ((count + written) != iov_iter_count(iter)) {
+               int rc = iov_iter_shorten(iter, count + written);
+               if (rc)
+                       return rc;
+       }
+
+       status = generic_perform_write(file, iter, pos);
 
        if (likely(status >= 0)) {
                written += status;
@@ -2419,13 +2264,24 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        
        return written ? written : status;
 }
+EXPORT_SYMBOL(generic_file_buffered_write_iter);
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, ssize_t written)
+{
+       struct iov_iter iter;
+       iov_iter_init(&iter, iov, nr_segs, count, written);
+       return generic_file_buffered_write_iter(iocb, &iter, pos, ppos,
+                                               count, written);
+}
 EXPORT_SYMBOL(generic_file_buffered_write);
 
 /**
  * __generic_file_aio_write - write data to a file
  * @iocb:      IO state structure (file, offset, etc.)
- * @iov:       vector with data to write
- * @nr_segs:   number of segments in the vector
+ * @iter:      iov_iter specifying memory to write
  * @ppos:      position where to write
  *
  * This function does all the work needed for actually writing data to a
@@ -2440,24 +2296,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
  * A caller has to handle it. This is mainly due to the fact that we want to
  * avoid syncing under i_mutex.
  */
-ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                unsigned long nr_segs, loff_t *ppos)
+ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t *ppos)
 {
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
-       size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        struct inode    *inode = mapping->host;
        loff_t          pos;
        ssize_t         written;
        ssize_t         err;
 
-       ocount = 0;
-       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-       if (err)
-               return err;
-
-       count = ocount;
+       count = iov_iter_count(iter);
        pos = *ppos;
 
        /* We can write back this queue in page reclaim */
@@ -2484,8 +2334,8 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                loff_t endbyte;
                ssize_t written_buffered;
 
-               written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
-                                                       ppos, count, ocount);
+               written = generic_file_direct_write_iter(iocb, iter, pos,
+                                                        ppos, count);
                if (written < 0 || written == count)
                        goto out;
                /*
@@ -2494,9 +2344,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                 */
                pos += written;
                count -= written;
-               written_buffered = generic_file_buffered_write(iocb, iov,
-                                               nr_segs, pos, ppos, count,
-                                               written);
+               iov_iter_advance(iter, written);
+               written_buffered = generic_file_buffered_write_iter(iocb, iter,
+                                               pos, ppos, count, written);
                /*
                 * If generic_file_buffered_write() returned a synchronous error
                 * then we want to return the number of bytes which were
@@ -2528,13 +2378,57 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         */
                }
        } else {
-               written = generic_file_buffered_write(iocb, iov, nr_segs,
+               iter->count = count;
+               written = generic_file_buffered_write_iter(iocb, iter,
                                pos, ppos, count, written);
        }
 out:
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
+EXPORT_SYMBOL(__generic_file_write_iter);
+
+ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+
+       mutex_lock(&inode->i_mutex);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
+       mutex_unlock(&inode->i_mutex);
+
+       if (ret > 0 || ret == -EIOCBQUEUED) {
+               ssize_t err;
+
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0 && ret > 0)
+                       ret = err;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(generic_file_write_iter);
+
+ssize_t
+__generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t *ppos)
+{
+       struct iov_iter iter;
+       size_t count;
+       int ret;
+
+       count = 0;
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+       if (ret)
+               goto out;
+
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+       ret = __generic_file_write_iter(iocb, &iter, ppos);
+out:
+       return ret;
+}
 EXPORT_SYMBOL(__generic_file_aio_write);
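For callers that already hold an iov_iter, the old iovec entry points above become thin wrappers. As a hedged illustration only (the surrounding function is hypothetical; generic_file_write_iter() and its locking behaviour are taken from the hunks above), such a caller could go through the new helper directly:

	/*
	 * Hypothetical write path built on the new helper: it takes i_mutex,
	 * does the direct/buffered split, and calls generic_write_sync() on
	 * success, exactly as in the hunk above.
	 */
	static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *iter,
					  loff_t pos)
	{
		return generic_file_write_iter(iocb, iter, pos);
	}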
 
 /**
index 7489884682d84a6b5840fef19e90234076fd374e..610e3df2768a6a5b2ec1e293da4c96dafbbe2d30 100644 (file)
@@ -2697,6 +2697,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 
        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2719,7 +2720,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
        split_huge_page(page);
 
        put_page(page);
-       BUG_ON(pmd_trans_huge(*pmd));
+
+       /*
+        * We don't always have down_write of mmap_sem here: a racing
+        * do_huge_pmd_wp_page() might have copied-on-write to another
+        * huge page before our split_huge_page() got the anon_vma lock.
+        */
+       if (unlikely(pmd_trans_huge(*pmd)))
+               goto again;
 }
 
 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
index b49579c7f2a550462c334f97907f2fc131a65e00..0b7656e804d126cf0fcfa04a8b427396af86deb1 100644 (file)
@@ -653,6 +653,7 @@ static void free_huge_page(struct page *page)
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
        restore_reserve = PagePrivate(page);
+       ClearPagePrivate(page);
 
        spin_lock(&hugetlb_lock);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
@@ -695,8 +696,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
+       __ClearPageReserved(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               /*
+                * For gigantic hugepages allocated through bootmem at
+                * boot, it's safer to be consistent with the not-gigantic
+                * hugepages and clear the PG_reserved bit from all tail pages
+                * too.  Otherwise drivers using get_user_pages() to access tail
+                * pages may get the reference counting wrong if they see
+                * PG_reserved set on a tail page (despite the head page not
+                * having PG_reserved set).  Enforcing this consistency between
+                * head and tail pages allows drivers to optimize away a check
+                * on the head page when they need to know if put_page() is needed
+                * after get_user_pages().
+                */
+               __ClearPageReserved(p);
                set_page_count(p, 0);
                p->first_page = page;
        }
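The comment above is aimed at consumers of get_user_pages(); a minimal sketch of the driver-side pattern it enables follows (the helper name is made up, only put_page() and the head/tail PG_reserved consistency come from this hunk):

	/* Hypothetical cleanup after get_user_pages(): with head and tail
	 * pages now consistent, no PageReserved() special case is needed. */
	static void example_release_pages(struct page **pages, unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++)
			put_page(pages[i]);
	}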
@@ -1329,9 +1344,9 @@ static void __init gather_bootmem_prealloc(void)
 #else
                page = virt_to_page(m);
 #endif
-               __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
+               WARN_ON(PageReserved(page));
                prep_new_huge_page(h, page, page_to_nid(page));
                /*
                 * If we had gigantic hugepages allocated at boot time, we need
index afc2daa91c609dd8aab70f50fb58fd9100ed8326..4c84678371eb5b5905cc8c4386b512ec57e4f5e3 100644 (file)
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!hwpoison_filter_enable)
-               goto inject;
        if (!pfn_valid(pfn))
                return -ENXIO;
 
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
        if (!get_page_unless_zero(hpage))
                return 0;
 
+       if (!hwpoison_filter_enable)
+               goto inject;
+
        if (!PageLRU(p) && !PageHuge(p))
                shake_page(p, 0);
        /*
index 6975bc812542d2c13642363d928005f355199c54..539eeb96b323bf649f83783e0dddcb4f907e1d6e 100644 (file)
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
  */
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
+       struct page *p;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       for (; start < end; start += PAGE_SIZE) {
-               struct page *p;
+       for (; start < end; start += PAGE_SIZE <<
+                               compound_order(compound_head(p))) {
                int ret;
 
                ret = get_user_pages_fast(start, 1, 0, &p);
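A worked example of the new loop step, assuming 4 KiB base pages: if get_user_pages_fast() pins a page that belongs to a 2 MiB transparent huge page, compound_order(compound_head(p)) is 9, so start advances by PAGE_SIZE << 9 = 2 MiB and the compound page is handled once instead of being revisited 512 times in 4 KiB steps; for an ordinary page the order is 0 and the step remains PAGE_SIZE.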
index d5ff3ce13029b2c99b4ed402898ae0c76a143fde..9c9c685e4ddca665c11b878bd5a19ed1199f8eca 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/limits.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
 
        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
+       struct rb_node          tree_node;      /* RB tree node */
+       unsigned long long      usage_in_excess;/* Set to the value by which */
+                                               /* the soft limit is exceeded */
+       bool                    on_tree;
        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
                                                /* use container_of        */
 };
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 };
 
+/*
+ * Cgroups above their limits are maintained in an RB-tree, independent of
+ * their hierarchy representation.
+ */
+
+struct mem_cgroup_tree_per_zone {
+       struct rb_root rb_root;
+       spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+       struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+       struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
 struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        u64 threshold;
@@ -286,7 +311,7 @@ struct mem_cgroup {
 
        atomic_t        dead_count;
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-       struct tcp_memcontrol tcp_mem;
+       struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
        /* analogous to slab_common's slab_caches list. per-memcg */
@@ -303,22 +328,6 @@ struct mem_cgroup {
        atomic_t        numainfo_events;
        atomic_t        numainfo_updating;
 #endif
-       /*
-        * Protects soft_contributed transitions.
-        * See mem_cgroup_update_soft_limit
-        */
-       spinlock_t soft_lock;
-
-       /*
-        * If true then this group has increased parents' children_in_excess
-        * when it got over the soft limit.
-        * When a group falls bellow the soft limit, parents' children_in_excess
-        * is decreased and soft_contributed changed to false.
-        */
-       bool soft_contributed;
-
-       /* Number of children that are in soft limit excess */
-       atomic_t children_in_excess;
 
        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
  * limit reclaim to prevent infinite loops, if they ever occur.
  */
 #define        MEM_CGROUP_MAX_RECLAIM_LOOPS            100
+#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
 
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -540,13 +550,13 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
        if (!memcg || mem_cgroup_is_root(memcg))
                return NULL;
 
-       return &memcg->tcp_mem.cg_proto;
+       return &memcg->tcp_mem;
 }
 EXPORT_SYMBOL(tcp_proto_cgroup);
 
 static void disarm_sock_keys(struct mem_cgroup *memcg)
 {
-       if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+       if (!memcg_proto_activated(&memcg->tcp_mem))
                return;
        static_key_slow_dec(&memcg_socket_limit_enabled);
 }
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
        return mem_cgroup_zoneinfo(memcg, nid, zid);
 }
 
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz,
+                               unsigned long long new_usage_in_excess)
+{
+       struct rb_node **p = &mctz->rb_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct mem_cgroup_per_zone *mz_node;
+
+       if (mz->on_tree)
+               return;
+
+       mz->usage_in_excess = new_usage_in_excess;
+       if (!mz->usage_in_excess)
+               return;
+       while (*p) {
+               parent = *p;
+               mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+                                       tree_node);
+               if (mz->usage_in_excess < mz_node->usage_in_excess)
+                       p = &(*p)->rb_left;
+               /*
+                * We can't avoid mem cgroups that are over their soft
+                * limit by the same amount
+                */
+               else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&mz->tree_node, parent, p);
+       rb_insert_color(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       if (!mz->on_tree)
+               return;
+       rb_erase(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       spin_lock(&mctz->lock);
+       __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+       spin_unlock(&mctz->lock);
+}
+
+
+static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
+{
+       unsigned long long excess;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+       mctz = soft_limit_tree_from_page(page);
+
+       /*
+        * Necessary to update all ancestors when hierarchy is used,
+        * because their event counter is not touched.
+        */
+       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+               mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+               excess = res_counter_soft_limit_excess(&memcg->res);
+               /*
+                * We have to update the tree if mz is on the RB-tree or
+                * memcg is over its soft limit.
+                */
+               if (excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+                       /*
+                        * Insert again. mz->usage_in_excess will be updated.
+                        * If excess is 0, no tree ops.
+                        */
+                       __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
+                       spin_unlock(&mctz->lock);
+               }
+       }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
+{
+       int node, zone;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       for_each_node(node) {
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       mz = mem_cgroup_zoneinfo(memcg, node, zone);
+                       mctz = soft_limit_tree_node_zone(node, zone);
+                       mem_cgroup_remove_exceeded(memcg, mz, mctz);
+               }
+       }
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct rb_node *rightmost = NULL;
+       struct mem_cgroup_per_zone *mz;
+
+retry:
+       mz = NULL;
+       rightmost = rb_last(&mctz->rb_root);
+       if (!rightmost)
+               goto done;              /* Nothing to reclaim from */
+
+       mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+       /*
+        * Remove the node now but someone else can add it back,
+        * we will add it back at the end of reclaim to its correct
+        * position in the tree.
+        */
+       __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+       if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
+               !css_tryget(&mz->memcg->css))
+               goto retry;
+done:
+       return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct mem_cgroup_per_zone *mz;
+
+       spin_lock(&mctz->lock);
+       mz = __mem_cgroup_largest_soft_limit_node(mctz);
+       spin_unlock(&mctz->lock);
+       return mz;
+}
+
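A short worked example of the ordering established above (the group names are illustrative): if three groups exceed their soft limits by 10 MB, 40 MB and 20 MB, __mem_cgroup_insert_exceeded() keys them by usage_in_excess so the 40 MB offender ends up rightmost, and mem_cgroup_largest_soft_limit_node(), which takes rb_last(), hands that worst offender to reclaim first.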
 /*
  * Implementation Note: reading percpu statistics for memcg.
  *
@@ -698,6 +866,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
        unsigned long val = 0;
        int cpu;
 
+       get_online_cpus();
        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->events[idx], cpu);
 #ifdef CONFIG_HOTPLUG_CPU
@@ -705,6 +874,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
        val += memcg->nocpu_base.events[idx];
        spin_unlock(&memcg->pcp_counter_lock);
 #endif
+       put_online_cpus();
        return val;
 }
 
@@ -821,48 +991,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
        return false;
 }
 
-/*
- * Called from rate-limited memcg_check_events when enough
- * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
- * that all the parents up the hierarchy will be notified that this group
- * is in excess or that it is not in excess anymore. mmecg->soft_contributed
- * makes the transition a single action whenever the state flips from one to
- * the other.
- */
-static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
-{
-       unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
-       struct mem_cgroup *parent = memcg;
-       int delta = 0;
-
-       spin_lock(&memcg->soft_lock);
-       if (excess) {
-               if (!memcg->soft_contributed) {
-                       delta = 1;
-                       memcg->soft_contributed = true;
-               }
-       } else {
-               if (memcg->soft_contributed) {
-                       delta = -1;
-                       memcg->soft_contributed = false;
-               }
-       }
-
-       /*
-        * Necessary to update all ancestors when hierarchy is used
-        * because their event counter is not touched.
-        * We track children even outside the hierarchy for the root
-        * cgroup because tree walk starting at root should visit
-        * all cgroups and we want to prevent from pointless tree
-        * walk if no children is below the limit.
-        */
-       while (delta && (parent = parent_mem_cgroup(parent)))
-               atomic_add(delta, &parent->children_in_excess);
-       if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
-               atomic_add(delta, &root_mem_cgroup->children_in_excess);
-       spin_unlock(&memcg->soft_lock);
-}
-
 /*
  * Check events in order.
  *
@@ -886,7 +1014,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
-                       mem_cgroup_update_soft_limit(memcg);
+                       mem_cgroup_update_tree(memcg, page);
 #if MAX_NUMNODES > 1
                if (unlikely(do_numainfo))
                        atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1057,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return memcg;
 }
 
-static enum mem_cgroup_filter_t
-mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
-               mem_cgroup_iter_filter cond)
-{
-       if (!cond)
-               return VISIT;
-       return cond(memcg, root);
-}
-
 /*
  * Returns a next (in a pre-order walk) alive memcg (with elevated css
  * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1064,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
  * helper function to be used by mem_cgroup_iter
  */
 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
-               struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
+               struct mem_cgroup *last_visited)
 {
        struct cgroup_subsys_state *prev_css, *next_css;
 
@@ -963,31 +1082,11 @@ skip_node:
        if (next_css) {
                struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
 
-               switch (mem_cgroup_filter(mem, root, cond)) {
-               case SKIP:
+               if (css_tryget(&mem->css))
+                       return mem;
+               else {
                        prev_css = next_css;
                        goto skip_node;
-               case SKIP_TREE:
-                       if (mem == root)
-                               return NULL;
-                       /*
-                        * css_rightmost_descendant is not an optimal way to
-                        * skip through a subtree (especially for imbalanced
-                        * trees leaning to right) but that's what we have right
-                        * now. More effective solution would be traversing
-                        * right-up for first non-NULL without calling
-                        * css_next_descendant_pre afterwards.
-                        */
-                       prev_css = css_rightmost_descendant(next_css);
-                       goto skip_node;
-               case VISIT:
-                       if (css_tryget(&mem->css))
-                               return mem;
-                       else {
-                               prev_css = next_css;
-                               goto skip_node;
-                       }
-                       break;
                }
        }
 
@@ -1051,7 +1150,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * @root: hierarchy root
  * @prev: previously returned memcg, NULL on first invocation
  * @reclaim: cookie for shared reclaim walks, NULL for full walks
- * @cond: filter for visited nodes, NULL for no filter
  *
  * Returns references to children of the hierarchy below @root, or
  * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1162,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * divide up the memcgs in the hierarchy among all concurrent
  * reclaimers operating on the same zone and priority.
  */
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim,
-                                  mem_cgroup_iter_filter cond)
+                                  struct mem_cgroup_reclaim_cookie *reclaim)
 {
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *last_visited = NULL;
 
-       if (mem_cgroup_disabled()) {
-               /* first call must return non-NULL, second return NULL */
-               return (struct mem_cgroup *)(unsigned long)!prev;
-       }
+       if (mem_cgroup_disabled())
+               return NULL;
 
        if (!root)
                root = root_mem_cgroup;
@@ -1086,9 +1181,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
        if (!root->use_hierarchy && root != root_mem_cgroup) {
                if (prev)
                        goto out_css_put;
-               if (mem_cgroup_filter(root, root, cond) == VISIT)
-                       return root;
-               return NULL;
+               return root;
        }
 
        rcu_read_lock();
@@ -1111,7 +1204,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
                        last_visited = mem_cgroup_iter_load(iter, root, &seq);
                }
 
-               memcg = __mem_cgroup_iter_next(root, last_visited, cond);
+               memcg = __mem_cgroup_iter_next(root, last_visited);
 
                if (reclaim) {
                        mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1215,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
                                reclaim->generation = iter->generation;
                }
 
-               /*
-                * We have finished the whole tree walk or no group has been
-                * visited because filter told us to skip the root node.
-                */
-               if (!memcg && (prev || (cond && !last_visited)))
+               if (prev && !memcg)
                        goto out_unlock;
        }
 out_unlock:
@@ -1767,7 +1856,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
        return total;
 }
 
-#if MAX_NUMNODES > 1
 /**
  * test_mem_cgroup_node_reclaimable
  * @memcg: the target memcg
@@ -1790,6 +1878,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
        return false;
 
 }
+#if MAX_NUMNODES > 1
 
 /*
  * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1946,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
        return node;
 }
 
+/*
+ * Check all nodes whether they contain reclaimable pages or not.
+ * For a quick scan, we make use of scan_nodes. This will allow us to skip
+ * unused nodes. But scan_nodes is lazily updated and may not contain
+ * enough new information, so we need to double check.
+ */
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+{
+       int nid;
+
+       /*
+        * quick check...making use of scan_node.
+        * We can skip unused nodes.
+        */
+       if (!nodes_empty(memcg->scan_nodes)) {
+               for (nid = first_node(memcg->scan_nodes);
+                    nid < MAX_NUMNODES;
+                    nid = next_node(nid, memcg->scan_nodes)) {
+
+                       if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+                               return true;
+               }
+       }
+       /*
+        * Check rest of nodes.
+        */
+       for_each_node_state(nid, N_MEMORY) {
+               if (node_isset(nid, memcg->scan_nodes))
+                       continue;
+               if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+                       return true;
+       }
+       return false;
+}
+
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 {
        return 0;
 }
 
-#endif
-
-/*
- * A group is eligible for the soft limit reclaim under the given root
- * hierarchy if
- *     a) it is over its soft limit
- *     b) any parent up the hierarchy is over its soft limit
- *
- * If the given group doesn't have any children over the limit then it
- * doesn't make any sense to iterate its subtree.
- */
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
-       struct mem_cgroup *parent;
-
-       if (!memcg)
-               memcg = root_mem_cgroup;
-       parent = memcg;
-
-       if (res_counter_soft_limit_excess(&memcg->res))
-               return VISIT;
+       return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
+}
+#endif
 
-       /*
-        * If any parent up to the root in the hierarchy is over its soft limit
-        * then we have to obey and reclaim from this group as well.
-        */
-       while ((parent = parent_mem_cgroup(parent))) {
-               if (res_counter_soft_limit_excess(&parent->res))
-                       return VISIT;
-               if (parent == root)
+static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
+                                  struct zone *zone,
+                                  gfp_t gfp_mask,
+                                  unsigned long *total_scanned)
+{
+       struct mem_cgroup *victim = NULL;
+       int total = 0;
+       int loop = 0;
+       unsigned long excess;
+       unsigned long nr_scanned;
+       struct mem_cgroup_reclaim_cookie reclaim = {
+               .zone = zone,
+               .priority = 0,
+       };
+
+       excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
+
+       while (1) {
+               victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
+               if (!victim) {
+                       loop++;
+                       if (loop >= 2) {
+                               /*
+                                * If we have not been able to reclaim
+                                * anything, it might be because there are
+                                * no reclaimable pages under this hierarchy
+                                */
+                               if (!total)
+                                       break;
+                               /*
+                                * We want to do more targeted reclaim.
+                                * excess >> 2 is not too large, so we do not
+                                * reclaim too much, and not too small, so we
+                                * do not keep coming back to reclaim from this cgroup
+                                */
+                               if (total >= (excess >> 2) ||
+                                       (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
+                                       break;
+                       }
+                       continue;
+               }
+               if (!mem_cgroup_reclaimable(victim, false))
+                       continue;
+               total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
+                                                    zone, &nr_scanned);
+               *total_scanned += nr_scanned;
+               if (!res_counter_soft_limit_excess(&root_memcg->res))
                        break;
        }
-
-       if (!atomic_read(&memcg->children_in_excess))
-               return SKIP_TREE;
-       return SKIP;
+       mem_cgroup_iter_break(root_memcg, victim);
+       return total;
 }
 
 static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2018,110 +2161,59 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
                memcg_wakeup_oom(memcg);
 }
 
-/*
- * try to call OOM killer
- */
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       bool locked;
-       int wakeups;
-
        if (!current->memcg_oom.may_oom)
                return;
-
-       current->memcg_oom.in_memcg_oom = 1;
-
        /*
-        * As with any blocking lock, a contender needs to start
-        * listening for wakeups before attempting the trylock,
-        * otherwise it can miss the wakeup from the unlock and sleep
-        * indefinitely.  This is just open-coded because our locking
-        * is so particular to memcg hierarchies.
+        * We are in the middle of the charge context here, so we
+        * don't want to block when potentially sitting on a callstack
+        * that holds all kinds of filesystem and mm locks.
+        *
+        * Also, the caller may handle a failed allocation gracefully
+        * (like optional page cache readahead) and so an OOM killer
+        * invocation might not even be necessary.
+        *
+        * That's why we don't do anything here except remember the
+        * OOM context and then deal with it at the end of the page
+        * fault when the stack is unwound, the locks are released,
+        * and when we know whether the fault was overall successful.
         */
-       wakeups = atomic_read(&memcg->oom_wakeups);
-       mem_cgroup_mark_under_oom(memcg);
-
-       locked = mem_cgroup_oom_trylock(memcg);
-
-       if (locked)
-               mem_cgroup_oom_notify(memcg);
-
-       if (locked && !memcg->oom_kill_disable) {
-               mem_cgroup_unmark_under_oom(memcg);
-               mem_cgroup_out_of_memory(memcg, mask, order);
-               mem_cgroup_oom_unlock(memcg);
-               /*
-                * There is no guarantee that an OOM-lock contender
-                * sees the wakeups triggered by the OOM kill
-                * uncharges.  Wake any sleepers explicitely.
-                */
-               memcg_oom_recover(memcg);
-       } else {
-               /*
-                * A system call can just return -ENOMEM, but if this
-                * is a page fault and somebody else is handling the
-                * OOM already, we need to sleep on the OOM waitqueue
-                * for this memcg until the situation is resolved.
-                * Which can take some time because it might be
-                * handled by a userspace task.
-                *
-                * However, this is the charge context, which means
-                * that we may sit on a large call stack and hold
-                * various filesystem locks, the mmap_sem etc. and we
-                * don't want the OOM handler to deadlock on them
-                * while we sit here and wait.  Store the current OOM
-                * context in the task_struct, then return -ENOMEM.
-                * At the end of the page fault handler, with the
-                * stack unwound, pagefault_out_of_memory() will check
-                * back with us by calling
-                * mem_cgroup_oom_synchronize(), possibly putting the
-                * task to sleep.
-                */
-               current->memcg_oom.oom_locked = locked;
-               current->memcg_oom.wakeups = wakeups;
-               css_get(&memcg->css);
-               current->memcg_oom.wait_on_memcg = memcg;
-       }
+       css_get(&memcg->css);
+       current->memcg_oom.memcg = memcg;
+       current->memcg_oom.gfp_mask = mask;
+       current->memcg_oom.order = order;
 }
 
 /**
  * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ * @handle: actually kill/wait or just clean up the OOM state
  *
- * This has to be called at the end of a page fault if the the memcg
- * OOM handler was enabled and the fault is returning %VM_FAULT_OOM.
+ * This has to be called at the end of a page fault if the memcg OOM
+ * handler was enabled.
  *
- * Memcg supports userspace OOM handling, so failed allocations must
+ * Memcg supports userspace OOM handling where failed allocations must
  * sleep on a waitqueue until the userspace task resolves the
  * situation.  Sleeping directly in the charge context with all kinds
  * of locks held is not a good idea, instead we remember an OOM state
  * in the task and mem_cgroup_oom_synchronize() has to be called at
- * the end of the page fault to put the task to sleep and clean up the
- * OOM state.
+ * the end of the page fault to complete the OOM handling.
  *
  * Returns %true if an ongoing memcg OOM situation was detected and
- * finalized, %false otherwise.
+ * completed, %false otherwise.
  */
-bool mem_cgroup_oom_synchronize(void)
+bool mem_cgroup_oom_synchronize(bool handle)
 {
+       struct mem_cgroup *memcg = current->memcg_oom.memcg;
        struct oom_wait_info owait;
-       struct mem_cgroup *memcg;
+       bool locked;
 
        /* OOM is global, do not handle */
-       if (!current->memcg_oom.in_memcg_oom)
-               return false;
-
-       /*
-        * We invoked the OOM killer but there is a chance that a kill
-        * did not free up any charges.  Everybody else might already
-        * be sleeping, so restart the fault and keep the rampage
-        * going until some charges are released.
-        */
-       memcg = current->memcg_oom.wait_on_memcg;
        if (!memcg)
-               goto out;
+               return false;
 
-       if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
-               goto out_memcg;
+       if (!handle)
+               goto cleanup;
 
        owait.memcg = memcg;
        owait.wait.flags = 0;
@@ -2130,13 +2222,25 @@ bool mem_cgroup_oom_synchronize(void)
        INIT_LIST_HEAD(&owait.wait.task_list);
 
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
-       /* Only sleep if we didn't miss any wakeups since OOM */
-       if (atomic_read(&memcg->oom_wakeups) == current->memcg_oom.wakeups)
+       mem_cgroup_mark_under_oom(memcg);
+
+       locked = mem_cgroup_oom_trylock(memcg);
+
+       if (locked)
+               mem_cgroup_oom_notify(memcg);
+
+       if (locked && !memcg->oom_kill_disable) {
+               mem_cgroup_unmark_under_oom(memcg);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
+               mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
+                                        current->memcg_oom.order);
+       } else {
                schedule();
-       finish_wait(&memcg_oom_waitq, &owait.wait);
-out_memcg:
-       mem_cgroup_unmark_under_oom(memcg);
-       if (current->memcg_oom.oom_locked) {
+               mem_cgroup_unmark_under_oom(memcg);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
+       }
+
+       if (locked) {
                mem_cgroup_oom_unlock(memcg);
                /*
                 * There is no guarantee that an OOM-lock contender
@@ -2145,10 +2249,9 @@ out_memcg:
                 */
                memcg_oom_recover(memcg);
        }
+cleanup:
+       current->memcg_oom.memcg = NULL;
        css_put(&memcg->css);
-       current->memcg_oom.wait_on_memcg = NULL;
-out:
-       current->memcg_oom.in_memcg_oom = 0;
        return true;
 }
 
@@ -2562,6 +2665,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                     || fatal_signal_pending(current)))
                goto bypass;
 
+       if (unlikely(task_in_memcg_oom(current)))
+               goto bypass;
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -2660,6 +2766,8 @@ done:
        return 0;
 nomem:
        *ptr = NULL;
+       if (gfp_mask & __GFP_NOFAIL)
+               return 0;
        return -ENOMEM;
 bypass:
        *ptr = root_mem_cgroup;
@@ -2812,7 +2920,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
        unlock_page_cgroup(pc);
 
        /*
-        * "charge_statistics" updated event counter.
+        * "charge_statistics" updated the event counter. Then, check it.
+        * Insert the ancestor (and the ancestor's ancestors) into the
+        * soft-limit RB-tree if they exceed the soft limit.
         */
        memcg_check_events(memcg, page);
 }
@@ -4647,6 +4757,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
        return ret;
 }
 
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
+{
+       unsigned long nr_reclaimed = 0;
+       struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+       unsigned long reclaimed;
+       int loop = 0;
+       struct mem_cgroup_tree_per_zone *mctz;
+       unsigned long long excess;
+       unsigned long nr_scanned;
+
+       if (order > 0)
+               return 0;
+
+       mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
+       /*
+        * This loop can run a while, especially if mem_cgroups continuously
+        * keep exceeding their soft limit and putting the system under
+        * pressure
+        */
+       do {
+               if (next_mz)
+                       mz = next_mz;
+               else
+                       mz = mem_cgroup_largest_soft_limit_node(mctz);
+               if (!mz)
+                       break;
+
+               nr_scanned = 0;
+               reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
+                                                   gfp_mask, &nr_scanned);
+               nr_reclaimed += reclaimed;
+               *total_scanned += nr_scanned;
+               spin_lock(&mctz->lock);
+
+               /*
+                * If we failed to reclaim anything from this memory cgroup
+                * it is time to move on to the next cgroup
+                */
+               next_mz = NULL;
+               if (!reclaimed) {
+                       do {
+                               /*
+                                * Loop until we find yet another one.
+                                *
+                                * By the time we get the soft_limit lock
+                                * again, someone might have added the
+                                * group back on the RB tree. Iterate to
+                                * make sure we get a different mem.
+                                * mem_cgroup_largest_soft_limit_node returns
+                                * NULL if no other cgroup is present on
+                                * the tree
+                                */
+                               next_mz =
+                               __mem_cgroup_largest_soft_limit_node(mctz);
+                               if (next_mz == mz)
+                                       css_put(&next_mz->memcg->css);
+                               else /* next_mz == NULL or other memcg */
+                                       break;
+                       } while (1);
+               }
+               __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+               excess = res_counter_soft_limit_excess(&mz->memcg->res);
+               /*
+                * One school of thought says that we should not add
+                * back the node to the tree if reclaim returns 0.
+                * But our reclaim could return 0 simply because, due
+                * to priority, we are exposing a smaller subset of
+                * memory to reclaim from. Consider this as a longer
+                * term TODO.
+                */
+               /* If excess == 0, no tree ops */
+               __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
+               spin_unlock(&mctz->lock);
+               css_put(&mz->memcg->css);
+               loop++;
+               /*
+                * Could not reclaim anything and there are no more
+                * mem cgroups to try or we seem to be looping without
+                * reclaiming anything.
+                */
+               if (!nr_reclaimed &&
+                       (next_mz == NULL ||
+                       loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+                       break;
+       } while (!nr_reclaimed);
+       if (next_mz)
+               css_put(&next_mz->memcg->css);
+       return nr_reclaimed;
+}
+
 /**
  * mem_cgroup_force_empty_list - clears LRU of a group
  * @memcg: group to clear
@@ -5911,6 +6113,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                lruvec_init(&mz->lruvec);
+               mz->usage_in_excess = 0;
+               mz->on_tree = false;
                mz->memcg = memcg;
        }
        memcg->nodeinfo[node] = pn;
@@ -5966,6 +6170,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
        int node;
        size_t size = memcg_size();
 
+       mem_cgroup_remove_from_trees(memcg);
        free_css_id(&mem_cgroup_subsys, &memcg->css);
 
        for_each_node(node)
@@ -6002,6 +6207,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
+static void __init mem_cgroup_soft_limit_tree_init(void)
+{
+       struct mem_cgroup_tree_per_node *rtpn;
+       struct mem_cgroup_tree_per_zone *rtpz;
+       int tmp, node, zone;
+
+       for_each_node(node) {
+               tmp = node;
+               if (!node_state(node, N_NORMAL_MEMORY))
+                       tmp = -1;
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+               BUG_ON(!rtpn);
+
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+       }
+}
+
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -6031,7 +6259,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        vmpressure_init(&memcg->vmpressure);
-       spin_lock_init(&memcg->soft_lock);
 
        return &memcg->css;
 
@@ -6109,13 +6336,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        mem_cgroup_invalidate_reclaim_iterators(memcg);
        mem_cgroup_reparent_charges(memcg);
-       if (memcg->soft_contributed) {
-               while ((memcg = parent_mem_cgroup(memcg)))
-                       atomic_dec(&memcg->children_in_excess);
-
-               if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
-                       atomic_dec(&root_mem_cgroup->children_in_excess);
-       }
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
 }
@@ -6790,6 +7010,7 @@ static int __init mem_cgroup_init(void)
 {
        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        enable_swap_cgroup();
+       mem_cgroup_soft_limit_tree_init();
        memcg_stock_init();
        return 0;
 }
index 947ed5413279261a830eeaeb42d9392aea0f8fa8..bf3351b5115e54915a3d7eaa718d10a9771b2c5f 100644 (file)
@@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                         * shake_page could have turned it free.
                         */
                        if (is_free_buddy_page(p)) {
-                               action_result(pfn, "free buddy, 2nd try",
-                                               DELAYED);
+                               if (flags & MF_COUNT_INCREASED)
+                                       action_result(pfn, "free buddy", DELAYED);
+                               else
+                                       action_result(pfn, "free buddy, 2nd try", DELAYED);
                                return 0;
                        }
                        action_result(pfn, "non LRU", IGNORED);
@@ -1349,7 +1351,7 @@ int unpoison_memory(unsigned long pfn)
         * worked by memory_failure() and the page lock is not held yet.
         * In such case, we yield to memory_failure() and make unpoison fail.
         */
-       if (PageTransHuge(page)) {
+       if (!PageHuge(page) && PageTransHuge(page)) {
                pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
                        return 0;
        }
index ca00039471152eae75bb8321a129a4edfc6f580e..1311f26497e6a0f776682ed8a7e23b8620a5b9ad 100644 (file)
@@ -837,6 +837,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                         */
                                        make_migration_entry_read(&entry);
                                        pte = swp_entry_to_pte(entry);
+                                       if (pte_swp_soft_dirty(*src_pte))
+                                               pte = pte_swp_mksoft_dirty(pte);
                                        set_pte_at(src_mm, addr, src_pte, pte);
                                }
                        }
@@ -3863,15 +3865,21 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * space.  Kernel faults are handled more gracefully.
         */
        if (flags & FAULT_FLAG_USER)
-               mem_cgroup_enable_oom();
+               mem_cgroup_oom_enable();
 
        ret = __handle_mm_fault(mm, vma, address, flags);
 
-       if (flags & FAULT_FLAG_USER)
-               mem_cgroup_disable_oom();
-
-       if (WARN_ON(task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)))
-               mem_cgroup_oom_synchronize();
+       if (flags & FAULT_FLAG_USER) {
+               mem_cgroup_oom_disable();
+                /*
+                 * The task may have entered a memcg OOM situation but
+                 * if the allocation error was handled gracefully (no
+                 * VM_FAULT_OOM), there is no need to kill anything.
+                 * Just clean up the OOM state peacefully.
+                 */
+                if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+                        mem_cgroup_oom_synchronize(false);
+       }
 
        return ret;
 }
index 9c8d5f59d30bb87e63c9990c085eb646e69b4ed7..7a7325ee1d089696a8073a84d2f748f326124805 100644 (file)
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               if (unlikely(balloon_page_movable(page)))
+               if (unlikely(isolated_balloon_page(page)))
                        balloon_page_putback(page);
                else
                        putback_lru_page(page);
@@ -161,6 +161,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+       if (pte_swp_soft_dirty(*ptep))
+               pte = pte_mksoft_dirty(pte);
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
index d63802663242eb6ad13ca2ed065434f24fd7e5d1..d480cd6fc475854259bdd51021d5125dbdfbe479 100644 (file)
@@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 
        /*
         * Initialize pte walk starting at the already pinned page where we
-        * are sure that there is a pte.
+        * are sure that there is a pte, as it was pinned under the same
+        * mmap_sem write op.
         */
        pte = get_locked_pte(vma->vm_mm, start, &ptl);
-       end = min(end, pmd_addr_end(start, end));
+       /* Make sure we do not cross the page table boundary */
+       end = pgd_addr_end(start, end);
+       end = pud_addr_end(start, end);
+       end = pmd_addr_end(start, end);
 
        /* The page next to the pinned page is the first we will try to get */
        start += PAGE_SIZE;
@@ -736,6 +740,7 @@ static int do_mlockall(int flags)
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+               cond_resched();
        }
 out:
        return 0;
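
The clamping above keeps the pte walk inside the page table that covers the pinned page; the hunk applies the same clamp at pgd, pud and pmd level in turn. A self-contained sketch of the pmd-level case, assuming a 2 MiB region size picked only for illustration:

#include <stdio.h>

#define PMD_SIZE (1UL << 21)           /* example region size */
#define PMD_MASK (~(PMD_SIZE - 1))

/* same shape as the kernel's pmd_addr_end(): stop at the next boundary */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
	unsigned long start = 0x1ff000, end = 0x400000;

	/* without the clamp the walk would run past the page table covering
	 * 'start'; with it the caller stops at the next 2 MiB boundary */
	printf("clamped end: %#lx\n", pmd_addr_end(start, end));
	return 0;
}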
index 94722a4d6b438311de1d1690d81a0d598a907339..a3af058f68e4d9f434337d0dcd6127d5d8c6d039 100644 (file)
@@ -94,13 +94,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
 
                        if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
-                               set_pte_at(mm, addr, pte,
-                                       swp_entry_to_pte(entry));
+                               newpte = swp_entry_to_pte(entry);
+                               if (pte_swp_soft_dirty(oldpte))
+                                       newpte = pte_swp_mksoft_dirty(newpte);
+                               set_pte_at(mm, addr, pte, newpte);
                        }
                        pages++;
                }
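
As in the migration and copy_one_pte hunks earlier, the change above re-applies the soft-dirty bit after the pte is rebuilt from the migration entry, because the rebuild discards it. A tiny sketch of that carry-over with made-up flag helpers (entry_to_pte, soft_dirty and mksoft_dirty are illustrative, not the real pte accessors):

#include <stdbool.h>
#include <stdio.h>

#define ENTRY_SOFT_DIRTY 0x2UL         /* illustrative bit, not a real pte bit */

typedef unsigned long entry_t;

/* stands in for swp_entry_to_pte(): the rebuilt entry carries no old bits */
static entry_t entry_to_pte(void)      { return 0; }
static bool    soft_dirty(entry_t e)   { return e & ENTRY_SOFT_DIRTY; }
static entry_t mksoft_dirty(entry_t e) { return e | ENTRY_SOFT_DIRTY; }

int main(void)
{
	entry_t oldpte = ENTRY_SOFT_DIRTY; /* page was written since last clear */

	entry_t newpte = entry_to_pte();   /* rebuild drops soft-dirty */
	if (soft_dirty(oldpte))            /* ... so carry it over, as above */
		newpte = mksoft_dirty(newpte);

	printf("soft-dirty preserved: %d\n", soft_dirty(newpte));
	return 0;
}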
index 91b13d6a16d453b50894e6028800b92399bf8f14..0843feb66f3d0236abd4386b5bfd0170c24ae0ef 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 
 #include "internal.h"
 
@@ -63,10 +62,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                return NULL;
 
        pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd) {
-               pud_free(mm, pud);
+       if (!pmd)
                return NULL;
-       }
 
        VM_BUG_ON(pmd_trans_huge(*pmd));
 
index 314e9d2743813ea5e52c08929b8a4cbb4621dfa9..6738c47f1f7280edc5f3fe610b2658195a0a77e0 100644 (file)
@@ -680,7 +680,7 @@ void pagefault_out_of_memory(void)
 {
        struct zonelist *zonelist;
 
-       if (mem_cgroup_oom_synchronize())
+       if (mem_cgroup_oom_synchronize(true))
                return;
 
        zonelist = node_zonelist(first_online_node, GFP_KERNEL);
index f5236f804aa6cdf9800f445c31950a52cdd6b88f..63807583d8e89f1c96f8b05bcf5fe422ed200c26 100644 (file)
@@ -1210,11 +1210,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
        return 1;
 }
 
-static long bdi_max_pause(struct backing_dev_info *bdi,
-                         unsigned long bdi_dirty)
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+                                  unsigned long bdi_dirty)
 {
-       long bw = bdi->avg_write_bandwidth;
-       long t;
+       unsigned long bw = bdi->avg_write_bandwidth;
+       unsigned long t;
 
        /*
         * Limit pause time for small memory systems. If sleeping for too long
@@ -1226,7 +1226,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi,
        t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
        t++;
 
-       return min_t(long, t, MAX_PAUSE);
+       return min_t(unsigned long, t, MAX_PAUSE);
 }
 
 static long bdi_min_pause(struct backing_dev_info *bdi,
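
A worked example of the bdi_max_pause() arithmetic above, done entirely in unsigned long as the new signature requires. HZ, MAX_PAUSE, the bandwidth and the dirty count are sample values chosen for illustration only:

#include <stdio.h>

#define HZ        100UL
#define MAX_PAUSE (HZ / 5)             /* assumed cap, roughly 200 ms here */

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long bw = 2048;           /* avg write bandwidth (sample) */
	unsigned long bdi_dirty = 512;     /* dirty pages on this bdi (sample) */
	unsigned long t;

	/* same shape as bdi_max_pause(): allow a longer pause when the dirty
	 * backlog is large relative to ~1/8 s worth of writeback bandwidth */
	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	printf("max pause: %lu jiffies\n", t < MAX_PAUSE ? t : MAX_PAUSE);
	return 0;
}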
index 0ee638f76ebe584cd40d7541bac886b96b03162e..dd886fac451ab6ab7d6a3132fd2587c10538f300 100644 (file)
@@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
-#ifdef CONFIG_HIGHMEM
-               if (PageHighMem(page))
-                       totalhigh_pages -= 1 << order;
-#endif
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
index 8c79a4764be0c99a1d573d174e2d247e90079519..e4e6a4f57b09f2c304683e026a8dab05f98a500d 100644 (file)
@@ -258,11 +258,14 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
        if (sis->flags & SWP_FILE) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
-               struct address_space *mapping = swap_file->f_mapping;
-               struct iovec iov = {
-                       .iov_base = kmap(page),
-                       .iov_len  = PAGE_SIZE,
+               struct bio_vec bvec = {
+                       .bv_page = kmap(page),
+                       .bv_len = PAGE_SIZE,
+                       .bv_offset = 0,
                };
+               struct iov_iter iter;
+
+               iov_iter_init_bvec(&iter, &bvec, 1, PAGE_SIZE, 0);
 
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);
@@ -270,9 +273,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 
                set_page_writeback(page);
                unlock_page(page);
-               ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
-                                               &kiocb, &iov,
-                                               kiocb.ki_pos, 1);
+               ret = swap_file->f_op->write_iter(&kiocb, &iter, kiocb.ki_pos);
                kunmap(page);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
index 8297623fcaedec21b37b5080d376967e673afe92..8612a95d7d7e2f7ddba00076b3db0e13b2f8c7c5 100644 (file)
@@ -1464,14 +1464,23 @@ shmem_write_end(struct file *file, struct address_space *mapping,
        return copied;
 }
 
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_read_iter(struct kiocb *iocb,
+                                   struct iov_iter *iter, loff_t pos)
 {
+       read_descriptor_t desc;
+       loff_t *ppos = &iocb->ki_pos;
+       struct file *filp = iocb->ki_filp;
        struct inode *inode = file_inode(filp);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;
 
+       desc.written = 0;
+       desc.count = iov_iter_count(iter);
+       desc.arg.data = iter;
+       desc.error = 0;
+
        /*
         * Might this read be for a stacking filesystem?  Then when reading
         * holes of a sparse file, we actually need to allocate those pages,
@@ -1498,10 +1507,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                break;
                }
 
-               desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
-               if (desc->error) {
-                       if (desc->error == -EINVAL)
-                               desc->error = 0;
+               desc.error = shmem_getpage(inode, index, &page, sgp, NULL);
+               if (desc.error) {
+                       if (desc.error == -EINVAL)
+                               desc.error = 0;
                        break;
                }
                if (page)
@@ -1552,13 +1561,13 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
-               ret = actor(desc, page, offset, nr);
+               ret = file_read_iter_actor(&desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
 
                page_cache_release(page);
-               if (ret != nr || !desc->count)
+               if (ret != nr || !desc.count)
                        break;
 
                cond_resched();
@@ -1566,40 +1575,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-       struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg;
-       size_t count;
-       loff_t *ppos = &iocb->ki_pos;
 
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base;
-               desc.count = iov[seg].iov_len;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_shmem_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
-       return retval;
+       return desc.written ? desc.written : desc.error;
 }
 
 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -2724,8 +2701,8 @@ static const struct file_operations shmem_file_operations = {
        .llseek         = shmem_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = shmem_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = shmem_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
        .splice_write   = generic_file_splice_write,
index a3443278ce3a693b4c2625df3752a59e975abca3..e2e98af703ea9fdcbab4bcd034bc82a1482c9ca6 100644 (file)
@@ -56,6 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        continue;
                }
 
+#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
@@ -69,6 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        s = NULL;
                        return -EINVAL;
                }
+#endif
        }
 
        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
index 3963fc24fcc1b6f8c4d365de4d99bda8993311b7..de7c904e52e507079f5bae7a2b7854a9d7cf80b0 100644 (file)
@@ -1824,6 +1824,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        struct filename *pathname;
        int i, type, prev;
        int err;
+       unsigned int old_block_size;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -1914,6 +1915,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        }
 
        swap_file = p->swap_file;
+       old_block_size = p->old_block_size;
        p->swap_file = NULL;
        p->max = 0;
        swap_map = p->swap_map;
@@ -1938,7 +1940,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        inode = mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct block_device *bdev = I_BDEV(inode);
-               set_blocksize(bdev, p->old_block_size);
+               set_blocksize(bdev, old_block_size);
                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        } else {
                mutex_lock(&inode->i_mutex);
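
The swapoff change above copies old_block_size into a local before the swap_info fields are torn down, then restores the block size from that copy. A generic sketch of the save-before-teardown pattern (the struct and helper names are illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

struct swap_state {
	unsigned int old_block_size;
	char path[32];
};

static void restore_blocksize(unsigned int bs)
{
	printf("restoring block size %u\n", bs);
}

int main(void)
{
	struct swap_state s = { .old_block_size = 4096, .path = "/dev/sdb2" };

	/* save the value before the bookkeeping below invalidates it */
	unsigned int old_block_size = s.old_block_size;

	/* teardown clears the per-device state (simplified) */
	memset(&s, 0, sizeof(s));

	/* now it is safe to use the cached copy, not s.old_block_size */
	restore_blocksize(old_block_size);
	return 0;
}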
index 8ed1b775bdc9cafe9aaf2ab74e4acf842ffe1a9d..eea668d9cff6c578ada0cf6c02eca5e22de5598d 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
 
 #include "internal.h"
 
@@ -139,23 +140,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
        return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-       struct mem_cgroup *root = sc->target_mem_cgroup;
-       return !mem_cgroup_disabled() &&
-               mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
        return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-       return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -222,6 +211,7 @@ void unregister_shrinker(struct shrinker *shrinker)
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
+       kfree(shrinker->nr_deferred);
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
@@ -1125,7 +1115,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
        LIST_HEAD(clean_pages);
 
        list_for_each_entry_safe(page, next, page_list, lru) {
-               if (page_is_file_cache(page) && !PageDirty(page)) {
+               if (page_is_file_cache(page) && !PageDirty(page) &&
+                   !isolated_balloon_page(page)) {
                        ClearPageActive(page);
                        list_move(&page->lru, &clean_pages);
                }
@@ -2176,11 +2167,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
        }
 }
 
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
        unsigned long nr_reclaimed, nr_scanned;
-       int groups_scanned = 0;
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2177,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                        .zone = zone,
                        .priority = sc->priority,
                };
-               struct mem_cgroup *memcg = NULL;
-               mem_cgroup_iter_filter filter = (soft_reclaim) ?
-                       mem_cgroup_soft_reclaim_eligible : NULL;
+               struct mem_cgroup *memcg;
 
                nr_reclaimed = sc->nr_reclaimed;
                nr_scanned = sc->nr_scanned;
 
-               while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+               memcg = mem_cgroup_iter(root, NULL, &reclaim);
+               do {
                        struct lruvec *lruvec;
 
-                       groups_scanned++;
                        lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
                        shrink_lruvec(lruvec, sc);
@@ -2218,7 +2205,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                                mem_cgroup_iter_break(root, memcg);
                                break;
                        }
-               }
+                       memcg = mem_cgroup_iter(root, memcg, &reclaim);
+               } while (memcg);
 
                vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
                           sc->nr_scanned - nr_scanned,
@@ -2226,37 +2214,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
        } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
                                         sc->nr_scanned - nr_scanned, sc));
-
-       return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-       bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-       unsigned long nr_scanned = sc->nr_scanned;
-       int scanned_groups;
-
-       scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-       /*
-        * memcg iterator might race with other reclaimer or start from
-        * a incomplete tree walk so the tree walk in __shrink_zone
-        * might have missed groups that are above the soft limit. Try
-        * another loop to catch up with others. Do it just once to
-        * prevent from reclaim latencies when other reclaimers always
-        * preempt this one.
-        */
-       if (do_soft_reclaim && !scanned_groups)
-               __shrink_zone(zone, sc, do_soft_reclaim);
-
-       /*
-        * No group is over the soft limit or those that are do not have
-        * pages in the zone we are reclaiming so we have to reclaim everybody
-        */
-       if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-               __shrink_zone(zone, sc, false);
-               return;
-       }
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
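
The rewritten loop in shrink_zone() above primes the memcg iterator once and then uses a do/while, so the target group itself is always visited before the walk can end. A user-space sketch of that iteration shape, with a plain array standing in for the cgroup hierarchy and memcg_iter() standing in for mem_cgroup_iter():

#include <stddef.h>
#include <stdio.h>

static const char *groups[] = { "root", "A", "A/1", "B" };
#define NGROUPS (sizeof(groups) / sizeof(groups[0]))

/* stand-in for mem_cgroup_iter(): NULL 'prev' starts the walk,
 * NULL return ends it */
static const char *memcg_iter(const char *prev)
{
	size_t i;

	if (!prev)
		return groups[0];
	for (i = 0; i + 1 < NGROUPS; i++)
		if (groups[i] == prev)
			return groups[i + 1];
	return NULL;
}

int main(void)
{
	const char *memcg = memcg_iter(NULL);

	do {
		printf("shrinking lruvec of %s\n", memcg);
		memcg = memcg_iter(memcg);
	} while (memcg);
	return 0;
}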
@@ -2320,6 +2277,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        bool aborted_reclaim = false;
 
        /*
@@ -2359,6 +2318,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                                        continue;
                                }
                        }
+                       /*
+                        * This steals pages from memory cgroups over softlimit
+                        * and returns the number of reclaimed pages and
+                        * scanned pages. This works for global memory pressure
+                        * and balancing, not for a memcg's limit.
+                        */
+                       nr_soft_scanned = 0;
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                               sc->order, sc->gfp_mask,
+                                               &nr_soft_scanned);
+                       sc->nr_reclaimed += nr_soft_reclaimed;
+                       sc->nr_scanned += nr_soft_scanned;
                        /* need some check for avoid more shrink_zone() */
                }
 
@@ -2952,6 +2923,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .priority = DEF_PRIORITY,
@@ -3066,6 +3039,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
                        sc.nr_scanned = 0;
 
+                       nr_soft_scanned = 0;
+                       /*
+                        * Call soft limit reclaim before calling shrink_zone.
+                        */
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       order, sc.gfp_mask,
+                                                       &nr_soft_scanned);
+                       sc.nr_reclaimed += nr_soft_reclaimed;
+
                        /*
                         * There should be no need to raise the scanning
                         * priority if enough pages are already being scanned
index 841e35f1db22caff3afd03c104403f0c744776cb..d93510c6aa2da860d779570cbf0a4c7cee278ad5 100644 (file)
@@ -804,6 +804,10 @@ static void zswap_frontswap_invalidate_area(unsigned type)
        }
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
+
+       zbud_destroy_pool(tree->pool);
+       kfree(tree);
+       zswap_trees[type] = NULL;
 }
 
 static struct zbud_ops zswap_zbud_ops = {
index 1eb05d80b07bea736e85a389be0d98c8bfcb3d9c..3ed616215870cf73b4d8a516d52e5da04d9472e5 100644 (file)
 static unsigned int mrp_join_time __read_mostly = 200;
 module_param(mrp_join_time, uint, 0644);
 MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
+
+static unsigned int mrp_periodic_time __read_mostly = 1000;
+module_param(mrp_periodic_time, uint, 0644);
+MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
+
 MODULE_LICENSE("GPL");
 
 static const u8
@@ -595,6 +600,24 @@ static void mrp_join_timer(unsigned long data)
        mrp_join_timer_arm(app);
 }
 
+static void mrp_periodic_timer_arm(struct mrp_applicant *app)
+{
+       mod_timer(&app->periodic_timer,
+                 jiffies + msecs_to_jiffies(mrp_periodic_time));
+}
+
+static void mrp_periodic_timer(unsigned long data)
+{
+       struct mrp_applicant *app = (struct mrp_applicant *)data;
+
+       spin_lock(&app->lock);
+       mrp_mad_event(app, MRP_EVENT_PERIODIC);
+       mrp_pdu_queue(app);
+       spin_unlock(&app->lock);
+
+       mrp_periodic_timer_arm(app);
+}
+
 static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
 {
        __be16 endmark;
@@ -845,6 +868,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
        rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
        setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
        mrp_join_timer_arm(app);
+       setup_timer(&app->periodic_timer, mrp_periodic_timer,
+                   (unsigned long)app);
+       mrp_periodic_timer_arm(app);
        return 0;
 
 err3:
@@ -870,6 +896,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
         * all pending messages before the applicant is gone.
         */
        del_timer_sync(&app->join_timer);
+       del_timer_sync(&app->periodic_timer);
 
        spin_lock_bh(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
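
The periodic timer added above re-arms itself from its own handler, so MRP_EVENT_PERIODIC keeps firing roughly every mrp_periodic_time milliseconds until del_timer_sync() stops it. A rough sleep-based user-space model of that self-rearming behaviour (three iterations stand in for the timer's lifetime; this is not kernel timer code):

#include <stdio.h>
#include <time.h>

static unsigned int periodic_time_ms = 1000;   /* mirrors the module parameter */

static void periodic_event(void)
{
	puts("MRP_EVENT_PERIODIC: queue pending PDUs");
}

int main(void)
{
	struct timespec ts = {
		.tv_sec  = periodic_time_ms / 1000,
		.tv_nsec = (periodic_time_ms % 1000) * 1000000L,
	};
	int fires;

	/* the handler runs, then re-arms itself; loop instead of a timer */
	for (fires = 0; fires < 3; fires++) {
		nanosleep(&ts, NULL);
		periodic_event();
	}
	return 0;
}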
index 61fc573f1142f707fee7d886e67153a62c91c14a..b3d17d1c49c3fbecdd5701da18c14c9767f7d969 100644 (file)
@@ -98,14 +98,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_request_leave(dev);
 
        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
+
+       netdev_upper_dev_unlink(real_dev, dev);
        /* Because unregister_netdevice_queue() makes sure at least one rcu
         * grace period is respected before device freeing,
         * we dont need to call synchronize_net() here.
         */
        unregister_netdevice_queue(dev, head);
 
-       netdev_upper_dev_unlink(real_dev, dev);
-
        if (grp->nr_vlan_devs == 0) {
                vlan_mvrp_uninit_applicant(real_dev);
                vlan_gvrp_uninit_applicant(real_dev);
@@ -169,13 +169,13 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       err = netdev_upper_dev_link(real_dev, dev);
-       if (err)
-               goto out_uninit_mvrp;
-
        err = register_netdevice(dev);
        if (err < 0)
-               goto out_upper_dev_unlink;
+               goto out_uninit_mvrp;
+
+       err = netdev_upper_dev_link(real_dev, dev);
+       if (err)
+               goto out_unregister_netdev;
 
        /* Account for reference in struct vlan_dev_priv */
        dev_hold(real_dev);
@@ -191,8 +191,8 @@ int register_vlan_dev(struct net_device *dev)
 
        return 0;
 
-out_upper_dev_unlink:
-       netdev_upper_dev_unlink(real_dev, dev);
+out_unregister_netdev:
+       unregister_netdevice(dev);
 out_uninit_mvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_mvrp_uninit_applicant(real_dev);
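
The reordering above makes the unwind path mirror the setup order: register the netdevice first, link it to the real device second, and undo in reverse on failure. A generic sketch of that setup/teardown symmetry with illustrative step names (the simulated failure shows the unwind path):

#include <stdio.h>

static int  register_device(void)   { puts("register device");   return 0; }
static void unregister_device(void) { puts("unregister device");           }
static int  link_upper(void)        { puts("link to real_dev");  return -1; }
static void unlink_upper(void)      { puts("unlink from real_dev");        }

int main(void)
{
	int err;

	err = register_device();
	if (err)
		goto out;

	err = link_upper();
	if (err)
		goto out_unregister;       /* undo only what already succeeded */

	puts("setup complete");
	unlink_upper();
	unregister_device();
	return 0;

out_unregister:
	unregister_device();
out:
	printf("setup failed: %d\n", err);
	return 1;
}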
index ba5983f34c42f8076f4416af164f229678ffc4e1..a2caf00b82cc7bcf23526305917691d96855f761 100644 (file)
@@ -196,12 +196,12 @@ static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 }
 
 #ifdef CONFIG_VLAN_8021Q_GVRP
-extern int vlan_gvrp_request_join(const struct net_device *dev);
-extern void vlan_gvrp_request_leave(const struct net_device *dev);
-extern int vlan_gvrp_init_applicant(struct net_device *dev);
-extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_gvrp_init(void);
-extern void vlan_gvrp_uninit(void);
+int vlan_gvrp_request_join(const struct net_device *dev);
+void vlan_gvrp_request_leave(const struct net_device *dev);
+int vlan_gvrp_init_applicant(struct net_device *dev);
+void vlan_gvrp_uninit_applicant(struct net_device *dev);
+int vlan_gvrp_init(void);
+void vlan_gvrp_uninit(void);
 #else
 static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
 static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
@@ -212,12 +212,12 @@ static inline void vlan_gvrp_uninit(void) {}
 #endif
 
 #ifdef CONFIG_VLAN_8021Q_MVRP
-extern int vlan_mvrp_request_join(const struct net_device *dev);
-extern void vlan_mvrp_request_leave(const struct net_device *dev);
-extern int vlan_mvrp_init_applicant(struct net_device *dev);
-extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_mvrp_init(void);
-extern void vlan_mvrp_uninit(void);
+int vlan_mvrp_request_join(const struct net_device *dev);
+void vlan_mvrp_request_leave(const struct net_device *dev);
+int vlan_mvrp_init_applicant(struct net_device *dev);
+void vlan_mvrp_uninit_applicant(struct net_device *dev);
+int vlan_mvrp_init(void);
+void vlan_mvrp_uninit(void);
 #else
 static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
 static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
@@ -229,8 +229,8 @@ static inline void vlan_mvrp_uninit(void) {}
 
 extern const char vlan_fullname[];
 extern const char vlan_version[];
-extern int vlan_netlink_init(void);
-extern void vlan_netlink_fini(void);
+int vlan_netlink_init(void);
+void vlan_netlink_fini(void);
 
 extern struct rtnl_link_ops vlan_link_ops;
 
index 309129732285fd610159446bade5ede27c835d1c..c7e634af85165613822074b28ceeca4af7153ae7 100644 (file)
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
 
        return nla_total_size(2) +      /* IFLA_VLAN_PROTOCOL */
               nla_total_size(2) +      /* IFLA_VLAN_ID */
-              sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+              nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
               vlan_qos_map_size(vlan->nr_ingress_mappings) +
               vlan_qos_map_size(vlan->nr_egress_mappings);
 }
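
The vlan_get_size() fix matters because a netlink attribute consumes an aligned header plus aligned payload, not just sizeof() of the payload. A small sketch using the standard nla_total_size() arithmetic (4-byte header, 4-byte alignment); the local struct definitions only mirror the kernel layouts:

#include <stdio.h>

struct nlattr_hdr { unsigned short nla_len, nla_type; }; /* like struct nlattr */

#define NLA_ALIGNTO  4
#define NLA_ALIGN(n) (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN   ((int)NLA_ALIGN(sizeof(struct nlattr_hdr)))

/* same formula as the kernel's nla_total_size() */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

struct vlan_flags_demo { unsigned int flags, mask; };    /* 8 bytes, like ifla_vlan_flags */

int main(void)
{
	int payload = (int)sizeof(struct vlan_flags_demo);

	/* sizeof() alone undercounts the message space by the attribute header */
	printf("sizeof: %d, nla_total_size: %d\n", payload, nla_total_size(payload));
	return 0;
}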
index 4b4d2b779ec1a08202303863fea1b74c67f04383..a00123ebb0ae0705c8e45e8433a07e96a2a06fe5 100644 (file)
@@ -1735,7 +1735,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        res = -EFAULT;
                        break;
                }
-               if (amount > AX25_NOUID_BLOCK) {
+               if (amount < 0 || amount > AX25_NOUID_BLOCK) {
                        res = -EINVAL;
                        break;
                }
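
The ax25 ioctl fix adds a lower bound because the value copied from user space is signed, so checking only the upper bound would accept negative values. A short illustration (the constant is a stand-in for AX25_NOUID_BLOCK):

#include <stdio.h>

#define NOUID_BLOCK 1   /* stand-in for AX25_NOUID_BLOCK */

static int validate(long amount)
{
	/* 'amount > NOUID_BLOCK' alone would accept -1; both bounds are needed */
	if (amount < 0 || amount > NOUID_BLOCK)
		return -1;   /* -EINVAL */
	return 0;
}

int main(void)
{
	printf("amount=-1 -> %d, amount=0 -> %d, amount=2 -> %d\n",
	       validate(-1), validate(0), validate(2));
	return 0;
}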
index 489bb36f1b9464381d2aa83c78740977e3899b77..4f4aabbd8eab24c4d12983ccf656966f4f7084b0 100644 (file)
@@ -24,6 +24,7 @@ batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += debugfs.o
 batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
+batman-adv-y += fragmentation.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
@@ -37,5 +38,3 @@ batman-adv-y += send.o
 batman-adv-y += soft-interface.o
 batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
-batman-adv-y += unicast.o
-batman-adv-y += vis.o
index 0a8a80cd4bf19819f797169a9b5c6122f3dc47b3..a2b480a908723a37d47d14bd76e63628518e6561 100644 (file)
@@ -87,22 +87,198 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
        return (uint8_t)(sum / count);
 }
 
+/**
+ * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ *  orig_node
+ * @orig_node: the orig_node for which the resources have to be free'd
+ */
+static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
+{
+       kfree(orig_node->bat_iv.bcast_own);
+       kfree(orig_node->bat_iv.bcast_own_sum);
+}
+
+/**
+ * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
+ *  include the new hard-interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current amount of interfaces
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
+                                    int max_if_num)
+{
+       void *data_ptr;
+       size_t data_size, old_size;
+       int ret = -ENOMEM;
+
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
+       old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
+       data_ptr = kmalloc(data_size, GFP_ATOMIC);
+       if (!data_ptr)
+               goto unlock;
+
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size);
+       kfree(orig_node->bat_iv.bcast_own);
+       orig_node->bat_iv.bcast_own = data_ptr;
+
+       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+       if (!data_ptr) {
+               kfree(orig_node->bat_iv.bcast_own);
+               goto unlock;
+       }
+
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+              (max_if_num - 1) * sizeof(uint8_t));
+       kfree(orig_node->bat_iv.bcast_own_sum);
+       orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+       ret = 0;
+
+unlock:
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
+ *  exclude the removed interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current amount of interfaces
+ * @del_if_num: the index of the interface being removed
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+                                    int max_if_num, int del_if_num)
+{
+       int chunk_size,  ret = -ENOMEM, if_offset;
+       void *data_ptr = NULL;
+
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       /* last interface was removed */
+       if (max_if_num == 0)
+               goto free_bcast_own;
+
+       chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
+       data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
+       if (!data_ptr)
+               goto unlock;
+
+       /* copy first part */
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+
+       /* copy second part */
+       memcpy((char *)data_ptr + del_if_num * chunk_size,
+              orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
+              (max_if_num - del_if_num) * chunk_size);
+
+free_bcast_own:
+       kfree(orig_node->bat_iv.bcast_own);
+       orig_node->bat_iv.bcast_own = data_ptr;
+
+       if (max_if_num == 0)
+               goto free_own_sum;
+
+       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+       if (!data_ptr) {
+               kfree(orig_node->bat_iv.bcast_own);
+               goto unlock;
+       }
+
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+              del_if_num * sizeof(uint8_t));
+
+       if_offset = (del_if_num + 1) * sizeof(uint8_t);
+       memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+              orig_node->bat_iv.bcast_own_sum + if_offset,
+              (max_if_num - del_if_num) * sizeof(uint8_t));
+
+free_own_sum:
+       kfree(orig_node->bat_iv.bcast_own_sum);
+       orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+       ret = 0;
+unlock:
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_get - retrieve or create (if it does not exist) an originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of the originator
+ *
+ * Returns the originator object corresponding to the passed mac address or NULL
+ * on failure.
+ * If the object does not exist it is created and initialised.
+ */
+static struct batadv_orig_node *
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+{
+       struct batadv_orig_node *orig_node;
+       int size, hash_added;
+
+       orig_node = batadv_orig_hash_find(bat_priv, addr);
+       if (orig_node)
+               return orig_node;
+
+       orig_node = batadv_orig_node_new(bat_priv, addr);
+       if (!orig_node)
+               return NULL;
+
+       spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
+
+       size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
+       orig_node->bat_iv.bcast_own = kzalloc(size, GFP_ATOMIC);
+       if (!orig_node->bat_iv.bcast_own)
+               goto free_orig_node;
+
+       size = bat_priv->num_ifaces * sizeof(uint8_t);
+       orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+       if (!orig_node->bat_iv.bcast_own_sum)
+               goto free_bcast_own;
+
+       hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+                                    batadv_choose_orig, orig_node,
+                                    &orig_node->hash_entry);
+       if (hash_added != 0)
+               goto free_bcast_own;
+
+       return orig_node;
+
+free_bcast_own:
+       kfree(orig_node->bat_iv.bcast_own);
+free_orig_node:
+       batadv_orig_node_free_ref(orig_node);
+
+       return NULL;
+}
+
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
                        const uint8_t *neigh_addr,
                        struct batadv_orig_node *orig_node,
                        struct batadv_orig_node *orig_neigh)
 {
+       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_neigh_node *neigh_node;
 
-       neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr);
+       neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
        if (!neigh_node)
                goto out;
 
-       INIT_LIST_HEAD(&neigh_node->bonding_list);
+       spin_lock_init(&neigh_node->bat_iv.lq_update_lock);
 
-       neigh_node->orig_node = orig_neigh;
-       neigh_node->if_incoming = hard_iface;
+       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                  "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+                  neigh_addr, orig_node->orig, hard_iface->net_dev->name);
 
        spin_lock_bh(&orig_node->neigh_list_lock);
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
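
The batadv_iv_ogm_orig_add_if()/batadv_iv_ogm_orig_del_if() helpers added in this hunk resize the per-interface counter arrays by allocating a new buffer, copying the surviving entries and freeing the old one. A simplified user-space sketch of the grow case, without the ogm_cnt_lock and the real error handling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* grow a per-interface counter array from (max_if_num - 1) to max_if_num
 * entries while keeping the existing values, roughly what the add_if helper
 * does for bcast_own_sum */
static unsigned char *grow_if_array(unsigned char *old, int max_if_num)
{
	unsigned char *grown = malloc(max_if_num * sizeof(*grown));

	if (!grown)
		return NULL;
	memcpy(grown, old, (max_if_num - 1) * sizeof(*grown));
	grown[max_if_num - 1] = 0;     /* the new interface starts counting at 0 */
	free(old);
	return grown;
}

int main(void)
{
	unsigned char *sums = calloc(2, sizeof(*sums));

	if (!sums)
		return 1;
	sums[0] = 7;
	sums[1] = 3;

	sums = grow_if_array(sums, 3); /* a third interface appeared */
	if (!sums)
		return 1;

	printf("%u %u %u\n", sums[0], sums[1], sums[2]);
	free(sums);
	return 0;
}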
@@ -135,9 +311,8 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
        batadv_ogm_packet->header.ttl = 2;
        batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+       batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
-       batadv_ogm_packet->tt_num_changes = 0;
-       batadv_ogm_packet->ttvn = 0;
 
        res = 0;
 
@@ -207,12 +382,12 @@ static uint8_t batadv_hop_penalty(uint8_t tq,
 
 /* is there another aggregated packet here? */
 static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-                                    int tt_num_changes)
+                                    __be16 tvlv_len)
 {
        int next_buff_pos = 0;
 
        next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-       next_buff_pos += batadv_tt_len(tt_num_changes);
+       next_buff_pos += ntohs(tvlv_len);
 
        return (next_buff_pos <= packet_len) &&
               (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -240,7 +415,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-                                        batadv_ogm_packet->tt_num_changes)) {
+                                        batadv_ogm_packet->tvlv_len)) {
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
@@ -256,18 +431,18 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                        fwd_str = "Sending own";
 
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+                          "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n",
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
                           batadv_ogm_packet->orig,
                           ntohl(batadv_ogm_packet->seqno),
                           batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
                           (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
                            "on" : "off"),
-                          batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+                          hard_iface->net_dev->name,
                           hard_iface->net_dev->dev_addr);
 
                buff_pos += BATADV_OGM_HLEN;
-               buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+               buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
                packet_num++;
                packet_pos = forw_packet->skb->data + buff_pos;
                batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -601,7 +776,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                                  struct batadv_hard_iface *if_incoming)
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-       uint8_t tt_num_changes;
+       uint16_t tvlv_len;
 
        if (batadv_ogm_packet->header.ttl <= 1) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -621,7 +796,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                        return;
        }
 
-       tt_num_changes = batadv_ogm_packet->tt_num_changes;
+       tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
        batadv_ogm_packet->header.ttl--;
        memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -642,7 +817,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
        batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
-                               BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
+                               BATADV_OGM_HLEN + tvlv_len,
                                if_incoming, 0, batadv_iv_ogm_fwd_send_time());
 }
 
@@ -662,20 +837,22 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
        uint32_t i;
        size_t word_index;
        uint8_t *w;
+       int if_num;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
                        word_index = hard_iface->if_num * BATADV_NUM_WORDS;
-                       word = &(orig_node->bcast_own[word_index]);
+                       word = &(orig_node->bat_iv.bcast_own[word_index]);
 
                        batadv_bit_get_packet(bat_priv, word, 1, 0);
-                       w = &orig_node->bcast_own_sum[hard_iface->if_num];
+                       if_num = hard_iface->if_num;
+                       w = &orig_node->bat_iv.bcast_own_sum[if_num];
                        *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+                       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
                }
                rcu_read_unlock();
        }
@@ -688,43 +865,29 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *primary_if;
        int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
-       int vis_server, tt_num_changes = 0;
        uint32_t seqno;
-       uint8_t bandwidth;
+       uint16_t tvlv_len = 0;
 
-       vis_server = atomic_read(&bat_priv->vis_mode);
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
-       if (hard_iface == primary_if)
-               tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
-                                                      ogm_buff_len,
-                                                      BATADV_OGM_HLEN);
+       if (hard_iface == primary_if) {
+               /* tt changes have to be committed before the tvlv data is
+                * appended as it may alter the tt tvlv container
+                */
+               batadv_tt_local_commit_changes(bat_priv);
+               tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
+                                                           ogm_buff_len,
+                                                           BATADV_OGM_HLEN);
+       }
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
+       batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
        /* change sequence number to network order */
        seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
        batadv_ogm_packet->seqno = htonl(seqno);
        atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
-       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
-       batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
-       if (tt_num_changes >= 0)
-               batadv_ogm_packet->tt_num_changes = tt_num_changes;
-
-       if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
-               batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
-       else
-               batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
-
-       if (hard_iface == primary_if &&
-           atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
-               bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-               batadv_ogm_packet->gw_flags = bandwidth;
-       } else {
-               batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
-       }
-
        batadv_iv_ogm_slide_own_bcast_window(hard_iface);
        batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
                                hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
@@ -770,18 +933,18 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                if (dup_status != BATADV_NO_DUP)
                        continue;
 
-               spin_lock_bh(&tmp_neigh_node->lq_update_lock);
-               batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
-                                      &tmp_neigh_node->tq_index, 0);
-               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
-               tmp_neigh_node->tq_avg = tq_avg;
-               spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
+               spin_lock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
+               batadv_ring_buffer_set(tmp_neigh_node->bat_iv.tq_recv,
+                                      &tmp_neigh_node->bat_iv.tq_index, 0);
+               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->bat_iv.tq_recv);
+               tmp_neigh_node->bat_iv.tq_avg = tq_avg;
+               spin_unlock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
        }
 
        if (!neigh_node) {
                struct batadv_orig_node *orig_tmp;
 
-               orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
+               orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
                if (!orig_tmp)
                        goto unlock;
 
@@ -798,80 +961,55 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 
        rcu_read_unlock();
 
-       orig_node->flags = batadv_ogm_packet->flags;
        neigh_node->last_seen = jiffies;
 
-       spin_lock_bh(&neigh_node->lq_update_lock);
-       batadv_ring_buffer_set(neigh_node->tq_recv,
-                              &neigh_node->tq_index,
+       spin_lock_bh(&neigh_node->bat_iv.lq_update_lock);
+       batadv_ring_buffer_set(neigh_node->bat_iv.tq_recv,
+                              &neigh_node->bat_iv.tq_index,
                               batadv_ogm_packet->tq);
-       neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
-       spin_unlock_bh(&neigh_node->lq_update_lock);
+       tq_avg = batadv_ring_buffer_avg(neigh_node->bat_iv.tq_recv);
+       neigh_node->bat_iv.tq_avg = tq_avg;
+       spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
 
        if (dup_status == BATADV_NO_DUP) {
                orig_node->last_ttl = batadv_ogm_packet->header.ttl;
                neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
        }
 
-       batadv_bonding_candidate_add(orig_node, neigh_node);
+       batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
 
        /* if this neighbor already is our next hop there is nothing
         * to change
         */
        router = batadv_orig_node_get_router(orig_node);
        if (router == neigh_node)
-               goto update_tt;
+               goto out;
 
        /* if this neighbor does not offer a better TQ we won't consider it */
-       if (router && (router->tq_avg > neigh_node->tq_avg))
-               goto update_tt;
+       if (router && (router->bat_iv.tq_avg > neigh_node->bat_iv.tq_avg))
+               goto out;
 
        /* if the TQ is the same and the link not more symmetric we
         * won't consider it either
         */
-       if (router && (neigh_node->tq_avg == router->tq_avg)) {
+       if (router && (neigh_node->bat_iv.tq_avg == router->bat_iv.tq_avg)) {
                orig_node_tmp = router->orig_node;
-               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
                if_num = router->if_incoming->if_num;
-               sum_orig = orig_node_tmp->bcast_own_sum[if_num];
-               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+               sum_orig = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+               spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 
                orig_node_tmp = neigh_node->orig_node;
-               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
                if_num = neigh_node->if_incoming->if_num;
-               sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
-               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+               sum_neigh = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+               spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 
                if (sum_orig >= sum_neigh)
-                       goto update_tt;
+                       goto out;
        }
 
        batadv_update_route(bat_priv, orig_node, neigh_node);
-
-update_tt:
-       /* I have to check for transtable changes only if the OGM has been
-        * sent through a primary interface
-        */
-       if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
-            (batadv_ogm_packet->header.ttl > 2)) ||
-           (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
-               batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
-                                     batadv_ogm_packet->tt_num_changes,
-                                     batadv_ogm_packet->ttvn,
-                                     ntohs(batadv_ogm_packet->tt_crc));
-
-       if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
-               batadv_gw_node_update(bat_priv, orig_node,
-                                     batadv_ogm_packet->gw_flags);
-
-       orig_node->gw_flags = batadv_ogm_packet->gw_flags;
-
-       /* restart gateway selection if fast or late switching was enabled */
-       if ((orig_node->gw_flags) &&
-           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
-           (atomic_read(&bat_priv->gw_sel_class) > 2))
-               batadv_gw_check_election(bat_priv, orig_node);
-
        goto out;
 
 unlock:
@@ -893,7 +1031,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        uint8_t total_count;
        uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
        unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-       int tq_asym_penalty, inv_asym_penalty, ret = 0;
+       int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
        unsigned int combined_tq;
 
        /* find corresponding one hop neighbor */
@@ -931,10 +1069,11 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        orig_node->last_seen = jiffies;
 
        /* find packet count of corresponding one hop neighbor */
-       spin_lock_bh(&orig_node->ogm_cnt_lock);
-       orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
-       neigh_rq_count = neigh_node->real_packet_count;
-       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       if_num = if_incoming->if_num;
+       orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
+       neigh_rq_count = neigh_node->bat_iv.real_packet_count;
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
        if (orig_eq_count > neigh_rq_count)
@@ -1016,12 +1155,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
        uint8_t *neigh_addr;
        uint8_t packet_count;
+       unsigned long *bitmap;
 
-       orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+       orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
                return BATADV_NO_DUP;
 
-       spin_lock_bh(&orig_node->ogm_cnt_lock);
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
        seq_diff = seqno - orig_node->last_real_seqno;
 
        /* signalize caller that the packet is to be dropped. */
@@ -1036,7 +1176,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_node->neigh_list, list) {
                neigh_addr = tmp_neigh_node->addr;
-               is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+               is_dup = batadv_test_bit(tmp_neigh_node->bat_iv.real_bits,
                                         orig_node->last_real_seqno,
                                         seqno);
 
@@ -1052,13 +1192,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                }
 
                /* if the window moved, set the update flag. */
-               need_update |= batadv_bit_get_packet(bat_priv,
-                                                    tmp_neigh_node->real_bits,
+               bitmap = tmp_neigh_node->bat_iv.real_bits;
+               need_update |= batadv_bit_get_packet(bat_priv, bitmap,
                                                     seq_diff, set_mark);
 
-               packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+               packet_count = bitmap_weight(tmp_neigh_node->bat_iv.real_bits,
                                             BATADV_TQ_LOCAL_WINDOW_SIZE);
-               tmp_neigh_node->real_packet_count = packet_count;
+               tmp_neigh_node->bat_iv.real_packet_count = packet_count;
        }
        rcu_read_unlock();
 
@@ -1070,7 +1210,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        }
 
 out:
-       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
        batadv_orig_node_free_ref(orig_node);
        return ret;
 }
@@ -1082,7 +1222,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_hard_iface *hard_iface;
-       struct batadv_orig_node *orig_neigh_node, *orig_node;
+       struct batadv_orig_node *orig_neigh_node, *orig_node, *orig_node_tmp;
        struct batadv_neigh_node *router = NULL, *router_router = NULL;
        struct batadv_neigh_node *orig_neigh_router = NULL;
        int has_directlink_flag;
@@ -1122,13 +1262,11 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                is_single_hop_neigh = true;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %#.4x, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
+                  "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
                   ethhdr->h_source, if_incoming->net_dev->name,
                   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
                   batadv_ogm_packet->prev_sender,
-                  ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
-                  ntohs(batadv_ogm_packet->tt_crc),
-                  batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
+                  ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
                   batadv_ogm_packet->header.ttl,
                   batadv_ogm_packet->header.version, has_directlink_flag);
 
@@ -1168,8 +1306,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                int16_t if_num;
                uint8_t *weight;
 
-               orig_neigh_node = batadv_get_orig_node(bat_priv,
-                                                      ethhdr->h_source);
+               orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+                                                        ethhdr->h_source);
                if (!orig_neigh_node)
                        return;
 
@@ -1183,15 +1321,15 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                        if_num = if_incoming->if_num;
                        offset = if_num * BATADV_NUM_WORDS;
 
-                       spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
-                       word = &(orig_neigh_node->bcast_own[offset]);
+                       spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
+                       word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
                        bit_pos = if_incoming_seqno - 2;
                        bit_pos -= ntohl(batadv_ogm_packet->seqno);
                        batadv_set_bit(word, bit_pos);
-                       weight = &orig_neigh_node->bcast_own_sum[if_num];
+                       weight = &orig_neigh_node->bat_iv.bcast_own_sum[if_num];
                        *weight = bitmap_weight(word,
                                                BATADV_TQ_LOCAL_WINDOW_SIZE);
-                       spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
+                       spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
                }
 
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -1214,7 +1352,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                return;
        }
 
-       orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+       orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
                return;
 
@@ -1235,10 +1373,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        }
 
        router = batadv_orig_node_get_router(orig_node);
-       if (router)
-               router_router = batadv_orig_node_get_router(router->orig_node);
+       if (router) {
+               orig_node_tmp = router->orig_node;
+               router_router = batadv_orig_node_get_router(orig_node_tmp);
+       }
 
-       if ((router && router->tq_avg != 0) &&
+       if ((router && router->bat_iv.tq_avg != 0) &&
            (batadv_compare_eth(router->addr, ethhdr->h_source)))
                is_from_best_next_hop = true;
 
@@ -1254,14 +1394,16 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                goto out;
        }
 
+       batadv_tvlv_ogm_receive(bat_priv, batadv_ogm_packet, orig_node);
+
        /* if sender is a direct neighbor the sender mac equals
         * originator mac
         */
        if (is_single_hop_neigh)
                orig_neigh_node = orig_node;
        else
-               orig_neigh_node = batadv_get_orig_node(bat_priv,
-                                                      ethhdr->h_source);
+               orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+                                                        ethhdr->h_source);
 
        if (!orig_neigh_node)
                goto out;
@@ -1350,9 +1492,9 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct ethhdr *ethhdr;
        int buff_pos = 0, packet_len;
-       unsigned char *tt_buff, *packet_buff;
-       bool ret;
+       unsigned char *tvlv_buff, *packet_buff;
        uint8_t *packet_pos;
+       bool ret;
 
        ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
        if (!ret)
@@ -1375,14 +1517,14 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
        /* unpack the aggregated packets and process them one by one */
        while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
-                                        batadv_ogm_packet->tt_num_changes)) {
-               tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
+                                        batadv_ogm_packet->tvlv_len)) {
+               tvlv_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
 
-               batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
-                                     if_incoming);
+               batadv_iv_ogm_process(ethhdr, batadv_ogm_packet,
+                                     tvlv_buff, if_incoming);
 
                buff_pos += BATADV_OGM_HLEN;
-               buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+               buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
 
                packet_pos = packet_buff + buff_pos;
                batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -1392,6 +1534,106 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        return NET_RX_SUCCESS;
 }
 
+/**
+ * batadv_iv_ogm_orig_print - print the originator table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: debugfs table seq_file struct
+ */
+static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
+                                    struct seq_file *seq)
+{
+       struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
+       struct batadv_hashtable *hash = bat_priv->orig_hash;
+       int last_seen_msecs, last_seen_secs;
+       struct batadv_orig_node *orig_node;
+       unsigned long last_seen_jiffies;
+       struct hlist_head *head;
+       int batman_count = 0;
+       uint32_t i;
+
+       seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
+                  "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
+                  "Nexthop", "outgoingIF", "Potential nexthops");
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+                       neigh_node = batadv_orig_node_get_router(orig_node);
+                       if (!neigh_node)
+                               continue;
+
+                       if (neigh_node->bat_iv.tq_avg == 0)
+                               goto next;
+
+                       last_seen_jiffies = jiffies - orig_node->last_seen;
+                       last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+                       last_seen_secs = last_seen_msecs / 1000;
+                       last_seen_msecs = last_seen_msecs % 1000;
+
+                       seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
+                                  orig_node->orig, last_seen_secs,
+                                  last_seen_msecs, neigh_node->bat_iv.tq_avg,
+                                  neigh_node->addr,
+                                  neigh_node->if_incoming->net_dev->name);
+
+                       hlist_for_each_entry_rcu(neigh_node_tmp,
+                                                &orig_node->neigh_list, list) {
+                               seq_printf(seq, " %pM (%3i)",
+                                          neigh_node_tmp->addr,
+                                          neigh_node_tmp->bat_iv.tq_avg);
+                       }
+
+                       seq_puts(seq, "\n");
+                       batman_count++;
+
+next:
+                       batadv_neigh_node_free_ref(neigh_node);
+               }
+               rcu_read_unlock();
+       }
+
+       if (batman_count == 0)
+               seq_puts(seq, "No batman nodes in range ...\n");
+}
+
+/**
+ * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns a value less than, equal to or greater than 0 if the metric via
+ * neigh1 is lower, the same as or higher than the metric via neigh2
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+                                  struct batadv_neigh_node *neigh2)
+{
+       uint8_t tq1, tq2;
+
+       tq1 = neigh1->bat_iv.tq_avg;
+       tq2 = neigh2->bat_iv.tq_avg;
+
+       return tq1 - tq2;
+}
+
+/**
+ * batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
+ *  neigh2 from the metric perspective
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns true if the metric via neigh1 is equally good or better than the
+ * metric via neigh2, false otherwise.
+ */
+static bool batadv_iv_ogm_neigh_is_eob(struct batadv_neigh_node *neigh1,
+                                      struct batadv_neigh_node *neigh2)
+{
+       int diff = batadv_iv_ogm_neigh_cmp(neigh1, neigh2);
+
+       return diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
+}
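
A minimal, self-contained sketch of how the two callbacks above interact, assuming a similarity threshold of 50 purely for illustration (the real constant, BATADV_TQ_SIMILARITY_THRESHOLD, lives elsewhere in the batman-adv headers): the comparison returns the sign of the TQ difference, and a neighbor counts as "equally good or better" as long as its TQ is not more than the threshold below the other's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TQ_SIMILARITY_THRESHOLD 50      /* assumed value, for illustration only */

/* returns <0, 0 or >0, like batadv_iv_ogm_neigh_cmp() */
static int neigh_cmp(uint8_t tq1, uint8_t tq2)
{
        return tq1 - tq2;
}

/* neigh1 is "equivalent or better" unless its TQ is more than the
 * threshold below neigh2's TQ
 */
static bool neigh_is_equiv_or_better(uint8_t tq1, uint8_t tq2)
{
        return neigh_cmp(tq1, tq2) > -TQ_SIMILARITY_THRESHOLD;
}

int main(void)
{
        printf("cmp(200, 180) = %d\n", neigh_cmp(200, 180));                  /* > 0 */
        printf("equiv(180, 200) = %d\n", neigh_is_equiv_or_better(180, 200)); /* 1 */
        printf("equiv(100, 200) = %d\n", neigh_is_equiv_or_better(100, 200)); /* 0 */
        return 0;
}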
+
 static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        .name = "BATMAN_IV",
        .bat_iface_enable = batadv_iv_ogm_iface_enable,
@@ -1400,6 +1642,12 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
        .bat_ogm_schedule = batadv_iv_ogm_schedule,
        .bat_ogm_emit = batadv_iv_ogm_emit,
+       .bat_neigh_cmp = batadv_iv_ogm_neigh_cmp,
+       .bat_neigh_is_equiv_or_better = batadv_iv_ogm_neigh_is_eob,
+       .bat_orig_print = batadv_iv_ogm_orig_print,
+       .bat_orig_free = batadv_iv_ogm_orig_free,
+       .bat_orig_add_if = batadv_iv_ogm_orig_add_if,
+       .bat_orig_del_if = batadv_iv_ogm_orig_del_if,
 };
 
 int __init batadv_iv_init(void)
index 264de88db3208290b940cc27b09e44f79e1616c9..28eb5e6d0a02510a0a7b4e268f7dc34a26bc182a 100644 (file)
@@ -411,10 +411,10 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
                return NULL;
        }
 
-       /* this is a gateway now, remove any tt entries */
+       /* this is a gateway now, remove any TT entry on this VLAN */
        orig_node = batadv_orig_hash_find(bat_priv, orig);
        if (orig_node) {
-               batadv_tt_global_del_orig(bat_priv, orig_node,
+               batadv_tt_global_del_orig(bat_priv, orig_node, vid,
                                          "became a backbone gateway");
                batadv_orig_node_free_ref(orig_node);
        }
@@ -858,30 +858,28 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct sk_buff *skb)
 {
-       struct ethhdr *ethhdr;
+       struct batadv_bla_claim_dst *bla_dst;
+       uint8_t *hw_src, *hw_dst;
        struct vlan_ethhdr *vhdr;
+       struct ethhdr *ethhdr;
        struct arphdr *arphdr;
-       uint8_t *hw_src, *hw_dst;
-       struct batadv_bla_claim_dst *bla_dst;
-       uint16_t proto;
+       unsigned short vid;
+       __be16 proto;
        int headlen;
-       unsigned short vid = BATADV_NO_FLAGS;
        int ret;
 
+       vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+       proto = ethhdr->h_proto;
+       headlen = ETH_HLEN;
+       if (vid & BATADV_VLAN_HAS_TAG) {
                vhdr = (struct vlan_ethhdr *)ethhdr;
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
-               proto = ntohs(vhdr->h_vlan_encapsulated_proto);
-               headlen = sizeof(*vhdr);
-       } else {
-               proto = ntohs(ethhdr->h_proto);
-               headlen = ETH_HLEN;
+               proto = vhdr->h_vlan_encapsulated_proto;
+               headlen += VLAN_HLEN;
        }
 
-       if (proto != ETH_P_ARP)
+       if (proto != htons(ETH_P_ARP))
                return 0; /* not a claim frame */
 
        /* this must be an ARP frame. Check if it is a claim. */
@@ -1317,12 +1315,14 @@ out:
 
 /* @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
+ * @vid: VLAN identifier
  *
- * check if the originator is a gateway for any VLAN ID.
+ * Check if the originator is a gateway for the VLAN identified by vid.
  *
- * returns 1 if it is found, 0 otherwise
+ * Returns true if orig is a backbone for this vid, false otherwise.
  */
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+                                   unsigned short vid)
 {
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
@@ -1330,25 +1330,26 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
        int i;
 
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
-               return 0;
+               return false;
 
        if (!hash)
-               return 0;
+               return false;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
-                       if (batadv_compare_eth(backbone_gw->orig, orig)) {
+                       if (batadv_compare_eth(backbone_gw->orig, orig) &&
+                           backbone_gw->vid == vid) {
                                rcu_read_unlock();
-                               return 1;
+                               return true;
                        }
                }
                rcu_read_unlock();
        }
 
-       return 0;
+       return false;
 }
 
 
@@ -1365,10 +1366,8 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size)
 {
-       struct ethhdr *ethhdr;
-       struct vlan_ethhdr *vhdr;
        struct batadv_bla_backbone_gw *backbone_gw;
-       unsigned short vid = BATADV_NO_FLAGS;
+       unsigned short vid;
 
        if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
                return 0;
@@ -1377,16 +1376,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
        if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
                return 0;
 
-       ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
-
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-               if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
-                       return 0;
-
-               vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
-       }
+       vid = batadv_get_vid(skb, hdr_size);
 
        /* see if this originator is a backbone gw for this VLAN */
        backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
index 4b102e71e5bd63c2bee71f14bcdb0b799398ef5e..da173e760e775caab684d63d2a1749cb55e0b930 100644 (file)
@@ -30,7 +30,8 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
                                             void *offset);
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+                                   unsigned short vid);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
@@ -74,10 +75,11 @@ static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
        return 0;
 }
 
-static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
-                                                uint8_t *orig)
+static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+                                                 uint8_t *orig,
+                                                 unsigned short vid)
 {
-       return 0;
+       return false;
 }
 
 static inline int
index f186a55b23c3fbd1eda3a2d5877b5b781fd647eb..049a7a2ac5b69b83422a3292ee7ebb2183e36114 100644 (file)
@@ -28,7 +28,6 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 #include "soft-interface.h"
-#include "vis.h"
 #include "icmp_socket.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -300,12 +299,6 @@ static int batadv_transtable_local_open(struct inode *inode, struct file *file)
        return single_open(file, batadv_tt_local_seq_print_text, net_dev);
 }
 
-static int batadv_vis_data_open(struct inode *inode, struct file *file)
-{
-       struct net_device *net_dev = (struct net_device *)inode->i_private;
-       return single_open(file, batadv_vis_seq_print_text, net_dev);
-}
-
 struct batadv_debuginfo {
        struct attribute attr;
        const struct file_operations fops;
@@ -356,7 +349,6 @@ static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
                        batadv_transtable_local_open);
-static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
 #ifdef CONFIG_BATMAN_ADV_NC
 static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
 #endif
@@ -373,7 +365,6 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
        &batadv_debuginfo_dat_cache,
 #endif
        &batadv_debuginfo_transtable_local,
-       &batadv_debuginfo_vis_data,
 #ifdef CONFIG_BATMAN_ADV_NC
        &batadv_debuginfo_nc_nodes,
 #endif
index 06345d401588c949762edfa14aaeb3f018a8e294..6c8c3934bd7b44bac8643683683c494e003f1ed2 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <net/arp.h>
 
 #include "main.h"
@@ -29,7 +30,6 @@
 #include "send.h"
 #include "types.h"
 #include "translation-table.h"
-#include "unicast.h"
 
 static void batadv_dat_purge(struct work_struct *work);
 
@@ -206,15 +206,11 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
  */
 static uint32_t batadv_hash_dat(const void *data, uint32_t size)
 {
-       const unsigned char *key = data;
        uint32_t hash = 0;
-       size_t i;
+       const struct batadv_dat_entry *dat = data;
 
-       for (i = 0; i < 4; i++) {
-               hash += key[i];
-               hash += (hash << 10);
-               hash ^= (hash >> 6);
-       }
+       hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
+       hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
 
        hash += (hash << 3);
        hash ^= (hash >> 11);
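
The hunk above replaces the open-coded per-byte loop with batadv_hash_bytes() applied to both the IP address and the VID, so DAT entries for the same IP on different VLANs land in different buckets. A rough standalone sketch of that idea, with the Jenkins-style finalization steps and the table size assumed here only for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t hash_bytes(uint32_t hash, const void *data, size_t len)
{
        const uint8_t *key = data;
        size_t i;

        for (i = 0; i < len; i++) {
                hash += key[i];
                hash += hash << 10;
                hash ^= hash >> 6;
        }
        return hash;
}

static uint32_t hash_dat(uint32_t ip, uint16_t vid, uint32_t table_size)
{
        uint32_t hash = 0;

        hash = hash_bytes(hash, &ip, sizeof(ip));
        hash = hash_bytes(hash, &vid, sizeof(vid));

        hash += hash << 3;
        hash ^= hash >> 11;
        hash += hash << 15;

        return hash % table_size;
}

int main(void)
{
        uint32_t ip = 0x0a000001; /* 10.0.0.1, host byte order for the demo */

        /* same IP, different VLANs -> typically different buckets */
        printf("vid 1 -> bucket %u\n", hash_dat(ip, 1, 1024));
        printf("vid 2 -> bucket %u\n", hash_dat(ip, 2, 1024));
        return 0;
}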
@@ -228,21 +224,26 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
+ * @vid: VLAN identifier
  *
  * Returns the dat_entry if found, NULL otherwise.
  */
 static struct batadv_dat_entry *
-batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
+                          unsigned short vid)
 {
        struct hlist_head *head;
-       struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
+       struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
        struct batadv_hashtable *hash = bat_priv->dat.hash;
        uint32_t index;
 
        if (!hash)
                return NULL;
 
-       index = batadv_hash_dat(&ip, hash->size);
+       to_find.ip = ip;
+       to_find.vid = vid;
+
+       index = batadv_hash_dat(&to_find, hash->size);
        head = &hash->table[index];
 
        rcu_read_lock();
@@ -266,22 +267,24 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
+ * @vid: VLAN identifier
  */
 static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
-                                uint8_t *mac_addr)
+                                uint8_t *mac_addr, unsigned short vid)
 {
        struct batadv_dat_entry *dat_entry;
        int hash_added;
 
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
        /* if this entry is already known, just update it */
        if (dat_entry) {
                if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
                        memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
                dat_entry->last_update = jiffies;
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
-                          "Entry updated: %pI4 %pM\n", &dat_entry->ip,
-                          dat_entry->mac_addr);
+                          "Entry updated: %pI4 %pM (vid: %d)\n",
+                          &dat_entry->ip, dat_entry->mac_addr,
+                          BATADV_PRINT_VID(vid));
                goto out;
        }
 
@@ -290,12 +293,13 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
                goto out;
 
        dat_entry->ip = ip;
+       dat_entry->vid = vid;
        memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
        dat_entry->last_update = jiffies;
        atomic_set(&dat_entry->refcount, 2);
 
        hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
-                                    batadv_hash_dat, &dat_entry->ip,
+                                    batadv_hash_dat, dat_entry,
                                     &dat_entry->hash_entry);
 
        if (unlikely(hash_added != 0)) {
@@ -304,8 +308,8 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
                goto out;
        }
 
-       batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
-                  &dat_entry->ip, dat_entry->mac_addr);
+       batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
+                  &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid));
 
 out:
        if (dat_entry)
@@ -419,6 +423,10 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
        bool ret = false;
        int j;
 
+       /* check if orig node candidate is running DAT */
+       if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+               goto out;
+
        /* Check if this node has already been selected... */
        for (j = 0; j < select; j++)
                if (res[j].orig_node == candidate)
@@ -588,9 +596,9 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                        goto free_orig;
 
                tmp_skb = pskb_copy(skb, GFP_ATOMIC);
-               if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
-                                                     cand[i].orig_node,
-                                                     packet_subtype)) {
+               if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
+                                                          cand[i].orig_node,
+                                                          packet_subtype)) {
                        kfree_skb(tmp_skb);
                        goto free_neigh;
                }
@@ -625,6 +633,59 @@ out:
        return ret;
 }
 
+/**
+ * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ *  setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+       char dat_mode;
+
+       dat_mode = atomic_read(&bat_priv->distributed_arp_table);
+
+       switch (dat_mode) {
+       case 0:
+               batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+               break;
+       case 1:
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
+                                              NULL, 0);
+               break;
+       }
+}
+
+/**
+ * batadv_dat_status_update - update the dat tvlv container after dat
+ *  setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_dat_status_update(struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       batadv_dat_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the dat data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                          struct batadv_orig_node *orig,
+                                          uint8_t flags,
+                                          void *tvlv_value,
+                                          uint16_t tvlv_value_len)
+{
+       if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+       else
+               orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+}
+
 /**
  * batadv_dat_hash_free - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
@@ -657,6 +718,10 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 
        batadv_dat_start_timer(bat_priv);
 
+       batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_DAT, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+       batadv_dat_tvlv_container_update(bat_priv);
        return 0;
 }
 
@@ -666,6 +731,9 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
 {
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+
        cancel_delayed_work_sync(&bat_priv->dat.work);
 
        batadv_dat_hash_free(bat_priv);
@@ -693,8 +761,8 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
-       seq_printf(seq, "          %-7s          %-13s %5s\n", "IPv4", "MAC",
-                  "last-seen");
+       seq_printf(seq, "          %-7s          %-9s %4s %11s\n", "IPv4",
+                  "MAC", "VID", "last-seen");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -707,8 +775,9 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
                        last_seen_msecs = last_seen_msecs % 60000;
                        last_seen_secs = last_seen_msecs / 1000;
 
-                       seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
+                       seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n",
                                   &dat_entry->ip, dat_entry->mac_addr,
+                                  BATADV_PRINT_VID(dat_entry->vid),
                                   last_seen_mins, last_seen_secs);
                }
                rcu_read_unlock();
@@ -794,6 +863,31 @@ out:
        return type;
 }
 
+/**
+ * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet to extract the VID from
+ * @hdr_size: the size of the batman-adv header encapsulating the packet
+ *
+ * If the packet embedded in the skb is VLAN tagged, this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ */
+static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
+{
+       unsigned short vid;
+
+       vid = batadv_get_vid(skb, *hdr_size);
+
+       /* ARP parsing functions skip forward by hdr_size + ETH_HLEN bytes.
+        * If the header contained in the packet is a VLAN one (which is
+        * longer), hdr_size is updated so that those functions still skip
+        * the correct number of bytes.
+        */
+       if (vid & BATADV_VLAN_HAS_TAG)
+               *hdr_size += VLAN_HLEN;
+
+       return vid;
+}
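
A hedged, self-contained sketch of what a batadv_get_vid()-style helper does with a raw frame, using an illustrative flag value rather than the real BATADV_VLAN_HAS_TAG, together with the hdr_size adjustment described in the comment above:

#include <stdint.h>
#include <stdio.h>

#define VLAN_HAS_TAG    0x8000  /* illustrative flag, not BATADV_VLAN_HAS_TAG */
#define ETH_P_8021Q_DEMO 0x8100
#define NO_FLAGS        0

static unsigned short get_vid(const uint8_t *frame, int hdr_size)
{
        const uint8_t *eth = frame + hdr_size;
        uint16_t proto = (eth[12] << 8) | eth[13]; /* EtherType */
        uint16_t tci;

        if (proto != ETH_P_8021Q_DEMO)
                return NO_FLAGS;

        tci = (eth[14] << 8) | eth[15];
        return (tci & 0x0fff) | VLAN_HAS_TAG;
}

int main(void)
{
        uint8_t frame[32] = { 0 };
        unsigned short vid;
        int hdr_size = 0;

        frame[12] = 0x81; frame[13] = 0x00; /* 802.1Q tag follows */
        frame[14] = 0x00; frame[15] = 0x2a; /* VID 42 */

        vid = get_vid(frame, hdr_size);
        if (vid & VLAN_HAS_TAG)
                hdr_size += 4; /* VLAN header: later parsers must skip the tag */

        printf("vid %d, hdr_size now %d\n", vid & 0x0fff, hdr_size);
        return 0;
}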
+
 /**
  * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
  * answer using DAT
@@ -813,26 +907,31 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
        bool ret = false;
        struct batadv_dat_entry *dat_entry = NULL;
        struct sk_buff *skb_new;
+       int hdr_size = 0;
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
 
-       type = batadv_arp_get_type(bat_priv, skb, 0);
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
+       type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        /* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
         * message to the selected DHT candidates
         */
        if (type != ARPOP_REQUEST)
                goto out;
 
-       batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST");
+       batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+                      "Parsing outgoing ARP REQUEST");
 
-       ip_src = batadv_arp_ip_src(skb, 0);
-       hw_src = batadv_arp_hw_src(skb, 0);
-       ip_dst = batadv_arp_ip_dst(skb, 0);
+       ip_src = batadv_arp_ip_src(skb, hdr_size);
+       hw_src = batadv_arp_hw_src(skb, hdr_size);
+       ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
        if (dat_entry) {
                /* If the ARP request is destined for a local client the local
                 * client will answer itself. DAT would only generate a
@@ -842,7 +941,8 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
+                                       BATADV_NO_FLAGS)) {
                        ret = true;
                        goto out;
                }
@@ -853,11 +953,15 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                if (!skb_new)
                        goto out;
 
+               if (vid & BATADV_VLAN_HAS_TAG)
+                       skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+                                                 vid & VLAN_VID_MASK);
+
                skb_reset_mac_header(skb_new);
                skb_new->protocol = eth_type_trans(skb_new,
                                                   bat_priv->soft_iface);
                bat_priv->stats.rx_packets++;
-               bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+               bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
                bat_priv->soft_iface->last_rx = jiffies;
 
                netif_rx(skb_new);
@@ -892,11 +996,14 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        struct sk_buff *skb_new;
        struct batadv_dat_entry *dat_entry = NULL;
        bool ret = false;
+       unsigned short vid;
        int err;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
 
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
        type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        if (type != ARPOP_REQUEST)
                goto out;
@@ -908,9 +1015,9 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        batadv_dbg_arp(bat_priv, skb, type, hdr_size,
                       "Parsing incoming ARP REQUEST");
 
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
        if (!dat_entry)
                goto out;
 
@@ -921,17 +1028,22 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        if (!skb_new)
                goto out;
 
+       if (vid & BATADV_VLAN_HAS_TAG)
+               skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+                                         vid & VLAN_VID_MASK);
+
        /* To preserve backwards compatibility, the node has to choose the outgoing
         * format based on the incoming request packet type. The assumption is
         * that a node not using the 4addr packet format doesn't support it.
         */
        if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
-               err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
-                                                   BATADV_P_DAT_CACHE_REPLY);
+               err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
+                                                  BATADV_P_DAT_CACHE_REPLY,
+                                                  vid);
        else
-               err = batadv_unicast_send_skb(bat_priv, skb_new);
+               err = batadv_send_skb_via_tt(bat_priv, skb_new, vid);
 
-       if (!err) {
+       if (err != NET_XMIT_DROP) {
                batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
                ret = true;
        }
@@ -954,23 +1066,28 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        uint16_t type;
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
+       int hdr_size = 0;
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                return;
 
-       type = batadv_arp_get_type(bat_priv, skb, 0);
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
+       type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        if (type != ARPOP_REPLY)
                return;
 
-       batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY");
+       batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+                      "Parsing outgoing ARP REPLY");
 
-       hw_src = batadv_arp_hw_src(skb, 0);
-       ip_src = batadv_arp_ip_src(skb, 0);
-       hw_dst = batadv_arp_hw_dst(skb, 0);
-       ip_dst = batadv_arp_ip_dst(skb, 0);
+       hw_src = batadv_arp_hw_src(skb, hdr_size);
+       ip_src = batadv_arp_ip_src(skb, hdr_size);
+       hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+       ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
-       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
        /* Send the ARP reply to the candidates for both the IP addresses that
         * the node obtained from the ARP reply
@@ -992,10 +1109,13 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
        bool ret = false;
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
 
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
        type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        if (type != ARPOP_REPLY)
                goto out;
@@ -1011,13 +1131,13 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        /* Update our internal cache with both the IP addresses the node got
         * within the ARP reply
         */
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
-       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
        /* if this REPLY is directed to a client of mine, let's deliver the
         * packet to the interface
         */
-       ret = !batadv_is_my_client(bat_priv, hw_dst);
+       ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
 out:
        if (ret)
                kfree_skb(skb);
@@ -1040,7 +1160,8 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
        __be32 ip_dst;
        struct batadv_dat_entry *dat_entry = NULL;
        bool ret = false;
-       const size_t bcast_len = sizeof(struct batadv_bcast_packet);
+       int hdr_size = sizeof(struct batadv_bcast_packet);
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
@@ -1051,12 +1172,14 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
        if (forw_packet->num_packets)
                goto out;
 
-       type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len);
+       vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
+
+       type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
        if (type != ARPOP_REQUEST)
                goto out;
 
-       ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len);
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+       ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
        /* check if the node already got this entry */
        if (!dat_entry) {
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
index 125c8c6fcfadfed4d8b388d3f7775877a5b6ff93..60d853beb8d8214c78d25f191aff77eafaabaefd 100644 (file)
@@ -29,6 +29,7 @@
 
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
 
+void batadv_dat_status_update(struct net_device *net_dev);
 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb);
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
@@ -98,6 +99,10 @@ static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
 
 #else
 
+static inline void batadv_dat_status_update(struct net_device *net_dev)
+{
+}
+
 static inline bool
 batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                                      struct sk_buff *skb)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
new file mode 100644 (file)
index 0000000..271d321
--- /dev/null
@@ -0,0 +1,491 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+#include "fragmentation.h"
+#include "send.h"
+#include "originator.h"
+#include "routing.h"
+#include "hard-interface.h"
+#include "soft-interface.h"
+
+
+/**
+ * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * @head: head of chain with entries.
+ *
+ * Free fragments in the passed hlist. Should be called with appropriate lock.
+ */
+static void batadv_frag_clear_chain(struct hlist_head *head)
+{
+       struct batadv_frag_list_entry *entry;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_safe(entry, node, head, list) {
+               hlist_del(&entry->list);
+               kfree_skb(entry->skb);
+               kfree(entry);
+       }
+}
+
+/**
+ * batadv_frag_purge_orig - free fragments associated to an orig
+ * @orig_node: originator to free fragments from
+ * @check_cb: optional function to tell if an entry should be purged
+ */
+void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
+                           bool (*check_cb)(struct batadv_frag_table_entry *))
+{
+       struct batadv_frag_table_entry *chain;
+       uint8_t i;
+
+       for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+               chain = &orig_node->fragments[i];
+               spin_lock_bh(&orig_node->fragments[i].lock);
+
+               if (!check_cb || check_cb(chain)) {
+                       batadv_frag_clear_chain(&orig_node->fragments[i].head);
+                       orig_node->fragments[i].size = 0;
+               }
+
+               spin_unlock_bh(&orig_node->fragments[i].lock);
+       }
+}
+
+/**
+ * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ *
+ * Returns the maximum size of payload that can be fragmented.
+ */
+static int batadv_frag_size_limit(void)
+{
+       int limit = BATADV_FRAG_MAX_FRAG_SIZE;
+
+       limit -= sizeof(struct batadv_frag_packet);
+       limit *= BATADV_FRAG_MAX_FRAGMENTS;
+
+       return limit;
+}
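
A worked example of the limit computed above; the 1400-byte maximum fragment size, 20-byte fragment header and 16-fragment cap are assumed values used for illustration, not quoted from the headers:

#include <stdio.h>

int main(void)
{
        int max_frag_size = 1400; /* assumed BATADV_FRAG_MAX_FRAG_SIZE */
        int frag_hdr_size = 20;   /* assumed sizeof(struct batadv_frag_packet) */
        int max_fragments = 16;   /* assumed BATADV_FRAG_MAX_FRAGMENTS */

        int limit = (max_frag_size - frag_hdr_size) * max_fragments;

        printf("fragmentation size limit: %d bytes\n", limit); /* 22080 */
        return 0;
}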
+
+/**
+ * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * @chain: chain in fragments table to init
+ * @seqno: sequence number of the received fragment
+ *
+ * Make chain ready for a fragment with sequence number "seqno". Delete existing
+ * entries if they have an "old" sequence number.
+ *
+ * Caller must hold chain->lock.
+ *
+ * Returns true if chain is empty and caller can just insert the new fragment
+ * without searching for the right position.
+ */
+static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
+                                  uint16_t seqno)
+{
+       if (chain->seqno == seqno)
+               return false;
+
+       if (!hlist_empty(&chain->head))
+               batadv_frag_clear_chain(&chain->head);
+
+       chain->size = 0;
+       chain->seqno = seqno;
+
+       return true;
+}
+
+/**
+ * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * @orig_node: originator that the fragment was received from
+ * @skb: skb to insert
+ * @chain_out: list head to attach complete chains of fragments to
+ *
+ * Insert a new fragment into the reverse ordered chain in the right table
+ * entry. The hash table entry is cleared if "old" fragments exist in it.
+ *
+ * Returns true if skb is buffered, false on error. If the chain has all the
+ * fragments needed to merge the packet, the chain is moved to the passed head
+ * to avoid locking the chain in the table.
+ */
+static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+                                     struct sk_buff *skb,
+                                     struct hlist_head *chain_out)
+{
+       struct batadv_frag_table_entry *chain;
+       struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+       struct batadv_frag_packet *frag_packet;
+       uint8_t bucket;
+       uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+       bool ret = false;
+
+       /* Linearize packet to avoid linearizing 16 packets in a row when doing
+        * the later merge. Non-linear merge should be added to remove this
+        * linearization.
+        */
+       if (skb_linearize(skb) < 0)
+               goto err;
+
+       frag_packet = (struct batadv_frag_packet *)skb->data;
+       seqno = ntohs(frag_packet->seqno);
+       bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
+
+       frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
+       if (!frag_entry_new)
+               goto err;
+
+       frag_entry_new->skb = skb;
+       frag_entry_new->no = frag_packet->no;
+
+       /* Select entry in the "chain table" and delete any prior fragments
+        * with another sequence number. batadv_frag_init_chain() returns true,
+        * if the list is empty at return.
+        */
+       chain = &orig_node->fragments[bucket];
+       spin_lock_bh(&chain->lock);
+       if (batadv_frag_init_chain(chain, seqno)) {
+               hlist_add_head(&frag_entry_new->list, &chain->head);
+               chain->size = skb->len - hdr_size;
+               chain->timestamp = jiffies;
+               ret = true;
+               goto out;
+       }
+
+       /* Find the position for the new fragment. */
+       hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
+               /* Drop packet if fragment already exists. */
+               if (frag_entry_curr->no == frag_entry_new->no)
+                       goto err_unlock;
+
+               /* Order fragments from highest to lowest. */
+               if (frag_entry_curr->no < frag_entry_new->no) {
+                       hlist_add_before(&frag_entry_new->list,
+                                        &frag_entry_curr->list);
+                       chain->size += skb->len - hdr_size;
+                       chain->timestamp = jiffies;
+                       ret = true;
+                       goto out;
+               }
+       }
+
+       /* Reached the end of the list, so insert after 'frag_entry_curr'. */
+       if (likely(frag_entry_curr)) {
+               hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+               chain->size += skb->len - hdr_size;
+               chain->timestamp = jiffies;
+               ret = true;
+       }
+
+out:
+       if (chain->size > batadv_frag_size_limit() ||
+           ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+               /* Clear chain if total size of either the list or the packet
+                * exceeds the maximum size of one merged packet.
+                */
+               batadv_frag_clear_chain(&chain->head);
+               chain->size = 0;
+       } else if (ntohs(frag_packet->total_size) == chain->size) {
+               /* All fragments received. Hand over chain to caller. */
+               hlist_move_list(&chain->head, chain_out);
+               chain->size = 0;
+       }
+
+err_unlock:
+       spin_unlock_bh(&chain->lock);
+
+err:
+       if (!ret)
+               kfree(frag_entry_new);
+
+       return ret;
+}
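
A simplified, single-threaded sketch of the buffering policy implemented above, with locking, hash buckets and skbs left out: reset the chain when a fragment with a new sequence number arrives, keep fragments sorted by descending fragment number, drop duplicates, and report completion once the buffered payload matches the announced total size.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct frag {
        uint8_t no;
        int payload_len;
        struct frag *next;
};

struct chain {
        uint16_t seqno;
        int size;
        struct frag *head;
};

static void chain_clear(struct chain *c)
{
        struct frag *f, *next;

        for (f = c->head; f; f = next) {
                next = f->next;
                free(f);
        }
        c->head = NULL;
        c->size = 0;
}

/* returns true once the chain holds total_size bytes of payload */
static bool chain_insert(struct chain *c, uint16_t seqno, uint8_t no,
                         int payload_len, int total_size)
{
        struct frag **pos, *f;

        if (c->seqno != seqno) {        /* new packet: drop the stale chain */
                chain_clear(c);
                c->seqno = seqno;
        }

        for (pos = &c->head; *pos; pos = &(*pos)->next) {
                if ((*pos)->no == no)   /* duplicate fragment */
                        return false;
                if ((*pos)->no < no)    /* keep descending order */
                        break;
        }

        f = malloc(sizeof(*f));
        if (!f)
                return false;
        f->no = no;
        f->payload_len = payload_len;
        f->next = *pos;
        *pos = f;
        c->size += payload_len;

        return c->size == total_size;
}

int main(void)
{
        struct chain c = { 0 };

        printf("%d\n", chain_insert(&c, 7, 1, 100, 250)); /* 0: incomplete */
        printf("%d\n", chain_insert(&c, 7, 0, 150, 250)); /* 1: complete  */
        chain_clear(&c);
        return 0;
}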
+
+/**
+ * batadv_frag_merge_packets - merge a chain of fragments
+ * @chain: head of chain with fragments
+ * @skb: packet with total size of skb after merging
+ *
+ * Expand the first skb in the chain and copy the content of the remaining
+ * skb's into the expanded one. After doing so, clear the chain.
+ *
+ * Returns the merged skb or NULL on error.
+ */
+static struct sk_buff *
+batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+{
+       struct batadv_frag_packet *packet;
+       struct batadv_frag_list_entry *entry;
+       struct sk_buff *skb_out = NULL;
+       int size, hdr_size = sizeof(struct batadv_frag_packet);
+
+       /* Make sure incoming skb has non-bogus data. */
+       packet = (struct batadv_frag_packet *)skb->data;
+       size = ntohs(packet->total_size);
+       if (size > batadv_frag_size_limit())
+               goto free;
+
+       /* Remove first entry, as this is the destination for the rest of the
+        * fragments.
+        */
+       entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
+       hlist_del(&entry->list);
+       skb_out = entry->skb;
+       kfree(entry);
+
+       /* Make room for the rest of the fragments. */
+       if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+               kfree_skb(skb_out);
+               skb_out = NULL;
+               goto free;
+       }
+
+       /* Move the existing MAC header to just before the payload. (Override
+        * the fragment header.)
+        */
+       skb_pull_rcsum(skb_out, hdr_size);
+       memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
+       skb_set_mac_header(skb_out, -ETH_HLEN);
+       skb_reset_network_header(skb_out);
+       skb_reset_transport_header(skb_out);
+
+       /* Copy the payload of each fragment into the last skb */
+       hlist_for_each_entry(entry, chain, list) {
+               size = entry->skb->len - hdr_size;
+               memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
+                      size);
+       }
+
+free:
+       /* Locking is not needed, because 'chain' is not part of any orig. */
+       batadv_frag_clear_chain(chain);
+       return skb_out;
+}
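
A trivial illustration of why copying the buffered payloads in chain order rebuilds the original data: the chain is kept sorted by descending fragment number, and because batadv_frag_send_packet() numbers chunks from the tail of the packet upwards, that order corresponds to head-to-tail data.

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* payloads as they sit in the chain: descending fragment number,
         * i.e. the head of the original data comes first
         */
        const char *chain[] = { "HEAD-", "MID-", "TAIL" };
        char merged[32] = "";
        size_t i;

        for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
                strcat(merged, chain[i]);

        printf("%s\n", merged); /* HEAD-MID-TAIL */
        return 0;
}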
+
+/**
+ * batadv_frag_skb_buffer - buffer fragment for later merge
+ * @skb: skb to buffer
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Add fragment to buffer and merge fragments if possible.
+ *
+ * There are three possible outcomes: 1) Packet is merged: Return true and
+ * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
+ * to NULL; 3) Error: Return false and leave skb as is.
+ */
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+                           struct batadv_orig_node *orig_node_src)
+{
+       struct sk_buff *skb_out = NULL;
+       struct hlist_head head = HLIST_HEAD_INIT;
+       bool ret = false;
+
+       /* Add packet to buffer and table entry if merge is possible. */
+       if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
+               goto out_err;
+
+       /* Leave if more fragments are needed to merge. */
+       if (hlist_empty(&head))
+               goto out;
+
+       skb_out = batadv_frag_merge_packets(&head, *skb);
+       if (!skb_out)
+               goto out_err;
+
+out:
+       *skb = skb_out;
+       ret = true;
+out_err:
+       return ret;
+}
+
+/**
+ * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * @skb: skb to forward
+ * @recv_if: interface that the skb is received on
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Look up the next-hop of the fragments payload and check if the merged packet
+ * will exceed the MTU towards the next-hop. If so, the fragment is forwarded
+ * without merging it.
+ *
+ * Returns true if the fragment is consumed/forwarded, false otherwise.
+ */
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+                        struct batadv_hard_iface *recv_if,
+                        struct batadv_orig_node *orig_node_src)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_orig_node *orig_node_dst = NULL;
+       struct batadv_neigh_node *neigh_node = NULL;
+       struct batadv_frag_packet *packet;
+       uint16_t total_size;
+       bool ret = false;
+
+       packet = (struct batadv_frag_packet *)skb->data;
+       orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
+       if (!orig_node_dst)
+               goto out;
+
+       neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
+       if (!neigh_node)
+               goto out;
+
+       /* Forward the fragment, if the merged packet would be too big to
+        * be assembled.
+        */
+       total_size = ntohs(packet->total_size);
+       if (total_size > neigh_node->if_incoming->net_dev->mtu) {
+               batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
+               batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
+                                  skb->len + ETH_HLEN);
+
+               packet->header.ttl--;
+               batadv_send_skb_packet(skb, neigh_node->if_incoming,
+                                      neigh_node->addr);
+               ret = true;
+       }
+
+out:
+       if (orig_node_dst)
+               batadv_orig_node_free_ref(orig_node_dst);
+       if (neigh_node)
+               batadv_neigh_node_free_ref(neigh_node);
+       return ret;
+}
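
The decision above reduces to a single comparison; a tiny sketch with made-up numbers: if the announced total size would not fit through the next hop's MTU, decrement the TTL and forward the fragment unchanged instead of buffering it for a local merge.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool forward_instead_of_merge(uint16_t total_size, int nexthop_mtu,
                                     uint8_t *ttl)
{
        if (total_size <= nexthop_mtu)
                return false;   /* small enough: buffer and merge locally */

        (*ttl)--;               /* forward the fragment as-is */
        return true;
}

int main(void)
{
        uint8_t ttl = 50;

        printf("%d\n", forward_instead_of_merge(3000, 1500, &ttl)); /* 1 */
        printf("%d\n", forward_instead_of_merge(1200, 1500, &ttl)); /* 0 */
        return 0;
}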
+
+/**
+ * batadv_frag_create - create a fragment from skb
+ * @skb: skb to create fragment from
+ * @frag_head: header to use in new fragment
+ * @mtu: size of new fragment
+ *
+ * Split the passed skb into two fragments: A new one with size matching the
+ * passed mtu and the old one with the rest. The new skb contains data from the
+ * tail of the old skb.
+ *
+ * Returns the new fragment, NULL on error.
+ */
+static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
+                                         struct batadv_frag_packet *frag_head,
+                                         unsigned int mtu)
+{
+       struct sk_buff *skb_fragment;
+       unsigned header_size = sizeof(*frag_head);
+       unsigned fragment_size = mtu - header_size;
+
+       skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
+       if (!skb_fragment)
+               goto err;
+
+       skb->priority = TC_PRIO_CONTROL;
+
+       /* Eat the last mtu-bytes of the skb */
+       skb_reserve(skb_fragment, header_size + ETH_HLEN);
+       skb_split(skb, skb_fragment, skb->len - fragment_size);
+
+       /* Add the header */
+       skb_push(skb_fragment, header_size);
+       memcpy(skb_fragment->data, frag_head, header_size);
+
+err:
+       return skb_fragment;
+}
+
+/**
+ * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * @skb: skb to create fragments from
+ * @orig_node: final destination of the created fragments
+ * @neigh_node: next-hop of the created fragments
+ *
+ * Returns true on success, false otherwise.
+ */
+bool batadv_frag_send_packet(struct sk_buff *skb,
+                            struct batadv_orig_node *orig_node,
+                            struct batadv_neigh_node *neigh_node)
+{
+       struct batadv_priv *bat_priv;
+       struct batadv_hard_iface *primary_if;
+       struct batadv_frag_packet frag_header;
+       struct sk_buff *skb_fragment;
+       unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
+       unsigned header_size = sizeof(frag_header);
+       unsigned max_fragment_size, max_packet_size;
+
+       /* To avoid merge and refragmentation at next-hops we never send
+        * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
+        */
+       mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+       max_fragment_size = (mtu - header_size - ETH_HLEN);
+       max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+       /* Don't even try to fragment, if we need more than 16 fragments */
+       if (skb->len > max_packet_size)
+               goto out_err;
+
+       bat_priv = orig_node->bat_priv;
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out_err;
+
+       /* Create one header to be copied to all fragments */
+       frag_header.header.packet_type = BATADV_UNICAST_FRAG;
+       frag_header.header.version = BATADV_COMPAT_VERSION;
+       frag_header.header.ttl = BATADV_TTL;
+       frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+       frag_header.reserved = 0;
+       frag_header.no = 0;
+       frag_header.total_size = htons(skb->len);
+       memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);
+
+       /* Eat and send fragments from the tail of skb */
+       while (skb->len > max_fragment_size) {
+               skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+               if (!skb_fragment)
+                       goto out_err;
+
+               batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+               batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+                                  skb_fragment->len + ETH_HLEN);
+               batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
+                                      neigh_node->addr);
+               frag_header.no++;
+
+               /* The initial check in this function should cover this case */
+               if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
+                       goto out_err;
+       }
+
+       /* Make room for the fragment header. */
+       if (batadv_skb_head_push(skb, header_size) < 0 ||
+           pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
+               goto out_err;
+
+       memcpy(skb->data, &frag_header, header_size);
+
+       /* Send the last fragment */
+       batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+       batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+                          skb->len + ETH_HLEN);
+       batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+
+       return true;
+out_err:
+       return false;
+}
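
A self-contained sketch (not the kernel code) of the transmit strategy above: clamp the per-fragment payload, refuse packets that would need more than the maximum number of fragments, carve fragments off the tail of the payload with an increasing fragment number, and send the remaining head as the last fragment. The header layout and the constants below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_FRAGMENTS    16
#define MAX_FRAG_PAYLOAD 100    /* assumed per-fragment payload budget */

struct frag_hdr {
        uint8_t no;
        uint16_t seqno;
        uint16_t total_size;
};

static void send_fragment(const struct frag_hdr *hdr, const uint8_t *data,
                          int len)
{
        printf("fragment no=%u: %d payload bytes\n", hdr->no, len);
        (void)data;
}

static bool frag_send(const uint8_t *payload, int len, uint16_t seqno)
{
        struct frag_hdr hdr = { .no = 0, .seqno = seqno,
                                .total_size = (uint16_t)len };
        int tail = len;

        if (len > MAX_FRAG_PAYLOAD * MAX_FRAGMENTS)
                return false;   /* would need too many fragments */

        /* eat fragments from the tail of the payload */
        while (tail > MAX_FRAG_PAYLOAD) {
                tail -= MAX_FRAG_PAYLOAD;
                send_fragment(&hdr, payload + tail, MAX_FRAG_PAYLOAD);
                hdr.no++;
        }

        /* the head of the payload goes out as the last fragment */
        send_fragment(&hdr, payload, tail);
        return true;
}

int main(void)
{
        uint8_t payload[250];

        memset(payload, 0xab, sizeof(payload));
        return frag_send(payload, sizeof(payload), 1) ? 0 : 1;
}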
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
new file mode 100644 (file)
index 0000000..ca029e2
--- /dev/null
@@ -0,0 +1,50 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
+#define _NET_BATMAN_ADV_FRAGMENTATION_H_
+
+void batadv_frag_purge_orig(struct batadv_orig_node *orig,
+                           bool (*check_cb)(struct batadv_frag_table_entry *));
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+                        struct batadv_hard_iface *recv_if,
+                        struct batadv_orig_node *orig_node_src);
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+                           struct batadv_orig_node *orig_node);
+bool batadv_frag_send_packet(struct sk_buff *skb,
+                            struct batadv_orig_node *orig_node,
+                            struct batadv_neigh_node *neigh_node);
+
+/**
+ * batadv_frag_check_entry - check if a list of fragments has timed out
+ * @frags_entry: table entry to check
+ *
+ * Returns true if the frags entry has timed out, false otherwise.
+ */
+static inline bool
+batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
+{
+       if (!hlist_empty(&frags_entry->head) &&
+           batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
+               return true;
+       else
+               return false;
+}
+
+#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
index 1ce4b8763ef289f3679177c5a1fdc96df6e6126f..2449afaa7638358500c350af3bc39fb5ccc2d72e 100644 (file)
@@ -118,7 +118,6 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
        uint32_t gw_divisor;
        uint8_t max_tq = 0;
-       int down, up;
        uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
@@ -138,14 +137,13 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                if (!atomic_inc_not_zero(&gw_node->refcount))
                        goto next;
 
-               tq_avg = router->tq_avg;
+               tq_avg = router->bat_iv.tq_avg;
 
                switch (atomic_read(&bat_priv->gw_sel_class)) {
                case 1: /* fast connection */
-                       batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
-                                                   &down, &up);
-
-                       tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+                       tmp_gw_factor = tq_avg * tq_avg;
+                       tmp_gw_factor *= gw_node->bandwidth_down;
+                       tmp_gw_factor *= 100 * 100;
                        tmp_gw_factor /= gw_divisor;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
@@ -223,11 +221,6 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
        struct batadv_neigh_node *router = NULL;
        char gw_addr[18] = { '\0' };
 
-       /* The batman daemon checks here if we already passed a full originator
-        * cycle in order to make sure we don't choose the first gateway we
-        * hear about. This check is based on the daemon's uptime which we
-        * don't have.
-        */
        if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
                goto out;
 
@@ -258,16 +251,22 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
                                    NULL);
        } else if ((!curr_gw) && (next_gw)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+                          "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
                           next_gw->orig_node->orig,
-                          next_gw->orig_node->gw_flags, router->tq_avg);
+                          next_gw->bandwidth_down / 10,
+                          next_gw->bandwidth_down % 10,
+                          next_gw->bandwidth_up / 10,
+                          next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
                batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
                                    gw_addr);
        } else {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+                          "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
                           next_gw->orig_node->orig,
-                          next_gw->orig_node->gw_flags, router->tq_avg);
+                          next_gw->bandwidth_down / 10,
+                          next_gw->bandwidth_down % 10,
+                          next_gw->bandwidth_up / 10,
+                          next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
                batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
                                    gw_addr);
        }
@@ -306,8 +305,8 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
        if (!router_orig)
                goto out;
 
-       gw_tq_avg = router_gw->tq_avg;
-       orig_tq_avg = router_orig->tq_avg;
+       gw_tq_avg = router_gw->bat_iv.tq_avg;
+       orig_tq_avg = router_orig->bat_iv.tq_avg;
 
        /* the TQ value has to be better */
        if (orig_tq_avg < gw_tq_avg)
@@ -337,12 +336,20 @@ out:
        return;
 }
 
+/**
+ * batadv_gw_node_add - add gateway node to list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
-                              uint8_t new_gwflags)
+                              struct batadv_tvlv_gateway_data *gateway)
 {
        struct batadv_gw_node *gw_node;
-       int down, up;
+
+       if (gateway->bandwidth_down == 0)
+               return;
 
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
        if (!gw_node)
@@ -356,73 +363,116 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
        spin_unlock_bh(&bat_priv->gw.list_lock);
 
-       batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
-                  orig_node->orig, new_gwflags,
-                  (down > 2048 ? down / 1024 : down),
-                  (down > 2048 ? "MBit" : "KBit"),
-                  (up > 2048 ? up / 1024 : up),
-                  (up > 2048 ? "MBit" : "KBit"));
+                  "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
+                  orig_node->orig,
+                  ntohl(gateway->bandwidth_down) / 10,
+                  ntohl(gateway->bandwidth_down) % 10,
+                  ntohl(gateway->bandwidth_up) / 10,
+                  ntohl(gateway->bandwidth_up) % 10);
 }
 
-void batadv_gw_node_update(struct batadv_priv *bat_priv,
-                          struct batadv_orig_node *orig_node,
-                          uint8_t new_gwflags)
+/**
+ * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ *
+ * Returns gateway node if found or NULL otherwise.
+ */
+static struct batadv_gw_node *
+batadv_gw_node_get(struct batadv_priv *bat_priv,
+                  struct batadv_orig_node *orig_node)
 {
-       struct batadv_gw_node *gw_node, *curr_gw;
-
-       /* Note: We don't need a NULL check here, since curr_gw never gets
-        * dereferenced. If curr_gw is NULL we also should not exit as we may
-        * have this gateway in our list (duplication check!) even though we
-        * have no currently selected gateway.
-        */
-       curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+       struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-               if (gw_node->orig_node != orig_node)
+       hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) {
+               if (gw_node_tmp->orig_node != orig_node)
                        continue;
 
-               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Gateway class of originator %pM changed from %i to %i\n",
-                          orig_node->orig, gw_node->orig_node->gw_flags,
-                          new_gwflags);
+               if (gw_node_tmp->deleted)
+                       continue;
 
-               gw_node->deleted = 0;
+               if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
+                       continue;
 
-               if (new_gwflags == BATADV_NO_FLAGS) {
-                       gw_node->deleted = jiffies;
-                       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                                  "Gateway %pM removed from gateway list\n",
-                                  orig_node->orig);
+               gw_node = gw_node_tmp;
+               break;
+       }
+       rcu_read_unlock();
 
-                       if (gw_node == curr_gw)
-                               goto deselect;
-               }
+       return gw_node;
+}
 
-               goto unlock;
+/**
+ * batadv_gw_node_update - update list of available gateways with changed
+ *  bandwidth information
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+                          struct batadv_orig_node *orig_node,
+                          struct batadv_tvlv_gateway_data *gateway)
+{
+       struct batadv_gw_node *gw_node, *curr_gw = NULL;
+
+       gw_node = batadv_gw_node_get(bat_priv, orig_node);
+       if (!gw_node) {
+               batadv_gw_node_add(bat_priv, orig_node, gateway);
+               goto out;
        }
 
-       if (new_gwflags == BATADV_NO_FLAGS)
-               goto unlock;
+       if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
+           (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
+               goto out;
 
-       batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
-       goto unlock;
+       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                  "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
+                  orig_node->orig,
+                  gw_node->bandwidth_down / 10,
+                  gw_node->bandwidth_down % 10,
+                  gw_node->bandwidth_up / 10,
+                  gw_node->bandwidth_up % 10,
+                  ntohl(gateway->bandwidth_down) / 10,
+                  ntohl(gateway->bandwidth_down) % 10,
+                  ntohl(gateway->bandwidth_up) / 10,
+                  ntohl(gateway->bandwidth_up) % 10);
+
+       gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+       gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+
+       gw_node->deleted = 0;
+       if (ntohl(gateway->bandwidth_down) == 0) {
+               gw_node->deleted = jiffies;
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "Gateway %pM removed from gateway list\n",
+                          orig_node->orig);
 
-deselect:
-       batadv_gw_deselect(bat_priv);
-unlock:
-       rcu_read_unlock();
+               /* Note: We don't need a NULL check here, since curr_gw never
+                * gets dereferenced.
+                */
+               curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+               if (gw_node == curr_gw)
+                       batadv_gw_deselect(bat_priv);
+       }
 
+out:
        if (curr_gw)
                batadv_gw_node_free_ref(curr_gw);
+       if (gw_node)
+               batadv_gw_node_free_ref(gw_node);
 }
 
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node)
 {
-       batadv_gw_node_update(bat_priv, orig_node, 0);
+       struct batadv_tvlv_gateway_data gateway;
+
+       gateway.bandwidth_down = 0;
+       gateway.bandwidth_up = 0;
+
+       batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
 void batadv_gw_node_purge(struct batadv_priv *bat_priv)
@@ -467,9 +517,7 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *curr_gw;
        struct batadv_neigh_node *router;
-       int down, up, ret = -1;
-
-       batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
+       int ret = -1;
 
        router = batadv_orig_node_get_router(gw_node->orig_node);
        if (!router)
@@ -477,16 +525,15 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
                         (curr_gw == gw_node ? "=>" : "  "),
                         gw_node->orig_node->orig,
-                        router->tq_avg, router->addr,
+                        router->bat_iv.tq_avg, router->addr,
                         router->if_incoming->net_dev->name,
-                        gw_node->orig_node->gw_flags,
-                        (down > 2048 ? down / 1024 : down),
-                        (down > 2048 ? "MBit" : "KBit"),
-                        (up > 2048 ? up / 1024 : up),
-                        (up > 2048 ? "MBit" : "KBit"));
+                        gw_node->bandwidth_down / 10,
+                        gw_node->bandwidth_down % 10,
+                        gw_node->bandwidth_up / 10,
+                        gw_node->bandwidth_up % 10);
 
        batadv_neigh_node_free_ref(router);
        if (curr_gw)
@@ -508,7 +555,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq,
-                  "      %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+                  "      %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
                   "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name);
@@ -603,24 +650,29 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct udphdr *udphdr;
+       struct vlan_ethhdr *vhdr;
+       __be16 proto;
 
        /* check for ethernet header */
        if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
                return false;
        ethhdr = (struct ethhdr *)skb->data;
+       proto = ethhdr->h_proto;
        *header_len += ETH_HLEN;
 
        /* check for initial vlan header */
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+       if (proto == htons(ETH_P_8021Q)) {
                if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
                        return false;
-               ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
+               vhdr = (struct vlan_ethhdr *)skb->data;
+               proto = vhdr->h_vlan_encapsulated_proto;
                *header_len += VLAN_HLEN;
        }
 
        /* check for ip header */
-       switch (ntohs(ethhdr->h_proto)) {
-       case ETH_P_IP:
+       switch (proto) {
+       case htons(ETH_P_IP):
                if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
                        return false;
                iphdr = (struct iphdr *)(skb->data + *header_len);
@@ -631,7 +683,7 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
                        return false;
 
                break;
-       case ETH_P_IPV6:
+       case htons(ETH_P_IPV6):
                if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
                        return false;
                ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
@@ -658,28 +710,44 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
        *header_len += sizeof(*udphdr);
 
        /* check for bootp port */
-       if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
-           (ntohs(udphdr->dest) != 67))
+       if ((proto == htons(ETH_P_IP)) &&
+           (udphdr->dest != htons(67)))
                return false;
 
-       if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
-           (ntohs(udphdr->dest) != 547))
+       if ((proto == htons(ETH_P_IPV6)) &&
+           (udphdr->dest != htons(547)))
                return false;
 
        return true;
 }
 
-/* this call might reallocate skb data */
+/**
+ * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the outgoing packet
+ *
+ * Check if the skb is a DHCP request and if it is sent to the current best GW
+ * server. Due to topology changes it may be the case that the GW server
+ * previously selected is not the best one anymore.
+ *
+ * Returns true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
+ *
+ * This call might reallocate skb data.
+ */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                            struct sk_buff *skb)
 {
        struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
        struct batadv_orig_node *orig_dst_node = NULL;
-       struct batadv_gw_node *curr_gw = NULL;
+       struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
        struct ethhdr *ethhdr;
        bool ret, out_of_range = false;
        unsigned int header_len = 0;
        uint8_t curr_tq_avg;
+       unsigned short vid;
+
+       vid = batadv_get_vid(skb, 0);
 
        ret = batadv_gw_is_dhcp_target(skb, &header_len);
        if (!ret)
@@ -687,11 +755,12 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 
        ethhdr = (struct ethhdr *)skb->data;
        orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
-                                                ethhdr->h_dest);
+                                                ethhdr->h_dest, vid);
        if (!orig_dst_node)
                goto out;
 
-       if (!orig_dst_node->gw_flags)
+       gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+       if (!gw_node || gw_node->bandwidth_down == 0)
                goto out;
 
        ret = batadv_is_type_dhcprequest(skb, header_len);
@@ -723,7 +792,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                if (!neigh_curr)
                        goto out;
 
-               curr_tq_avg = neigh_curr->tq_avg;
+               curr_tq_avg = neigh_curr->bat_iv.tq_avg;
                break;
        case BATADV_GW_MODE_OFF:
        default:
@@ -734,7 +803,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
        if (!neigh_old)
                goto out;
 
-       if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
+       if (curr_tq_avg - neigh_old->bat_iv.tq_avg > BATADV_GW_THRESHOLD)
                out_of_range = true;
 
 out:
@@ -742,6 +811,8 @@ out:
                batadv_orig_node_free_ref(orig_dst_node);
        if (curr_gw)
                batadv_gw_node_free_ref(curr_gw);
+       if (gw_node)
+               batadv_gw_node_free_ref(gw_node);
        if (neigh_old)
                batadv_neigh_node_free_ref(neigh_old);
        if (neigh_curr)
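With gw_flags gone, the "fast connection" ranking in batadv_gw_get_best_gw_node() scales the squared TQ value by the advertised download bandwidth (kept in 100 kbit/s units) and divides by gw_divisor. A minimal userspace sketch of that ranking follows; the divisor value and the 64-bit accumulator are choices made for this illustration only, not taken from the kernel code.

/* Illustrative ranking of two gateway candidates. */
#include <stdint.h>
#include <stdio.h>

static uint64_t gw_factor(uint8_t tq_avg, uint32_t bandwidth_down,
                          uint64_t gw_divisor)
{
    uint64_t factor = (uint64_t)tq_avg * tq_avg; /* link quality squared */

    factor *= bandwidth_down;   /* advertised downlink, 100 kbit/s units */
    factor *= 100 * 100;
    factor /= gw_divisor;

    return factor;
}

int main(void)
{
    const uint64_t divisor = 64;    /* placeholder for gw_divisor */

    /* gateway A: 10.0 MBit downlink (value 100), tq_avg 200
     * gateway B: 50.0 MBit downlink (value 500), tq_avg 120
     */
    printf("gw A factor: %llu\n",
           (unsigned long long)gw_factor(200, 100, divisor));
    printf("gw B factor: %llu\n",
           (unsigned long long)gw_factor(120, 500, divisor));
    return 0;
}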
index ceef4ebe8bcd6a89711000c0d52b20e536c5375a..d95c2d23195ee962496667d449a25d7b58b2741c 100644 (file)
@@ -29,7 +29,7 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node);
 void batadv_gw_node_update(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node,
-                          uint8_t new_gwflags);
+                          struct batadv_tvlv_gateway_data *gateway);
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node);
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
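The reworked batadv_gw_node_update() now covers the whole lifecycle: an unknown originator announcing a non-zero downlink is added, a known one has its bandwidth refreshed, and an announcement of zero marks the entry as deleted and forces a re-election if it was the selected gateway. A simplified userspace sketch of that decision flow follows; struct gw_entry and the helper names are illustrative stand-ins, not the kernel types.

/* Simplified decision flow of a gateway list update (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

struct gw_entry {
    bool known;
    bool deleted;
    unsigned int bw_down, bw_up;    /* in 100 kbit/s units */
};

static void gw_update(struct gw_entry *entry, unsigned int down,
                      unsigned int up, bool is_selected)
{
    if (!entry->known) {
        if (down == 0)
            return;     /* nothing to add */
        entry->known = true;
        entry->bw_down = down;
        entry->bw_up = up;
        printf("added gateway: %u.%u/%u.%u MBit\n",
               down / 10, down % 10, up / 10, up % 10);
        return;
    }

    if (entry->bw_down == down && entry->bw_up == up)
        return;         /* nothing changed */

    entry->bw_down = down;
    entry->bw_up = up;
    entry->deleted = false;

    if (down == 0) {
        entry->deleted = true;
        printf("gateway removed from list\n");
        if (is_selected)
            printf("re-running gateway election\n");
    }
}

int main(void)
{
    struct gw_entry entry = { 0 };

    gw_update(&entry, 100, 20, false);  /* first announcement: add */
    gw_update(&entry, 200, 20, false);  /* bandwidth change: refresh */
    gw_update(&entry, 0, 0, true);      /* withdrawal: delete + re-elect */
    return 0;
}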
index 84bb2b18d7110a4597dc71c7b2f68c032d7750a9..b211b0f9cb788efa2b476616dd534675fb746b66 100644 (file)
 #include "gateway_common.h"
 #include "gateway_client.h"
 
-/* calculates the gateway class from kbit */
-static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
-{
-       int mdown = 0, tdown, tup, difference;
-       uint8_t sbit, part;
-
-       *gw_srv_class = 0;
-       difference = 0x0FFFFFFF;
-
-       /* test all downspeeds */
-       for (sbit = 0; sbit < 2; sbit++) {
-               for (part = 0; part < 16; part++) {
-                       tdown = 32 * (sbit + 2) * (1 << part);
-
-                       if (abs(tdown - down) < difference) {
-                               *gw_srv_class = (sbit << 7) + (part << 3);
-                               difference = abs(tdown - down);
-                               mdown = tdown;
-                       }
-               }
-       }
-
-       /* test all upspeeds */
-       difference = 0x0FFFFFFF;
-
-       for (part = 0; part < 8; part++) {
-               tup = ((part + 1) * (mdown)) / 8;
-
-               if (abs(tup - up) < difference) {
-                       *gw_srv_class = (*gw_srv_class & 0xF8) | part;
-                       difference = abs(tup - up);
-               }
-       }
-}
-
-/* returns the up and downspeeds in kbit, calculated from the class */
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
-{
-       int sbit = (gw_srv_class & 0x80) >> 7;
-       int dpart = (gw_srv_class & 0x78) >> 3;
-       int upart = (gw_srv_class & 0x07);
-
-       if (!gw_srv_class) {
-               *down = 0;
-               *up = 0;
-               return;
-       }
-
-       *down = 32 * (sbit + 2) * (1 << dpart);
-       *up = ((upart + 1) * (*down)) / 8;
-}
-
+/**
+ * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
+ *  and upload bandwidth information
+ * @net_dev: the soft interface net device
+ * @buff: string buffer to parse
+ * @down: pointer holding the returned download bandwidth information
+ * @up: pointer holding the returned upload bandwidth information
+ *
+ * Returns false on parse error and true otherwise.
+ */
 static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-                                     int *up, int *down)
+                                     uint32_t *down, uint32_t *up)
 {
-       int ret, multi = 1;
+       enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
        char *slash_ptr, *tmp_ptr;
        long ldown, lup;
+       int ret;
 
        slash_ptr = strchr(buff, '/');
        if (slash_ptr)
@@ -88,10 +47,10 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                tmp_ptr = buff + strlen(buff) - 4;
 
                if (strnicmp(tmp_ptr, "mbit", 4) == 0)
-                       multi = 1024;
+                       bw_unit_type = BATADV_BW_UNIT_MBIT;
 
                if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
-                   (multi > 1))
+                   (bw_unit_type == BATADV_BW_UNIT_MBIT))
                        *tmp_ptr = '\0';
        }
 
@@ -103,20 +62,28 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                return false;
        }
 
-       *down = ldown * multi;
+       switch (bw_unit_type) {
+       case BATADV_BW_UNIT_MBIT:
+               *down = ldown * 10;
+               break;
+       case BATADV_BW_UNIT_KBIT:
+       default:
+               *down = ldown / 100;
+               break;
+       }
 
        /* we also got some upload info */
        if (slash_ptr) {
-               multi = 1;
+               bw_unit_type = BATADV_BW_UNIT_KBIT;
 
                if (strlen(slash_ptr + 1) > 4) {
                        tmp_ptr = slash_ptr + 1 - 4 + strlen(slash_ptr + 1);
 
                        if (strnicmp(tmp_ptr, "mbit", 4) == 0)
-                               multi = 1024;
+                               bw_unit_type = BATADV_BW_UNIT_MBIT;
 
                        if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
-                           (multi > 1))
+                           (bw_unit_type == BATADV_BW_UNIT_MBIT))
                                *tmp_ptr = '\0';
                }
 
@@ -128,52 +95,149 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                        return false;
                }
 
-               *up = lup * multi;
+               switch (bw_unit_type) {
+               case BATADV_BW_UNIT_MBIT:
+                       *up = lup * 10;
+                       break;
+               case BATADV_BW_UNIT_KBIT:
+               default:
+                       *up = lup / 100;
+                       break;
+               }
        }
 
        return true;
 }
 
+/**
+ * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
+ *  setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+       struct batadv_tvlv_gateway_data gw;
+       uint32_t down, up;
+       char gw_mode;
+
+       gw_mode = atomic_read(&bat_priv->gw_mode);
+
+       switch (gw_mode) {
+       case BATADV_GW_MODE_OFF:
+       case BATADV_GW_MODE_CLIENT:
+               batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+               break;
+       case BATADV_GW_MODE_SERVER:
+               down = atomic_read(&bat_priv->gw.bandwidth_down);
+               up = atomic_read(&bat_priv->gw.bandwidth_up);
+               gw.bandwidth_down = htonl(down);
+               gw.bandwidth_up = htonl(up);
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
+                                              &gw, sizeof(gw));
+               break;
+       }
+}
+
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
                                size_t count)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       long gw_bandwidth_tmp = 0;
-       int up = 0, down = 0;
+       uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
        bool ret;
 
-       ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
+       down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
+       up_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_up);
+
+       ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
        if (!ret)
                goto end;
 
-       if ((!down) || (down < 256))
-               down = 2000;
-
-       if (!up)
-               up = down / 5;
+       if (!down_new)
+               down_new = 1;
 
-       batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
+       if (!up_new)
+               up_new = down_new / 5;
 
-       /* the gw bandwidth we guessed above might not match the given
-        * speeds, hence we need to calculate it back to show the number
-        * that is going to be propagated
-        */
-       batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
+       if (!up_new)
+               up_new = 1;
 
-       if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
+       if ((down_curr == down_new) && (up_curr == up_new))
                return count;
 
        batadv_gw_deselect(bat_priv);
        batadv_info(net_dev,
-                   "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
-                   atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
-                   (down > 2048 ? down / 1024 : down),
-                   (down > 2048 ? "MBit" : "KBit"),
-                   (up > 2048 ? up / 1024 : up),
-                   (up > 2048 ? "MBit" : "KBit"));
+                   "Changing gateway bandwidth from: '%u.%u/%u.%u MBit' to: '%u.%u/%u.%u MBit'\n",
+                   down_curr / 10, down_curr % 10, up_curr / 10, up_curr % 10,
+                   down_new / 10, down_new % 10, up_new / 10, up_new % 10);
 
-       atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
+       atomic_set(&bat_priv->gw.bandwidth_down, down_new);
+       atomic_set(&bat_priv->gw.bandwidth_up, up_new);
+       batadv_gw_tvlv_container_update(bat_priv);
 
 end:
        return count;
 }
+
+/**
+ * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t flags,
+                                         void *tvlv_value,
+                                         uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
+
+       /* only fetch the tvlv value if the handler wasn't called via the
+        * CIFNOTFND flag and if there is data to fetch
+        */
+       if ((flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) ||
+           (tvlv_value_len < sizeof(gateway))) {
+               gateway.bandwidth_down = 0;
+               gateway.bandwidth_up = 0;
+       } else {
+               gateway_ptr = tvlv_value;
+               gateway.bandwidth_down = gateway_ptr->bandwidth_down;
+               gateway.bandwidth_up = gateway_ptr->bandwidth_up;
+               if ((gateway.bandwidth_down == 0) ||
+                   (gateway.bandwidth_up == 0)) {
+                       gateway.bandwidth_down = 0;
+                       gateway.bandwidth_up = 0;
+               }
+       }
+
+       batadv_gw_node_update(bat_priv, orig, &gateway);
+
+       /* restart gateway selection if fast or late switching was enabled */
+       if ((gateway.bandwidth_down != 0) &&
+           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
+           (atomic_read(&bat_priv->gw_sel_class) > 2))
+               batadv_gw_check_election(bat_priv, orig);
+}
+
+/**
+ * batadv_gw_init - initialise the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_init(struct batadv_priv *bat_priv)
+{
+       batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_GW, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+}
+
+/**
+ * batadv_gw_free - free the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_free(struct batadv_priv *bat_priv)
+{
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_GW, 1);
+}
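The parsing and TVLV code above stores bandwidth as multiples of 100 kbit/s: an "mbit" suffix multiplies the parsed value by 10, plain kbit input is divided by 100, and a stored value v is printed as v/10 "." v%10 MBit (so "10mbit" becomes 100 and is shown as "10.0 MBit"). A small userspace sketch of that conversion follows; parse_bw() is a simplified stand-in for batadv_parse_gw_bandwidth() and uses strncasecmp() in place of the kernel's strnicmp().

/* Bandwidth values in multiples of 100 kbit/s (illustrative sketch). */
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

static unsigned int parse_bw(const char *buff)
{
    char *end;
    long val = strtol(buff, &end, 10);

    if (strncasecmp(end, "mbit", 4) == 0)
        return val * 10;    /* MBit -> 100 kbit/s units */

    return val / 100;       /* kbit (default) -> 100 kbit/s units */
}

int main(void)
{
    const char *inputs[] = { "10mbit", "2048kbit", "500" };

    for (int i = 0; i < 3; i++) {
        unsigned int bw = parse_bw(inputs[i]);

        printf("%-10s -> %u.%u MBit\n", inputs[i], bw / 10, bw % 10);
    }
    return 0;
}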
index 509b2bf8c2f4fa6388e095c998dd0006ec3d9b6e..56384a4cd18c98a2ac9010ea14bb89acf3a2d06d 100644 (file)
@@ -26,12 +26,24 @@ enum batadv_gw_modes {
        BATADV_GW_MODE_SERVER,
 };
 
+/**
+ * enum batadv_bandwidth_units - bandwidth unit types
+ * @BATADV_BW_UNIT_KBIT: unit type kbit
+ * @BATADV_BW_UNIT_MBIT: unit type mbit
+ */
+enum batadv_bandwidth_units {
+       BATADV_BW_UNIT_KBIT,
+       BATADV_BW_UNIT_MBIT,
+};
+
 #define BATADV_GW_MODE_OFF_NAME        "off"
 #define BATADV_GW_MODE_CLIENT_NAME     "client"
 #define BATADV_GW_MODE_SERVER_NAME     "server"
 
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
                                size_t count);
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv);
+void batadv_gw_init(struct batadv_priv *bat_priv);
+void batadv_gw_free(struct batadv_priv *bat_priv);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
index c478e6bcf89b8bd570a54a9a811b4309c7a2e5f0..57c2a19dcb5c8e19c410b27528f22eb671ed528a 100644 (file)
@@ -28,6 +28,7 @@
 #include "originator.h"
 #include "hash.h"
 #include "bridge_loop_avoidance.h"
+#include "gateway_client.h"
 
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -124,8 +125,11 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
  *
  * Returns true if the net device is a 802.11 wireless device, false otherwise.
  */
-static bool batadv_is_wifi_netdev(struct net_device *net_device)
+bool batadv_is_wifi_netdev(struct net_device *net_device)
 {
+       if (!net_device)
+               return false;
+
 #ifdef CONFIG_WIRELESS_EXT
        /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
         * check for wireless_handlers != NULL
@@ -141,34 +145,6 @@ static bool batadv_is_wifi_netdev(struct net_device *net_device)
        return false;
 }
 
-/**
- * batadv_is_wifi_iface - check if the given interface represented by ifindex
- *  is a wifi interface
- * @ifindex: interface index to check
- *
- * Returns true if the interface represented by ifindex is a 802.11 wireless
- * device, false otherwise.
- */
-bool batadv_is_wifi_iface(int ifindex)
-{
-       struct net_device *net_device = NULL;
-       bool ret = false;
-
-       if (ifindex == BATADV_NULL_IFINDEX)
-               goto out;
-
-       net_device = dev_get_by_index(&init_net, ifindex);
-       if (!net_device)
-               goto out;
-
-       ret = batadv_is_wifi_netdev(net_device);
-
-out:
-       if (net_device)
-               dev_put(net_device);
-       return ret;
-}
-
 static struct batadv_hard_iface *
 batadv_hardif_get_active(const struct net_device *soft_iface)
 {
@@ -194,22 +170,13 @@ out:
 static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
                                          struct batadv_hard_iface *oldif)
 {
-       struct batadv_vis_packet *vis_packet;
        struct batadv_hard_iface *primary_if;
-       struct sk_buff *skb;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
 
        batadv_dat_init_own_addr(bat_priv, primary_if);
-
-       skb = bat_priv->vis.my_info->skb_packet;
-       vis_packet = (struct batadv_vis_packet *)skb->data;
-       memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(vis_packet->sender_orig,
-              primary_if->net_dev->dev_addr, ETH_ALEN);
-
        batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
        if (primary_if)
@@ -275,16 +242,10 @@ static void batadv_check_known_mac_addr(const struct net_device *net_dev)
 
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
-       const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+       struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        const struct batadv_hard_iface *hard_iface;
-       /* allow big frames if all devices are capable to do so
-        * (have MTU > 1500 + BAT_HEADER_LEN)
-        */
        int min_mtu = ETH_DATA_LEN;
 
-       if (atomic_read(&bat_priv->fragmentation))
-               goto out;
-
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
@@ -294,23 +255,40 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
                if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               min_mtu = min_t(int,
-                               hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
-                               min_mtu);
+               min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
        }
        rcu_read_unlock();
+
+       atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+       if (atomic_read(&bat_priv->fragmentation) == 0)
+               goto out;
+
+       /* with fragmentation enabled the maximum size of internally generated
+        * packets such as translation table exchanges or tvlv containers, etc
+        * has to be calculated
+        */
+       min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+       min_mtu -= sizeof(struct batadv_frag_packet);
+       min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
+       atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+       /* with fragmentation enabled we can fragment external packets easily */
+       min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
+
 out:
-       return min_mtu;
+       return min_mtu - batadv_max_header_len();
 }
 
 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
-       int min_mtu;
+       soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
 
-       min_mtu = batadv_hardif_min_mtu(soft_iface);
-       if (soft_iface->mtu != min_mtu)
-               soft_iface->mtu = min_mtu;
+       /* Check if the local translation table should be cleaned up to match a
+        * new (and smaller) MTU.
+        */
+       batadv_tt_local_resize_to_mtu(soft_iface);
 }
 
 static void
@@ -388,7 +366,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 {
        struct batadv_priv *bat_priv;
        struct net_device *soft_iface, *master;
-       __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+       __be16 ethertype = htons(ETH_P_BATMAN);
+       int max_header_len = batadv_max_header_len();
        int ret;
 
        if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
@@ -453,23 +432,22 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
        dev_add_pack(&hard_iface->batman_adv_ptype);
 
-       atomic_set(&hard_iface->frag_seqno, 1);
        batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
                    hard_iface->net_dev->name);
 
        if (atomic_read(&bat_priv->fragmentation) &&
-           hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+           hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
                batadv_info(hard_iface->soft_iface,
-                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
                            hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-                           ETH_DATA_LEN + BATADV_HEADER_LEN);
+                           ETH_DATA_LEN + max_header_len);
 
        if (!atomic_read(&bat_priv->fragmentation) &&
-           hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+           hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
                batadv_info(hard_iface->soft_iface,
-                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
                            hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-                           ETH_DATA_LEN + BATADV_HEADER_LEN);
+                           ETH_DATA_LEN + max_header_len);
 
        if (batadv_hardif_is_iface_up(hard_iface))
                batadv_hardif_activate_interface(hard_iface);
@@ -533,8 +511,12 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        dev_put(hard_iface->soft_iface);
 
        /* nobody uses this interface anymore */
-       if (!bat_priv->num_ifaces && autodel == BATADV_IF_CLEANUP_AUTO)
-               batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+       if (!bat_priv->num_ifaces) {
+               batadv_gw_check_client_stop(bat_priv);
+
+               if (autodel == BATADV_IF_CLEANUP_AUTO)
+                       batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+       }
 
        netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
        hard_iface->soft_iface = NULL;
@@ -652,6 +634,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
 
        if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
                batadv_sysfs_add_meshif(net_dev);
+               bat_priv = netdev_priv(net_dev);
+               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
                return NOTIFY_DONE;
        }
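batadv_hardif_min_mtu() now distinguishes the two fragmentation cases: without fragmentation the soft interface MTU is bounded by the smallest hard interface MTU, while with fragmentation the reachable payload is the per-fragment size times the maximum number of fragments, capped again at a normal Ethernet MTU, and the encapsulation overhead from batadv_max_header_len() is subtracted at the end. A worked sketch of that calculation follows; every constant below is a placeholder for illustration, not a value used by batman-adv.

/* Worked MTU calculation (illustrative sketch, placeholder constants). */
#include <stdio.h>

#define ETH_DATA_LEN        1500
#define FRAG_MAX_FRAG_SIZE  1400    /* placeholder */
#define FRAG_MAX_FRAGMENTS  16      /* placeholder */
#define FRAG_HEADER_LEN     20      /* placeholder for the fragment header */

static int min_int(int a, int b)
{
    return a < b ? a : b;
}

static int soft_iface_mtu(int smallest_hardif_mtu, int fragmentation,
                          int max_header_len)
{
    int min_mtu = min_int(smallest_hardif_mtu, ETH_DATA_LEN);

    if (fragmentation) {
        /* payload reachable by reassembling several fragments */
        min_mtu = min_int(min_mtu, FRAG_MAX_FRAG_SIZE);
        min_mtu -= FRAG_HEADER_LEN;
        min_mtu *= FRAG_MAX_FRAGMENTS;
        /* external packets are still capped at a normal Ethernet MTU */
        min_mtu = min_int(min_mtu, ETH_DATA_LEN);
    }

    return min_mtu - max_header_len;
}

int main(void)
{
    printf("no fragmentation, 1500 byte links: %d\n",
           soft_iface_mtu(1500, 0, 18));
    printf("fragmentation,    1200 byte links: %d\n",
           soft_iface_mtu(1200, 1, 18));
    return 0;
}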
 
index 49892881a7c5bb581fc92837617380ad5b62752f..df4c8bd45c40748a5442999012ab581adf1d1d71 100644 (file)
@@ -41,6 +41,7 @@ enum batadv_hard_if_cleanup {
 
 extern struct notifier_block batadv_hard_if_notifier;
 
+bool batadv_is_wifi_netdev(struct net_device *net_device);
 struct batadv_hard_iface*
 batadv_hardif_get_by_netdev(const struct net_device *net_dev);
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
@@ -51,7 +52,6 @@ void batadv_hardif_remove_interfaces(void);
 int batadv_hardif_min_mtu(struct net_device *soft_iface);
 void batadv_update_min_mtu(struct net_device *soft_iface);
 void batadv_hardif_free_rcu(struct rcu_head *rcu);
-bool batadv_is_wifi_iface(int ifindex);
 
 static inline void
 batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
index 5a99bb4b6b823c8d2b042aec32cb2ff70ee3a8b9..29ae4efe3543e4bfc6ff013b9c97628b9e395209 100644 (file)
@@ -29,7 +29,7 @@
 static struct batadv_socket_client *batadv_socket_client_hash[256];
 
 static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
-                                    struct batadv_icmp_packet_rr *icmp_packet,
+                                    struct batadv_icmp_header *icmph,
                                     size_t icmp_len);
 
 void batadv_socket_init(void)
@@ -155,13 +155,13 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
        struct batadv_priv *bat_priv = socket_client->bat_priv;
        struct batadv_hard_iface *primary_if = NULL;
        struct sk_buff *skb;
-       struct batadv_icmp_packet_rr *icmp_packet;
-
+       struct batadv_icmp_packet_rr *icmp_packet_rr;
+       struct batadv_icmp_header *icmp_header;
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_neigh_node *neigh_node = NULL;
        size_t packet_len = sizeof(struct batadv_icmp_packet);
 
-       if (len < sizeof(struct batadv_icmp_packet)) {
+       if (len < sizeof(struct batadv_icmp_header)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Error - can't send packet from char device: invalid packet size\n");
                return -EINVAL;
@@ -174,8 +174,10 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
                goto out;
        }
 
-       if (len >= sizeof(struct batadv_icmp_packet_rr))
-               packet_len = sizeof(struct batadv_icmp_packet_rr);
+       if (len >= BATADV_ICMP_MAX_PACKET_SIZE)
+               packet_len = BATADV_ICMP_MAX_PACKET_SIZE;
+       else
+               packet_len = len;
 
        skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
        if (!skb) {
@@ -185,67 +187,78 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
 
        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, ETH_HLEN);
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
+       icmp_header = (struct batadv_icmp_header *)skb_put(skb, packet_len);
 
-       if (copy_from_user(icmp_packet, buff, packet_len)) {
+       if (copy_from_user(icmp_header, buff, packet_len)) {
                len = -EFAULT;
                goto free_skb;
        }
 
-       if (icmp_packet->header.packet_type != BATADV_ICMP) {
+       if (icmp_header->header.packet_type != BATADV_ICMP) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
                len = -EINVAL;
                goto free_skb;
        }
 
-       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+       switch (icmp_header->msg_type) {
+       case BATADV_ECHO_REQUEST:
+               if (len < sizeof(struct batadv_icmp_packet)) {
+                       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                                  "Error - can't send packet from char device: invalid packet size\n");
+                       len = -EINVAL;
+                       goto free_skb;
+               }
+
+               if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+                       goto dst_unreach;
+
+               orig_node = batadv_orig_hash_find(bat_priv, icmp_header->dst);
+               if (!orig_node)
+                       goto dst_unreach;
+
+               neigh_node = batadv_orig_node_get_router(orig_node);
+               if (!neigh_node)
+                       goto dst_unreach;
+
+               if (!neigh_node->if_incoming)
+                       goto dst_unreach;
+
+               if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
+                       goto dst_unreach;
+
+               icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmp_header;
+               if (packet_len == sizeof(*icmp_packet_rr))
+                       memcpy(icmp_packet_rr->rr,
+                              neigh_node->if_incoming->net_dev->dev_addr,
+                              ETH_ALEN);
+
+               break;
+       default:
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+                          "Error - can't send packet from char device: got unknown message type\n");
                len = -EINVAL;
                goto free_skb;
        }
 
-       icmp_packet->uid = socket_client->index;
+       icmp_header->uid = socket_client->index;
 
-       if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
-               icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
-               icmp_packet->header.version = BATADV_COMPAT_VERSION;
-               batadv_socket_add_packet(socket_client, icmp_packet,
+       if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+               icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
+               icmp_header->header.version = BATADV_COMPAT_VERSION;
+               batadv_socket_add_packet(socket_client, icmp_header,
                                         packet_len);
                goto free_skb;
        }
 
-       if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
-               goto dst_unreach;
-
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
-       if (!orig_node)
-               goto dst_unreach;
-
-       neigh_node = batadv_orig_node_get_router(orig_node);
-       if (!neigh_node)
-               goto dst_unreach;
-
-       if (!neigh_node->if_incoming)
-               goto dst_unreach;
-
-       if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
-               goto dst_unreach;
-
-       memcpy(icmp_packet->orig,
-              primary_if->net_dev->dev_addr, ETH_ALEN);
-
-       if (packet_len == sizeof(struct batadv_icmp_packet_rr))
-               memcpy(icmp_packet->rr,
-                      neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
+       memcpy(icmp_header->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
        goto out;
 
 dst_unreach:
-       icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
-       batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
+       icmp_header->msg_type = BATADV_DESTINATION_UNREACHABLE;
+       batadv_socket_add_packet(socket_client, icmp_header, packet_len);
 free_skb:
        kfree_skb(skb);
 out:
@@ -298,27 +311,40 @@ err:
        return -ENOMEM;
 }
 
+/**
+ * batadv_socket_add_packet - schedule an icmp packet to be sent to userspace
+ *  on an icmp socket.
+ * @socket_client: the socket this packet belongs to
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
 static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
-                                    struct batadv_icmp_packet_rr *icmp_packet,
+                                    struct batadv_icmp_header *icmph,
                                     size_t icmp_len)
 {
        struct batadv_socket_packet *socket_packet;
+       size_t len;
 
        socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
 
        if (!socket_packet)
                return;
 
+       len = icmp_len;
+       /* check the maximum length before filling the buffer */
+       if (len > sizeof(socket_packet->icmp_packet))
+               len = sizeof(socket_packet->icmp_packet);
+
        INIT_LIST_HEAD(&socket_packet->list);
-       memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
-       socket_packet->icmp_len = icmp_len;
+       memcpy(&socket_packet->icmp_packet, icmph, len);
+       socket_packet->icmp_len = len;
 
        spin_lock_bh(&socket_client->lock);
 
        /* while waiting for the lock the socket_client could have been
         * deleted
         */
-       if (!batadv_socket_client_hash[icmp_packet->uid]) {
+       if (!batadv_socket_client_hash[icmph->uid]) {
                spin_unlock_bh(&socket_client->lock);
                kfree(socket_packet);
                return;
@@ -342,12 +368,18 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
        wake_up(&socket_client->queue_wait);
 }
 
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+/**
+ * batadv_socket_receive_packet - schedule an icmp packet to be received
+ *  locally and sent to userspace.
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
                                  size_t icmp_len)
 {
        struct batadv_socket_client *hash;
 
-       hash = batadv_socket_client_hash[icmp_packet->uid];
+       hash = batadv_socket_client_hash[icmph->uid];
        if (hash)
-               batadv_socket_add_packet(hash, icmp_packet, icmp_len);
+               batadv_socket_add_packet(hash, icmph, icmp_len);
 }
index 1fcca37b62234d92b2e6092dcdc4ccfed4d76ba2..6665080dff7950bc904c54fef35f91dd3135946a 100644 (file)
@@ -24,7 +24,7 @@
 
 void batadv_socket_init(void);
 int batadv_socket_setup(struct batadv_priv *bat_priv);
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
                                  size_t icmp_len);
 
 #endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
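The icmp socket write path above no longer insists on a full batadv_icmp_packet: any buffer of at least sizeof(struct batadv_icmp_header) is accepted, the copied length is clamped to BATADV_ICMP_MAX_PACKET_SIZE, and the message type switch enforces per-type minimum sizes (echo requests still need the full packet). A simplified userspace sketch of that length handling follows; the size constants are placeholders, not the kernel values.

/* Simplified length handling on the icmp socket write path (sketch). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ICMP_HEADER_LEN       20    /* placeholder: sizeof(batadv_icmp_header) */
#define ICMP_PACKET_LEN       40    /* placeholder: sizeof(batadv_icmp_packet) */
#define ICMP_MAX_PACKET_SIZE  100   /* placeholder: BATADV_ICMP_MAX_PACKET_SIZE */

static bool icmp_write_len_ok(size_t len, bool is_echo_request,
                              size_t *packet_len)
{
    if (len < ICMP_HEADER_LEN)
        return false;       /* too short for any icmp message */

    /* never copy more than the largest supported packet */
    *packet_len = len > ICMP_MAX_PACKET_SIZE ? ICMP_MAX_PACKET_SIZE : len;

    if (is_echo_request && len < ICMP_PACKET_LEN)
        return false;       /* echo requests need the full packet */

    return true;
}

int main(void)
{
    size_t packet_len = 0;
    bool ok;

    ok = icmp_write_len_ok(10, true, &packet_len);
    printf("len=10  accepted: %d\n", ok);

    ok = icmp_write_len_ok(40, true, &packet_len);
    printf("len=40  accepted: %d, copied %zu\n", ok, packet_len);

    ok = icmp_write_len_ok(500, true, &packet_len);
    printf("len=500 accepted: %d, copied %zu\n", ok, packet_len);

    return 0;
}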
index c72d1bcdcf4906a1a23808cab2a464804a3fcda6..c51a5e568f0a80c08beb4ded5b34b1aff8d5cfdf 100644 (file)
 #include "gateway_client.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
-#include "vis.h"
+#include "gateway_common.h"
 #include "hash.h"
 #include "bat_algo.h"
 #include "network-coding.h"
+#include "fragmentation.h"
 
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -65,6 +66,7 @@ static int __init batadv_init(void)
        batadv_recv_handler_init();
 
        batadv_iv_init();
+       batadv_nc_init();
 
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
@@ -108,9 +110,11 @@ int batadv_mesh_init(struct net_device *soft_iface)
        spin_lock_init(&bat_priv->tt.req_list_lock);
        spin_lock_init(&bat_priv->tt.roam_list_lock);
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
+       spin_lock_init(&bat_priv->tt.commit_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
-       spin_lock_init(&bat_priv->vis.hash_lock);
-       spin_lock_init(&bat_priv->vis.list_lock);
+       spin_lock_init(&bat_priv->tvlv.container_list_lock);
+       spin_lock_init(&bat_priv->tvlv.handler_list_lock);
+       spin_lock_init(&bat_priv->softif_vlan_list_lock);
 
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
@@ -118,6 +122,9 @@ int batadv_mesh_init(struct net_device *soft_iface)
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_LIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
+       INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
+       INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
+       INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
 
        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
@@ -127,13 +134,6 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
-                           BATADV_NULL_IFINDEX);
-
-       ret = batadv_vis_init(bat_priv);
-       if (ret < 0)
-               goto err;
-
        ret = batadv_bla_init(bat_priv);
        if (ret < 0)
                goto err;
@@ -142,10 +142,12 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       ret = batadv_nc_init(bat_priv);
+       ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0)
                goto err;
 
+       batadv_gw_init(bat_priv);
+
        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
@@ -164,10 +166,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
 
        batadv_purge_outstanding_packets(bat_priv, NULL);
 
-       batadv_vis_quit(bat_priv);
-
        batadv_gw_node_purge(bat_priv);
-       batadv_nc_free(bat_priv);
+       batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);
 
@@ -184,6 +184,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
         */
        batadv_originator_free(bat_priv);
 
+       batadv_gw_free(bat_priv);
+
        free_percpu(bat_priv->bat_counters);
        bat_priv->bat_counters = NULL;
 
@@ -253,6 +255,31 @@ out:
        return primary_if;
 }
 
+/**
+ * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ *  payload packet
+ *
+ * Return the maximum encapsulation overhead in bytes.
+ */
+int batadv_max_header_len(void)
+{
+       int header_len = 0;
+
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_unicast_packet));
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_unicast_4addr_packet));
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_bcast_packet));
+
+#ifdef CONFIG_BATMAN_ADV_NC
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_coded_packet));
+#endif
+
+       return header_len;
+}
+
 /**
  * batadv_skb_set_priority - sets skb priority according to packet content
  * @skb: the packet to be sent
@@ -391,22 +418,31 @@ static void batadv_recv_handler_init(void)
        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;
 
-       /* batman icmp packet */
-       batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+       for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
+               batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
+
+       /* compile time checks for struct member offsets */
+       BUILD_BUG_ON(offsetof(struct batadv_unicast_4addr_packet, src) != 10);
+       BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+
+       /* broadcast packet */
+       batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
+
+       /* unicast packets ... */
        /* unicast with 4 addresses packet */
        batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
-       /* fragmented unicast packet */
-       batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
-       /* broadcast packet */
-       batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
-       /* vis packet */
-       batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
-       /* Translation table query (request or response) */
-       batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
-       /* Roaming advertisement */
-       batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
+       /* unicast tvlv packet */
+       batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
+       /* batman icmp packet */
+       batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+       /* Fragmented packets */
+       batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
 int
@@ -414,7 +450,12 @@ batadv_recv_handler_register(uint8_t packet_type,
                             int (*recv_handler)(struct sk_buff *,
                                                 struct batadv_hard_iface *))
 {
-       if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
+       int (*curr)(struct sk_buff *,
+                   struct batadv_hard_iface *);
+       curr = batadv_rx_handler[packet_type];
+
+       if ((curr != batadv_recv_unhandled_packet) &&
+           (curr != batadv_recv_unhandled_unicast_packet))
                return -EBUSY;
 
        batadv_rx_handler[packet_type] = recv_handler;
@@ -460,7 +501,9 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
            !bat_algo_ops->bat_iface_update_mac ||
            !bat_algo_ops->bat_primary_iface_set ||
            !bat_algo_ops->bat_ogm_schedule ||
-           !bat_algo_ops->bat_ogm_emit) {
+           !bat_algo_ops->bat_ogm_emit ||
+           !bat_algo_ops->bat_neigh_cmp ||
+           !bat_algo_ops->bat_neigh_is_equiv_or_better) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
                ret = -EINVAL;
@@ -535,6 +578,601 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
        return htonl(crc);
 }
 
+/**
+ * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
+ *  possibly free it
+ * @tvlv_handler: the tvlv handler to free
+ */
+static void
+batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
+{
+       if (atomic_dec_and_test(&tvlv_handler->refcount))
+               kfree_rcu(tvlv_handler, rcu);
+}
+
+/**
+ * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ *  based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @version: tvlv handler version to look for
+ *
+ * Returns tvlv handler if found or NULL otherwise.
+ */
+static struct batadv_tvlv_handler
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
+                        uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tvlv_handler_tmp,
+                                &bat_priv->tvlv.handler_list, list) {
+               if (tvlv_handler_tmp->type != type)
+                       continue;
+
+               if (tvlv_handler_tmp->version != version)
+                       continue;
+
+               if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
+                       continue;
+
+               tvlv_handler = tvlv_handler_tmp;
+               break;
+       }
+       rcu_read_unlock();
+
+       return tvlv_handler;
+}
+
+/**
+ * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
+ *  possibly free it
+ * @tvlv: the tvlv container to free
+ */
+static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
+{
+       if (atomic_dec_and_test(&tvlv->refcount))
+               kfree(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ *  list based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to look for
+ * @version: tvlv container version to look for
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns tvlv container if found or NULL otherwise.
+ */
+static struct batadv_tvlv_container
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
+                          uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
+
+       hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
+               if (tvlv_tmp->tvlv_hdr.type != type)
+                       continue;
+
+               if (tvlv_tmp->tvlv_hdr.version != version)
+                       continue;
+
+               if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
+                       continue;
+
+               tvlv = tvlv_tmp;
+               break;
+       }
+
+       return tvlv;
+}
+
+/**
+ * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ *  list entries
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns size of all currently registered tvlv containers in bytes.
+ */
+static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+{
+       struct batadv_tvlv_container *tvlv;
+       uint16_t tvlv_len = 0;
+
+       hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+               tvlv_len += sizeof(struct batadv_tvlv_hdr);
+               tvlv_len += ntohs(tvlv->tvlv_hdr.len);
+       }
+
+       return tvlv_len;
+}
+
+/**
+ * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
+ *  list
+ * @tvlv: the to be removed tvlv container
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ */
+static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+{
+       if (!tvlv)
+               return;
+
+       hlist_del(&tvlv->list);
+
+       /* first call to decrement the counter, second call to free */
+       batadv_tvlv_container_free_ref(tvlv);
+       batadv_tvlv_container_free_ref(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ *  provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to unregister
+ * @version: tvlv container version to unregister
+ */
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+                                     uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_container *tvlv;
+
+       spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+       tvlv = batadv_tvlv_container_get(bat_priv, type, version);
+       batadv_tvlv_container_remove(tvlv);
+       spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_container_register - register tvlv type, version and content
+ *  to be propagated with each (primary interface) OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type
+ * @version: tvlv container version
+ * @tvlv_value: tvlv container content
+ * @tvlv_value_len: tvlv container content length
+ *
+ * If a container of the same type and version was already registered the new
+ * content is going to replace the old one.
+ */
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version,
+                                   void *tvlv_value, uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_container *tvlv_old, *tvlv_new;
+
+       if (!tvlv_value)
+               tvlv_value_len = 0;
+
+       tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
+       if (!tvlv_new)
+               return;
+
+       tvlv_new->tvlv_hdr.version = version;
+       tvlv_new->tvlv_hdr.type = type;
+       tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
+
+       memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
+       INIT_HLIST_NODE(&tvlv_new->list);
+       atomic_set(&tvlv_new->refcount, 1);
+
+       spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+       tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
+       batadv_tvlv_container_remove(tvlv_old);
+       hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
+       spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ *  requested packet size
+ * @packet_buff: packet buffer
+ * @packet_buff_len: packet buffer size
+ * @min_packet_len: requested packet minimum size
+ * @additional_packet_len: requested additional packet size on top of minimum
+ *  size
+ *
+ * Returns true if the packet buffer could be changed to the requested size,
+ * false otherwise.
+ */
+static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
+                                           int *packet_buff_len,
+                                           int min_packet_len,
+                                           int additional_packet_len)
+{
+       unsigned char *new_buff;
+
+       new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
+
+       /* keep old buffer if kmalloc should fail */
+       if (new_buff) {
+               memcpy(new_buff, *packet_buff, min_packet_len);
+               kfree(*packet_buff);
+               *packet_buff = new_buff;
+               *packet_buff_len = min_packet_len + additional_packet_len;
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ *  OGM packet buffer
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_buff: ogm packet buffer
+ * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
+ *  content
+ * @packet_min_len: ogm header size to be preserved for the OGM itself
+ *
+ * The ogm packet might be enlarged or shrunk depending on the current size
+ * and the size of the to-be-appended tvlv containers.
+ *
+ * Returns size of all appended tvlv containers in bytes.
+ */
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+                                         unsigned char **packet_buff,
+                                         int *packet_buff_len,
+                                         int packet_min_len)
+{
+       struct batadv_tvlv_container *tvlv;
+       struct batadv_tvlv_hdr *tvlv_hdr;
+       uint16_t tvlv_value_len;
+       void *tvlv_value;
+       bool ret;
+
+       spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+       tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
+
+       ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
+                                             packet_min_len, tvlv_value_len);
+
+       if (!ret)
+               goto end;
+
+       if (!tvlv_value_len)
+               goto end;
+
+       tvlv_value = (*packet_buff) + packet_min_len;
+
+       hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+               tvlv_hdr = tvlv_value;
+               tvlv_hdr->type = tvlv->tvlv_hdr.type;
+               tvlv_hdr->version = tvlv->tvlv_hdr.version;
+               tvlv_hdr->len = tvlv->tvlv_hdr.len;
+               tvlv_value = tvlv_hdr + 1;
+               memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
+               tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+       }
+
+end:
+       spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+       return tvlv_value_len;
+}
+
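
Editor's note: a hedged sketch of how an OGM scheduler might drive the append helper above. The snippet is illustrative, not the real caller; the function name is invented and BATADV_OGM_HLEN is assumed to be the OGM header length constant:

/* illustrative only, not part of this patch */
static void example_ogm_append_tvlv(struct batadv_priv *bat_priv,
                                    unsigned char **ogm_buff,
                                    int *ogm_buff_len)
{
        struct batadv_ogm_packet *ogm_packet;
        uint16_t tvlv_len;

        /* may reallocate *ogm_buff; returns the appended tvlv length */
        tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
                                                    ogm_buff_len,
                                                    BATADV_OGM_HLEN);

        /* the buffer may have moved, so re-derive the header pointer */
        ogm_packet = (struct batadv_ogm_packet *)*ogm_buff;
        ogm_packet->tvlv_len = htons(tvlv_len);
}
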
+/**
+ * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ *  appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tvlv_handler: tvlv callback function handling the tvlv content
+ * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS if the handler was not found or the tvlv was
+ * carried by an OGM, otherwise the return value of the unicast handler
+ * callback.
+ */
+static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
+                                   struct batadv_tvlv_handler *tvlv_handler,
+                                   bool ogm_source,
+                                   struct batadv_orig_node *orig_node,
+                                   uint8_t *src, uint8_t *dst,
+                                   void *tvlv_value, uint16_t tvlv_value_len)
+{
+       if (!tvlv_handler)
+               return NET_RX_SUCCESS;
+
+       if (ogm_source) {
+               if (!tvlv_handler->ogm_handler)
+                       return NET_RX_SUCCESS;
+
+               if (!orig_node)
+                       return NET_RX_SUCCESS;
+
+               tvlv_handler->ogm_handler(bat_priv, orig_node,
+                                         BATADV_NO_FLAGS,
+                                         tvlv_value, tvlv_value_len);
+               tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
+       } else {
+               if (!src)
+                       return NET_RX_SUCCESS;
+
+               if (!dst)
+                       return NET_RX_SUCCESS;
+
+               if (!tvlv_handler->unicast_handler)
+                       return NET_RX_SUCCESS;
+
+               return tvlv_handler->unicast_handler(bat_priv, src,
+                                                    dst, tvlv_value,
+                                                    tvlv_value_len);
+       }
+
+       return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ *  appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS when processing an OGM, otherwise the OR'ed return
+ * values of all called handler callbacks.
+ */
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+                                  bool ogm_source,
+                                  struct batadv_orig_node *orig_node,
+                                  uint8_t *src, uint8_t *dst,
+                                  void *tvlv_value, uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_handler *tvlv_handler;
+       struct batadv_tvlv_hdr *tvlv_hdr;
+       uint16_t tvlv_value_cont_len;
+       uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+       int ret = NET_RX_SUCCESS;
+
+       while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
+               tvlv_hdr = tvlv_value;
+               tvlv_value_cont_len = ntohs(tvlv_hdr->len);
+               tvlv_value = tvlv_hdr + 1;
+               tvlv_value_len -= sizeof(*tvlv_hdr);
+
+               if (tvlv_value_cont_len > tvlv_value_len)
+                       break;
+
+               tvlv_handler = batadv_tvlv_handler_get(bat_priv,
+                                                      tvlv_hdr->type,
+                                                      tvlv_hdr->version);
+
+               ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
+                                               ogm_source, orig_node,
+                                               src, dst, tvlv_value,
+                                               tvlv_value_cont_len);
+               if (tvlv_handler)
+                       batadv_tvlv_handler_free_ref(tvlv_handler);
+               tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+               tvlv_value_len -= tvlv_value_cont_len;
+       }
+
+       if (!ogm_source)
+               return ret;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tvlv_handler,
+                                &bat_priv->tvlv.handler_list, list) {
+               if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
+                   !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
+                       tvlv_handler->ogm_handler(bat_priv, orig_node,
+                                                 cifnotfound, NULL, 0);
+
+               tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
+       }
+       rcu_read_unlock();
+
+       return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ *  handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @batadv_ogm_packet: ogm packet containing the tvlv containers
+ * @orig_node: orig node emitting the ogm packet
+ */
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+                            struct batadv_ogm_packet *batadv_ogm_packet,
+                            struct batadv_orig_node *orig_node)
+{
+       void *tvlv_value;
+       uint16_t tvlv_value_len;
+
+       if (!batadv_ogm_packet)
+               return;
+
+       tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
+       if (!tvlv_value_len)
+               return;
+
+       tvlv_value = batadv_ogm_packet + 1;
+
+       batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
+                                      tvlv_value, tvlv_value_len);
+}
+
+/**
+ * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ *  type and version (both need to match) for ogm tvlv payload and/or unicast
+ *  payload
+ * @bat_priv: the bat priv with all the soft interface information
+ * @optr: ogm tvlv handler callback function. This function receives the orig
+ *  node, flags and the tvlv content as arguments to process.
+ * @uptr: unicast tvlv handler callback function. This function receives the
+ *  source & destination of the unicast packet as well as the tvlv content
+ *  to process.
+ * @type: tvlv handler type to be registered
+ * @version: tvlv handler version to be registered
+ * @flags: flags to enable or disable TVLV API behavior
+ */
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+                                 void (*optr)(struct batadv_priv *bat_priv,
+                                              struct batadv_orig_node *orig,
+                                              uint8_t flags,
+                                              void *tvlv_value,
+                                              uint16_t tvlv_value_len),
+                                 int (*uptr)(struct batadv_priv *bat_priv,
+                                             uint8_t *src, uint8_t *dst,
+                                             void *tvlv_value,
+                                             uint16_t tvlv_value_len),
+                                 uint8_t type, uint8_t version, uint8_t flags)
+{
+       struct batadv_tvlv_handler *tvlv_handler;
+
+       tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+       if (tvlv_handler) {
+               batadv_tvlv_handler_free_ref(tvlv_handler);
+               return;
+       }
+
+       tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
+       if (!tvlv_handler)
+               return;
+
+       tvlv_handler->ogm_handler = optr;
+       tvlv_handler->unicast_handler = uptr;
+       tvlv_handler->type = type;
+       tvlv_handler->version = version;
+       tvlv_handler->flags = flags;
+       atomic_set(&tvlv_handler->refcount, 1);
+       INIT_HLIST_NODE(&tvlv_handler->list);
+
+       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+       hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
+       spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+}
+
+/**
+ * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ *  provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to be unregistered
+ * @version: tvlv handler version to be unregistered
+ */
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_handler *tvlv_handler;
+
+       tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+       if (!tvlv_handler)
+               return;
+
+       batadv_tvlv_handler_free_ref(tvlv_handler);
+       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+       hlist_del_rcu(&tvlv_handler->list);
+       spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+       batadv_tvlv_handler_free_ref(tvlv_handler);
+}
+
+/**
+ * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ *  specified host
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @type: tvlv type
+ * @version: tvlv version
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ */
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+                             uint8_t *dst, uint8_t type, uint8_t version,
+                             void *tvlv_value, uint16_t tvlv_value_len)
+{
+       struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+       struct batadv_tvlv_hdr *tvlv_hdr;
+       struct batadv_orig_node *orig_node;
+       struct sk_buff *skb = NULL;
+       unsigned char *tvlv_buff;
+       unsigned int tvlv_len;
+       ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
+       bool ret = false;
+
+       orig_node = batadv_orig_hash_find(bat_priv, dst);
+       if (!orig_node)
+               goto out;
+
+       tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
+
+       skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
+       if (!skb)
+               goto out;
+
+       skb->priority = TC_PRIO_CONTROL;
+       skb_reserve(skb, ETH_HLEN);
+       tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
+       unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
+       unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
+       unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
+       unicast_tvlv_packet->header.ttl = BATADV_TTL;
+       unicast_tvlv_packet->reserved = 0;
+       unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
+       unicast_tvlv_packet->align = 0;
+       memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
+       memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);
+
+       tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
+       tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
+       tvlv_hdr->version = version;
+       tvlv_hdr->type = type;
+       tvlv_hdr->len = htons(tvlv_value_len);
+       tvlv_buff += sizeof(*tvlv_hdr);
+       memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
+
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+               ret = true;
+
+out:
+       if (skb && !ret)
+               kfree_skb(skb);
+       if (orig_node)
+               batadv_orig_node_free_ref(orig_node);
+}
+
+/**
+ * batadv_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet
+ * @header_len: length of the batman header preceding the ethernet header
+ *
+ * If the packet embedded in the skb is vlan tagged this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ */
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
+{
+       struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
+       struct vlan_ethhdr *vhdr;
+       unsigned short vid;
+
+       if (ethhdr->h_proto != htons(ETH_P_8021Q))
+               return BATADV_NO_FLAGS;
+
+       if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
+               return BATADV_NO_FLAGS;
+
+       vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
+       vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+       vid |= BATADV_VLAN_HAS_TAG;
+
+       return vid;
+}
+
 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
 {
        struct batadv_algo_ops *bat_algo_ops;
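
Editor's note: taken together, the new TVLV API above follows a simple register/parse pattern. A hedged sketch of how a hypothetical feature could announce an empty container with every OGM and react to its presence or absence on other nodes; EXAMPLE_TVLV_TYPE and both function names are made up for illustration, only the batadv_tvlv_*_register() calls and the BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag come from this patch:

/* illustrative only, not part of this patch */
#define EXAMPLE_TVLV_TYPE       0xfe

static void example_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                                        struct batadv_orig_node *orig,
                                        uint8_t flags,
                                        void *tvlv_value,
                                        uint16_t tvlv_value_len)
{
        /* react to the (possibly missing) container of a neighbour, e.g.
         * toggle a capability bit as the network coding code does
         */
}

static void example_tvlv_init(struct batadv_priv *bat_priv)
{
        /* announce an (empty) container with every OGM ... */
        batadv_tvlv_container_register(bat_priv, EXAMPLE_TVLV_TYPE, 1,
                                       NULL, 0);

        /* ... and parse the same container received from other nodes */
        batadv_tvlv_handler_register(bat_priv, example_tvlv_ogm_handler_v1,
                                     NULL, EXAMPLE_TVLV_TYPE, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}
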
index 24675523930fe6b17b7ee15e2b1eece635ff87fe..f94f287b867083e755bd62dcc56af88f30837b33 100644 (file)
 #ifndef _NET_BATMAN_ADV_MAIN_H_
 #define _NET_BATMAN_ADV_MAIN_H_
 
-#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
-                            "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <mareklindner@neomailbox.ch>, " \
+                            "Simon Wunderlich <sw@simonwunderlich.de>"
 #define BATADV_DRIVER_DESC   "B.A.T.M.A.N. advanced"
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.4.0"
+#define BATADV_SOURCE_VERSION "2013.5.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
 /* numbers of originator to contact for any PUT/GET DHT operation */
 #define BATADV_DAT_CANDIDATES_NUM 3
 
-#define BATADV_VIS_INTERVAL 5000       /* 5 seconds */
+/**
+ * BATADV_TQ_SIMILARITY_THRESHOLD - maximum number of TQ points by which a
+ *  secondary metric may differ from the primary one and still be considered
+ *  acceptable
+ */
+#define BATADV_TQ_SIMILARITY_THRESHOLD 50
 
 /* how much worse secondary interfaces may be to be considered as bonding
  * candidates
@@ -133,6 +137,15 @@ enum batadv_uev_type {
 
 #define BATADV_GW_THRESHOLD    50
 
+/* Number of fragment chains for each orig_node */
+#define BATADV_FRAG_BUFFER_COUNT 8
+/* Maximum number of fragments for one packet */
+#define BATADV_FRAG_MAX_FRAGMENTS 16
+/* Maximum size of each fragment */
+#define BATADV_FRAG_MAX_FRAG_SIZE 1400
+/* Time to keep fragments while waiting for the rest of the fragments */
+#define BATADV_FRAG_TIMEOUT 10000
+
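
Editor's note: a back-of-envelope sketch of what these limits mean for a payload. Illustrative only; it ignores the per-fragment header overhead, so it is an optimistic bound, and the helper name is invented:

/* illustrative only, not part of this patch */
#include <linux/kernel.h>

static inline bool example_payload_fits(unsigned int payload_len)
{
        unsigned int frags;

        frags = DIV_ROUND_UP(payload_len, BATADV_FRAG_MAX_FRAG_SIZE);

        return frags <= BATADV_FRAG_MAX_FRAGMENTS;
}
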
 #define BATADV_DAT_CANDIDATE_NOT_FOUND 0
 #define BATADV_DAT_CANDIDATE_ORIG      1
 
@@ -160,15 +173,9 @@ enum batadv_uev_type {
 #include <net/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/seq_file.h>
-#include "types.h"
+#include <linux/if_vlan.h>
 
-/**
- * batadv_vlan_flags - flags for the four MSB of any vlan ID field
- * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
- */
-enum batadv_vlan_flags {
-       BATADV_VLAN_HAS_TAG     = BIT(15),
-};
+#include "types.h"
 
 #define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
                               (int)(vid & VLAN_VID_MASK) : -1)
@@ -184,6 +191,7 @@ void batadv_mesh_free(struct net_device *soft_iface);
 int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+int batadv_max_header_len(void);
 void batadv_skb_set_priority(struct sk_buff *skb, int offset);
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
@@ -326,4 +334,40 @@ static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version,
+                                   void *tvlv_value, uint16_t tvlv_value_len);
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+                                         unsigned char **packet_buff,
+                                         int *packet_buff_len,
+                                         int packet_min_len);
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+                            struct batadv_ogm_packet *batadv_ogm_packet,
+                            struct batadv_orig_node *orig_node);
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+                                     uint8_t type, uint8_t version);
+
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+                                 void (*optr)(struct batadv_priv *bat_priv,
+                                              struct batadv_orig_node *orig,
+                                              uint8_t flags,
+                                              void *tvlv_value,
+                                              uint16_t tvlv_value_len),
+                                 int (*uptr)(struct batadv_priv *bat_priv,
+                                             uint8_t *src, uint8_t *dst,
+                                             void *tvlv_value,
+                                             uint16_t tvlv_value_len),
+                                 uint8_t type, uint8_t version, uint8_t flags);
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version);
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+                                  bool ogm_source,
+                                  struct batadv_orig_node *orig_node,
+                                  uint8_t *src, uint8_t *dst,
+                                  void *tvlv_buff, uint16_t tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+                             uint8_t *dst, uint8_t type, uint8_t version,
+                             void *tvlv_value, uint16_t tvlv_value_len);
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
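
Editor's note: a hedged sketch of how the newly exported batadv_get_vid() and the BATADV_PRINT_VID() macro fit together. The function name is invented and BATADV_DBG_TT is just one possible log category:

/* illustrative only, not part of this patch */
static void example_log_vid(struct batadv_priv *bat_priv,
                            struct sk_buff *skb, size_t hdr_size)
{
        /* VID of the payload behind hdr_size bytes of batman-adv header,
         * BATADV_NO_FLAGS if the payload is untagged
         */
        unsigned short vid = batadv_get_vid(skb, hdr_size);

        batadv_dbg(BATADV_DBG_TT, bat_priv, "payload vid: %d\n",
                   BATADV_PRINT_VID(vid));
}
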
index a487d46e0aeccdb72ab4ad6e361dd0b4b87a7c05..351e199bc0afff37aa9c1a4b3f40201956dfde91 100644 (file)
@@ -34,6 +34,20 @@ static void batadv_nc_worker(struct work_struct *work);
 static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
+/**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+       int ret;
+
+       /* Register our packet type */
+       ret = batadv_recv_handler_register(BATADV_CODED,
+                                          batadv_nc_recv_coded_packet);
+
+       return ret;
+}
+
 /**
  * batadv_nc_start_timer - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
@@ -45,10 +59,63 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ *  after network coding setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
-int batadv_nc_init(struct batadv_priv *bat_priv)
+static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+       char nc_mode;
+
+       nc_mode = atomic_read(&bat_priv->network_coding);
+
+       switch (nc_mode) {
+       case 0:
+               batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+               break;
+       case 1:
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
+                                              NULL, 0);
+               break;
+       }
+}
+
+/**
+ * batadv_nc_status_update - update the network coding tvlv container after
+ *  network coding setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_nc_status_update(struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       batadv_nc_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t flags,
+                                         void *tvlv_value,
+                                         uint16_t tvlv_value_len)
+{
+       if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+       else
+               orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+}
+
+/**
+ * batadv_nc_mesh_init - initialise coding hash table and start housekeeping
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
        bat_priv->nc.timestamp_fwd_flush = jiffies;
        bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,14 +137,13 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
        batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
                                   &batadv_nc_decoding_hash_lock_class_key);
 
-       /* Register our packet type */
-       if (batadv_recv_handler_register(BATADV_CODED,
-                                        batadv_nc_recv_coded_packet) < 0)
-               goto err;
-
        INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
        batadv_nc_start_timer(bat_priv);
 
+       batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_NC, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+       batadv_nc_tvlv_container_update(bat_priv);
        return 0;
 
 err:
@@ -793,6 +859,10 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
        if (!atomic_read(&bat_priv->network_coding))
                goto out;
 
+       /* check if orig node is network coding enabled */
+       if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+               goto out;
+
        /* accept ogms from 'good' neighbors and single hop neighbors */
        if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
            !is_single_hop_neigh)
@@ -933,7 +1003,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
                                   struct batadv_nc_packet *nc_packet,
                                   struct batadv_neigh_node *neigh_node)
 {
-       uint8_t tq_weighted_neigh, tq_weighted_coding;
+       uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
        struct sk_buff *skb_dest, *skb_src;
        struct batadv_unicast_packet *packet1;
        struct batadv_unicast_packet *packet2;
@@ -958,8 +1028,10 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        if (!router_coding)
                goto out;
 
-       tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg);
-       tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg);
+       tq_tmp = batadv_nc_random_weight_tq(router_neigh->bat_iv.tq_avg);
+       tq_weighted_neigh = tq_tmp;
+       tq_tmp = batadv_nc_random_weight_tq(router_coding->bat_iv.tq_avg);
+       tq_weighted_coding = tq_tmp;
 
        /* Select one destination for the MAC-header dst-field based on
         * weighted TQ-values.
@@ -1721,12 +1793,13 @@ free_nc_packet:
 }
 
 /**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
-       batadv_recv_handler_unregister(BATADV_CODED);
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
        cancel_delayed_work_sync(&bat_priv->nc.work);
 
        batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
index 85a4ec81ad50bda26449cfdadbcaf28e62391b8b..d4fd315b5261904c1ca990578fc562b0bae41f89 100644 (file)
 
 #ifdef CONFIG_BATMAN_ADV_NC
 
-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+void batadv_nc_status_update(struct net_device *net_dev);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node,
                              struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +48,21 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
 
 #else /* ifdef CONFIG_BATMAN_ADV_NC */
 
-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline void batadv_nc_status_update(struct net_device *net_dev)
+{
+}
+
+static inline int batadv_nc_init(void)
+{
+       return 0;
+}
+
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
        return 0;
 }
 
-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
        return;
 }
index f50553a7de629a411d94d307ba2ea54327e7b28f..8ab14340d10f6433c849face4852e4de1b5b9547 100644 (file)
 #include "routing.h"
 #include "gateway_client.h"
 #include "hard-interface.h"
-#include "unicast.h"
 #include "soft-interface.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
+#include "fragmentation.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -36,7 +36,7 @@ static struct lock_class_key batadv_orig_hash_lock_class_key;
 static void batadv_purge_orig(struct work_struct *work);
 
 /* returns 1 if they are the same originator */
-static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
+int batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);
@@ -44,6 +44,88 @@ static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+/**
+ * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns the vlan object identified by vid and belonging to orig_node or NULL
+ * if it does not exist.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+                         unsigned short vid)
+{
+       struct batadv_orig_node_vlan *vlan = NULL, *tmp;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+               if (tmp->vid != vid)
+                       continue;
+
+               if (!atomic_inc_not_zero(&tmp->refcount))
+                       continue;
+
+               vlan = tmp;
+
+               break;
+       }
+       rcu_read_unlock();
+
+       return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ *  object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns NULL in case of failure or the vlan object identified by vid and
+ * belonging to orig_node otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+                         unsigned short vid)
+{
+       struct batadv_orig_node_vlan *vlan;
+
+       spin_lock_bh(&orig_node->vlan_list_lock);
+
+       /* first look if an object for this vid already exists */
+       vlan = batadv_orig_node_vlan_get(orig_node, vid);
+       if (vlan)
+               goto out;
+
+       vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+       if (!vlan)
+               goto out;
+
+       atomic_set(&vlan->refcount, 2);
+       vlan->vid = vid;
+
+       list_add_rcu(&vlan->list, &orig_node->vlan_list);
+
+out:
+       spin_unlock_bh(&orig_node->vlan_list_lock);
+
+       return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
+ *  the originator-vlan object
+ * @orig_vlan: the originator-vlan object to release
+ */
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
+{
+       if (atomic_dec_and_test(&orig_vlan->refcount))
+               kfree_rcu(orig_vlan, rcu);
+}
+
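
Editor's note: a short, hedged usage sketch for the per-VLAN originator state introduced above. Callers get (or create) the object and must drop their reference when done, exactly as batadv_orig_node_new() does below for the untagged VLAN; the function name here is invented:

/* illustrative only, not part of this patch */
static bool example_touch_orig_vlan(struct batadv_orig_node *orig_node,
                                    unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan;

        /* returned with its refcount already increased */
        vlan = batadv_orig_node_vlan_new(orig_node, vid);
        if (!vlan)
                return false;

        /* ... update per-VLAN state here ... */

        batadv_orig_node_vlan_free_ref(vlan);
        return true;
}
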
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
        if (bat_priv->orig_hash)
@@ -90,11 +172,20 @@ batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
        return router;
 }
 
+/**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @hard_iface: the interface the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ * @orig_node: originator object representing the neighbour
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-                     const uint8_t *neigh_addr)
+                     const uint8_t *neigh_addr,
+                     struct batadv_orig_node *orig_node)
 {
-       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_neigh_node *neigh_node;
 
        neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
@@ -104,15 +195,14 @@ batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
        INIT_HLIST_NODE(&neigh_node->list);
 
        memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
-       spin_lock_init(&neigh_node->lq_update_lock);
+       neigh_node->if_incoming = hard_iface;
+       neigh_node->orig_node = orig_node;
+
+       INIT_LIST_HEAD(&neigh_node->bonding_list);
 
        /* extra reference for return */
        atomic_set(&neigh_node->refcount, 2);
 
-       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Creating new neighbor %pM on interface %s\n", neigh_addr,
-                  hard_iface->net_dev->name);
-
 out:
        return neigh_node;
 }
@@ -146,13 +236,15 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
-       batadv_frag_list_free(&orig_node->frag_list);
-       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
+       batadv_frag_purge_orig(orig_node, NULL);
+
+       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
                                  "originator timed out");
 
+       if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+               orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
        kfree(orig_node->tt_buff);
-       kfree(orig_node->bcast_own);
-       kfree(orig_node->bcast_own_sum);
        kfree(orig_node);
 }
 
@@ -210,20 +302,22 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
        batadv_hash_destroy(hash);
 }
 
-/* this function finds or creates an originator entry for the given
- * address if it does not exits
+/**
+ * batadv_orig_node_new - creates a new orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the originator
+ *
+ * Creates a new originator object and initialises all the generic fields.
+ * The new object is not added to the originator list.
+ * Returns the newly created object or NULL on failure.
  */
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr)
 {
        struct batadv_orig_node *orig_node;
-       int size;
-       int hash_added;
+       struct batadv_orig_node_vlan *vlan;
        unsigned long reset_time;
-
-       orig_node = batadv_orig_hash_find(bat_priv, addr);
-       if (orig_node)
-               return orig_node;
+       int i;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new originator: %pM\n", addr);
@@ -234,10 +328,12 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
 
        INIT_HLIST_HEAD(&orig_node->neigh_list);
        INIT_LIST_HEAD(&orig_node->bond_list);
-       spin_lock_init(&orig_node->ogm_cnt_lock);
+       INIT_LIST_HEAD(&orig_node->vlan_list);
        spin_lock_init(&orig_node->bcast_seqno_lock);
        spin_lock_init(&orig_node->neigh_list_lock);
        spin_lock_init(&orig_node->tt_buff_lock);
+       spin_lock_init(&orig_node->tt_lock);
+       spin_lock_init(&orig_node->vlan_list_lock);
 
        batadv_nc_init_orig(orig_node);
 
@@ -249,43 +345,32 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
        memcpy(orig_node->orig, addr, ETH_ALEN);
        batadv_dat_init_orig_node_addr(orig_node);
        orig_node->router = NULL;
-       orig_node->tt_crc = 0;
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
-       atomic_set(&orig_node->tt_size, 0);
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
        orig_node->batman_seqno_reset = reset_time;
 
        atomic_set(&orig_node->bond_candidates, 0);
 
-       size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
-
-       orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
-       if (!orig_node->bcast_own)
+       /* create a vlan object for the "untagged" LAN */
+       vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
+       if (!vlan)
                goto free_orig_node;
+       /* batadv_orig_node_vlan_new() increases the refcounter.
+        * Immediately release vlan since it is not needed anymore in this
+        * context
+        */
+       batadv_orig_node_vlan_free_ref(vlan);
 
-       size = bat_priv->num_ifaces * sizeof(uint8_t);
-       orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-
-       INIT_LIST_HEAD(&orig_node->frag_list);
-       orig_node->last_frag_packet = 0;
-
-       if (!orig_node->bcast_own_sum)
-               goto free_bcast_own;
-
-       hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
-                                    batadv_choose_orig, orig_node,
-                                    &orig_node->hash_entry);
-       if (hash_added != 0)
-               goto free_bcast_own_sum;
+       for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+               INIT_HLIST_HEAD(&orig_node->fragments[i].head);
+               spin_lock_init(&orig_node->fragments[i].lock);
+               orig_node->fragments[i].size = 0;
+       }
 
        return orig_node;
-free_bcast_own_sum:
-       kfree(orig_node->bcast_own_sum);
-free_bcast_own:
-       kfree(orig_node->bcast_own);
 free_orig_node:
        kfree(orig_node);
        return NULL;
@@ -294,15 +379,16 @@ free_orig_node:
 static bool
 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node,
-                           struct batadv_neigh_node **best_neigh_node)
+                           struct batadv_neigh_node **best_neigh)
 {
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
        struct batadv_hard_iface *if_incoming;
 
-       *best_neigh_node = NULL;
+       *best_neigh = NULL;
 
        spin_lock_bh(&orig_node->neigh_list_lock);
 
@@ -335,9 +421,12 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                        batadv_bonding_candidate_del(orig_node, neigh_node);
                        batadv_neigh_node_free_ref(neigh_node);
                } else {
-                       if ((!*best_neigh_node) ||
-                           (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
-                               *best_neigh_node = neigh_node;
+                       /* store the best neighbor if this is the first
+                        * iteration or if a better neighbor has been found
+                        */
+                       if (!*best_neigh ||
+                           bao->bat_neigh_cmp(neigh_node, *best_neigh) > 0)
+                               *best_neigh = neigh_node;
                }
        }
 
@@ -388,17 +477,14 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
-                               if (orig_node->gw_flags)
-                                       batadv_gw_node_delete(bat_priv,
-                                                             orig_node);
+                               batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }
 
-                       if (batadv_has_timed_out(orig_node->last_frag_packet,
-                                                BATADV_FRAG_TIMEOUT))
-                               batadv_frag_list_free(&orig_node->frag_list);
+                       batadv_frag_purge_orig(orig_node,
+                                              batadv_frag_check_entry);
                }
                spin_unlock_bh(list_lock);
        }
@@ -429,100 +515,26 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
        struct batadv_hard_iface *primary_if;
-       struct batadv_orig_node *orig_node;
-       struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
-       int batman_count = 0;
-       int last_seen_secs;
-       int last_seen_msecs;
-       unsigned long last_seen_jiffies;
-       uint32_t i;
 
        primary_if = batadv_seq_print_text_primary_if_get(seq);
        if (!primary_if)
-               goto out;
+               return 0;
 
-       seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+       seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
-                  primary_if->net_dev->dev_addr, net_dev->name);
-       seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
-                  "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
-                  "Nexthop", "outgoingIF", "Potential nexthops");
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       neigh_node = batadv_orig_node_get_router(orig_node);
-                       if (!neigh_node)
-                               continue;
-
-                       if (neigh_node->tq_avg == 0)
-                               goto next;
-
-                       last_seen_jiffies = jiffies - orig_node->last_seen;
-                       last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
-                       last_seen_secs = last_seen_msecs / 1000;
-                       last_seen_msecs = last_seen_msecs % 1000;
-
-                       seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
-                                  orig_node->orig, last_seen_secs,
-                                  last_seen_msecs, neigh_node->tq_avg,
-                                  neigh_node->addr,
-                                  neigh_node->if_incoming->net_dev->name);
-
-                       hlist_for_each_entry_rcu(neigh_node_tmp,
-                                                &orig_node->neigh_list, list) {
-                               seq_printf(seq, " %pM (%3i)",
-                                          neigh_node_tmp->addr,
-                                          neigh_node_tmp->tq_avg);
-                       }
+                  primary_if->net_dev->dev_addr, net_dev->name,
+                  bat_priv->bat_algo_ops->name);
 
-                       seq_puts(seq, "\n");
-                       batman_count++;
+       batadv_hardif_free_ref(primary_if);
 
-next:
-                       batadv_neigh_node_free_ref(neigh_node);
-               }
-               rcu_read_unlock();
+       if (!bat_priv->bat_algo_ops->bat_orig_print) {
+               seq_puts(seq,
+                        "No printing function for this routing protocol\n");
+               return 0;
        }
 
-       if (batman_count == 0)
-               seq_puts(seq, "No batman nodes in range ...\n");
-
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return 0;
-}
-
-static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
-                                  int max_if_num)
-{
-       void *data_ptr;
-       size_t data_size, old_size;
-
-       data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
-       old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
-       data_ptr = kmalloc(data_size, GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       memcpy(data_ptr, orig_node->bcast_own, old_size);
-       kfree(orig_node->bcast_own);
-       orig_node->bcast_own = data_ptr;
-
-       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       memcpy(data_ptr, orig_node->bcast_own_sum,
-              (max_if_num - 1) * sizeof(uint8_t));
-       kfree(orig_node->bcast_own_sum);
-       orig_node->bcast_own_sum = data_ptr;
+       bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);
 
        return 0;
 }
@@ -531,6 +543,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
@@ -545,10 +558,10 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
-                       ret = batadv_orig_node_add_if(orig_node, max_if_num);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+                       ret = 0;
+                       if (bao->bat_orig_add_if)
+                               ret = bao->bat_orig_add_if(orig_node,
+                                                          max_if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
@@ -562,54 +575,6 @@ err:
        return -ENOMEM;
 }
 
-static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
-                                  int max_if_num, int del_if_num)
-{
-       void *data_ptr = NULL;
-       int chunk_size;
-
-       /* last interface was removed */
-       if (max_if_num == 0)
-               goto free_bcast_own;
-
-       chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
-       data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       /* copy first part */
-       memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
-
-       /* copy second part */
-       memcpy((char *)data_ptr + del_if_num * chunk_size,
-              orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
-              (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
-       kfree(orig_node->bcast_own);
-       orig_node->bcast_own = data_ptr;
-
-       if (max_if_num == 0)
-               goto free_own_sum;
-
-       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       memcpy(data_ptr, orig_node->bcast_own_sum,
-              del_if_num * sizeof(uint8_t));
-
-       memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
-              orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
-              (max_if_num - del_if_num) * sizeof(uint8_t));
-
-free_own_sum:
-       kfree(orig_node->bcast_own_sum);
-       orig_node->bcast_own_sum = data_ptr;
-
-       return 0;
-}
-
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
@@ -618,6 +583,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        uint32_t i;
        int ret;
 
@@ -629,11 +595,11 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
-                       ret = batadv_orig_node_del_if(orig_node, max_if_num,
-                                                     hard_iface->if_num);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+                       ret = 0;
+                       if (bao->bat_orig_del_if)
+                               ret = bao->bat_orig_del_if(orig_node,
+                                                          max_if_num,
+                                                          hard_iface->if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
index 7887b84a9af43adbff91cb8e3695b7f29c36a399..6f77d808a91672d77a48426cd5d391abf366bfb4 100644 (file)
 
 #include "hash.h"
 
+int batadv_compare_orig(const struct hlist_node *node, const void *data2);
 int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr);
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-                     const uint8_t *neigh_addr);
+                     const uint8_t *neigh_addr,
+                     struct batadv_orig_node *orig_node);
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
 struct batadv_neigh_node *
 batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
@@ -40,6 +42,13 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+                         unsigned short vid);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+                         unsigned short vid);
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
 
 
 /* hashfunction to choose an entry in a hash table of given size
index a51ccfc39da4144fa2ccee27b0e0afca741034dd..207459b62966d0975bca70894fcf5ce266a5da29 100644 (file)
 #ifndef _NET_BATMAN_ADV_PACKET_H_
 #define _NET_BATMAN_ADV_PACKET_H_
 
+/**
+ * enum batadv_packettype - types for batman-adv encapsulated packets
+ * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
+ * @BATADV_BCAST: broadcast packets carrying broadcast payload
+ * @BATADV_CODED: network coded packets
+ *
+ * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
+ * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
+ *     payload packet
+ * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
+ *     the sender
+ * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
+ * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
+ */
 enum batadv_packettype {
-       BATADV_IV_OGM           = 0x01,
-       BATADV_ICMP             = 0x02,
-       BATADV_UNICAST          = 0x03,
-       BATADV_BCAST            = 0x04,
-       BATADV_VIS              = 0x05,
-       BATADV_UNICAST_FRAG     = 0x06,
-       BATADV_TT_QUERY         = 0x07,
-       BATADV_ROAM_ADV         = 0x08,
-       BATADV_UNICAST_4ADDR    = 0x09,
-       BATADV_CODED            = 0x0a,
+       /* 0x00 - 0x3f: local packets or special rules for handling */
+       BATADV_IV_OGM           = 0x00,
+       BATADV_BCAST            = 0x01,
+       BATADV_CODED            = 0x02,
+       /* 0x40 - 0x7f: unicast */
+#define BATADV_UNICAST_MIN     0x40
+       BATADV_UNICAST          = 0x40,
+       BATADV_UNICAST_FRAG     = 0x41,
+       BATADV_UNICAST_4ADDR    = 0x42,
+       BATADV_ICMP             = 0x43,
+       BATADV_UNICAST_TVLV     = 0x44,
+#define BATADV_UNICAST_MAX     0x7f
+       /* 0x80 - 0xff: reserved */
 };
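The regrouped number space is what makes forward compatibility possible: the unhandled-unicast handler further down in this patch relies on every type between BATADV_UNICAST_MIN and BATADV_UNICAST_MAX starting with a regular unicast header, so unknown types can still be routed. A minimal sketch of the range check, in plain userspace C with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

#define BATADV_UNICAST_MIN 0x40
#define BATADV_UNICAST_MAX 0x7f

/* hypothetical helper, not part of the patch: true for any packet type in
 * the unicast range, including types this node does not implement
 */
static bool batadv_type_is_unicast(uint8_t packet_type)
{
        return packet_type >= BATADV_UNICAST_MIN &&
               packet_type <= BATADV_UNICAST_MAX;
}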
 
 /**
@@ -48,13 +65,21 @@ enum batadv_subtype {
 };
 
 /* this file is included by batctl which needs these defines */
-#define BATADV_COMPAT_VERSION 14
+#define BATADV_COMPAT_VERSION 15
 
+/**
+ * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and
+ *     was previously received from someone other than the best neighbor.
+ * @BATADV_PRIMARIES_FIRST_HOP: flag is set when the primary interface address
+ *     is used, and the packet travels its first hop.
+ * @BATADV_DIRECTLINK: flag is set on the first hop or when the packet is
+ *     rebroadcast by a one-hop neighbor on the interface where it was
+ *     originally received.
+ */
 enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
-       BATADV_PRIMARIES_FIRST_HOP = BIT(4),
-       BATADV_VIS_SERVER          = BIT(5),
-       BATADV_DIRECTLINK          = BIT(6),
+       BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
+       BATADV_PRIMARIES_FIRST_HOP = BIT(1),
+       BATADV_DIRECTLINK          = BIT(2),
 };
 
 /* ICMP message types */
@@ -66,43 +91,44 @@ enum batadv_icmp_packettype {
        BATADV_PARAMETER_PROBLEM       = 12,
 };
 
-/* vis defines */
-enum batadv_vis_packettype {
-       BATADV_VIS_TYPE_SERVER_SYNC   = 0,
-       BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
-};
-
-/* fragmentation defines */
-enum batadv_unicast_frag_flags {
-       BATADV_UNI_FRAG_HEAD      = BIT(0),
-       BATADV_UNI_FRAG_LARGETAIL = BIT(1),
-};
+/* tt data subtypes */
+#define BATADV_TT_DATA_TYPE_MASK 0x0F
 
-/* TT_QUERY subtypes */
-#define BATADV_TT_QUERY_TYPE_MASK 0x3
-
-enum batadv_tt_query_packettype {
-       BATADV_TT_REQUEST  = 0,
-       BATADV_TT_RESPONSE = 1,
-};
-
-/* TT_QUERY flags */
-enum batadv_tt_query_flags {
-       BATADV_TT_FULL_TABLE = BIT(2),
+/**
+ * enum batadv_tt_data_flags - flags for tt data tvlv
+ * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
+ * @BATADV_TT_REQUEST: TT request message
+ * @BATADV_TT_RESPONSE: TT response message
+ * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
+ */
+enum batadv_tt_data_flags {
+       BATADV_TT_OGM_DIFF   = BIT(0),
+       BATADV_TT_REQUEST    = BIT(1),
+       BATADV_TT_RESPONSE   = BIT(2),
+       BATADV_TT_FULL_TABLE = BIT(4),
 };
 
 /* BATADV_TT_CLIENT flags.
  * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
- * BIT(15) are used for local computation only
+ * BIT(15) are used for local computation only.
+ * Flags from BIT(4) to BIT(7) are kept in sync with the rest of the network.
  */
 enum batadv_tt_client_flags {
        BATADV_TT_CLIENT_DEL     = BIT(0),
        BATADV_TT_CLIENT_ROAM    = BIT(1),
-       BATADV_TT_CLIENT_WIFI    = BIT(2),
-       BATADV_TT_CLIENT_TEMP    = BIT(3),
+       BATADV_TT_CLIENT_WIFI    = BIT(4),
        BATADV_TT_CLIENT_NOPURGE = BIT(8),
        BATADV_TT_CLIENT_NEW     = BIT(9),
        BATADV_TT_CLIENT_PENDING = BIT(10),
+       BATADV_TT_CLIENT_TEMP    = BIT(11),
+};
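Per the comment above, bits 0-7 travel on the wire, bits 8-15 are local bookkeeping and bits 4-7 are additionally kept in sync across the mesh. A masking sketch in userspace C; the mask and helper names are hypothetical, not taken from the patch:

#include <stdint.h>

#define TT_CLIENT_WIRE_MASK  0x00ff     /* BIT(0)-BIT(7): sent on the wire */
#define TT_CLIENT_LOCAL_MASK 0xff00     /* BIT(8)-BIT(15): local only */
#define TT_CLIENT_SYNC_MASK  0x00f0     /* BIT(4)-BIT(7): synced mesh-wide */

/* hypothetical helper: strip local-only flags before putting the client
 * flags into a TT TVLV
 */
static uint16_t tt_client_wire_flags(uint16_t flags)
{
        return flags & TT_CLIENT_WIRE_MASK;
}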
+
+/**
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+       BATADV_VLAN_HAS_TAG     = BIT(15),
 };
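The four most significant bits of any vid field below carry batadv_vlan_flags such as BATADV_VLAN_HAS_TAG. A small decoding sketch in userspace C, assuming the low 12 bits hold the 802.1Q VLAN ID; the helper names are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>          /* ntohs() */

#define BATADV_VLAN_HAS_TAG (1 << 15)

/* hypothetical helpers, assuming the low 12 bits of the field carry the
 * 802.1Q VLAN ID and the four MSB carry batadv_vlan_flags
 */
static bool vid_is_tagged(uint16_t vid_be)
{
        return ntohs(vid_be) & BATADV_VLAN_HAS_TAG;
}

static uint16_t vid_to_vlan_id(uint16_t vid_be)
{
        return ntohs(vid_be) & 0x0fff;
}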
 
 /* claim frame types for the bridge loop avoidance */
@@ -113,6 +139,22 @@ enum batadv_bla_claimframe {
        BATADV_CLAIM_TYPE_REQUEST       = 0x03,
 };
 
+/**
+ * enum batadv_tvlv_type - tvlv type definitions
+ * @BATADV_TVLV_GW: gateway tvlv
+ * @BATADV_TVLV_DAT: distributed arp table tvlv
+ * @BATADV_TVLV_NC: network coding tvlv
+ * @BATADV_TVLV_TT: translation table tvlv
+ * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ */
+enum batadv_tvlv_type {
+       BATADV_TVLV_GW          = 0x01,
+       BATADV_TVLV_DAT         = 0x02,
+       BATADV_TVLV_NC          = 0x03,
+       BATADV_TVLV_TT          = 0x04,
+       BATADV_TVLV_ROAM        = 0x05,
+};
+
 /* the destination hardware field in the ARP frame is used to
  * transport the claim type and the group id
  */
@@ -131,47 +173,74 @@ struct batadv_header {
         */
 };
 
+/**
+ * struct batadv_ogm_packet - ogm (routing protocol) packet
+ * @header: common batman packet header
+ * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @tvlv_len: length of tvlv data following the ogm header
+ */
 struct batadv_ogm_packet {
        struct batadv_header header;
-       uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
+       uint8_t  flags;
        __be32   seqno;
        uint8_t  orig[ETH_ALEN];
        uint8_t  prev_sender[ETH_ALEN];
-       uint8_t  gw_flags;  /* flags related to gateway class */
+       uint8_t  reserved;
        uint8_t  tq;
-       uint8_t  tt_num_changes;
-       uint8_t  ttvn; /* translation table version number */
-       __be16   tt_crc;
-} __packed;
+       __be16   tvlv_len;
+       /* __packed is not needed as the struct size is divisible by 4,
+        * and the largest data type in this struct has a size of 4.
+        */
+};
 
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
-struct batadv_icmp_packet {
+/**
+ * struct batadv_icmp_header - common ICMP header
+ * @header: common batman header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ */
+struct batadv_icmp_header {
        struct batadv_header header;
        uint8_t  msg_type; /* see ICMP message types above */
        uint8_t  dst[ETH_ALEN];
        uint8_t  orig[ETH_ALEN];
-       __be16   seqno;
        uint8_t  uid;
+};
+
+/**
+ * struct batadv_icmp_packet - ICMP packet
+ * @icmph: common ICMP header
+ * @reserved: not used - useful for alignment
+ * @seqno: ICMP sequence number
+ */
+struct batadv_icmp_packet {
+       struct batadv_icmp_header icmph;
        uint8_t  reserved;
+       __be16   seqno;
 };
 
 #define BATADV_RR_LEN 16
 
-/* icmp_packet_rr must start with all fields from imcp_packet
- * as this is assumed by code that handles ICMP packets
+/**
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * @icmph: common ICMP header
+ * @rr_cur: number of entries in the rr array
+ * @seqno: ICMP sequence number
+ * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-       struct batadv_header header;
-       uint8_t  msg_type; /* see ICMP message types above */
-       uint8_t  dst[ETH_ALEN];
-       uint8_t  orig[ETH_ALEN];
-       __be16   seqno;
-       uint8_t  uid;
+       struct batadv_icmp_header icmph;
        uint8_t  rr_cur;
+       __be16   seqno;
        uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
+#define BATADV_ICMP_MAX_PACKET_SIZE    sizeof(struct batadv_icmp_packet_rr)
+
 /* All packet headers in front of an ethernet header have to be completely
  * divisible by 2 but not by 4 to make the payload after the ethernet
  * header again 4 bytes boundary aligned.
@@ -209,15 +278,32 @@ struct batadv_unicast_4addr_packet {
         */
 };
 
-struct batadv_unicast_frag_packet {
-       struct batadv_header header;
-       uint8_t  ttvn; /* destination translation table version number */
-       uint8_t  dest[ETH_ALEN];
-       uint8_t  flags;
-       uint8_t  align;
-       uint8_t  orig[ETH_ALEN];
-       __be16   seqno;
-} __packed;
+/**
+ * struct batadv_frag_packet - fragmented packet
+ * @header: common batman packet header with type, compat version, and ttl
+ * @dest: final destination used when routing fragments
+ * @orig: originator of the fragment used when merging the packet
+ * @no: fragment number within this sequence
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @total_size: size of the merged packet
+ */
+struct batadv_frag_packet {
+       struct  batadv_header header;
+#if defined(__BIG_ENDIAN_BITFIELD)
+       uint8_t no:4;
+       uint8_t reserved:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       uint8_t reserved:4;
+       uint8_t no:4;
+#else
+#error "unknown bitfield endianness"
+#endif
+       uint8_t dest[ETH_ALEN];
+       uint8_t orig[ETH_ALEN];
+       __be16  seqno;
+       __be16  total_size;
+};
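The endianness #ifdef above only reorders the two bitfield declarations; with the usual compiler allocation rules the 4-bit fragment number lands in the same bits of the on-wire byte on either byte order. An explicit, layout-independent equivalent as a userspace sketch (hypothetical helper names; the bit placement is an assumption based on those allocation rules):

#include <stdint.h>

/* hypothetical helpers: pack/unpack the fragment number, assuming it ends
 * up in the four most significant bits of the byte following the batman
 * header and the reserved bits stay zero
 */
static uint8_t frag_no_pack(uint8_t no)
{
        return (uint8_t)((no & 0x0f) << 4);
}

static uint8_t frag_no_unpack(uint8_t first_byte)
{
        return first_byte >> 4;
}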
 
 struct batadv_bcast_packet {
        struct batadv_header header;
@@ -231,54 +317,6 @@ struct batadv_bcast_packet {
 
 #pragma pack()
 
-struct batadv_vis_packet {
-       struct batadv_header header;
-       uint8_t  vis_type;       /* which type of vis-participant sent this? */
-       __be32   seqno;          /* sequence number */
-       uint8_t  entries;        /* number of entries behind this struct */
-       uint8_t  reserved;
-       uint8_t  vis_orig[ETH_ALEN];    /* originator reporting its neighbors */
-       uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
-       uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
-};
-
-struct batadv_tt_query_packet {
-       struct batadv_header header;
-       /* the flag field is a combination of:
-        * - TT_REQUEST or TT_RESPONSE
-        * - TT_FULL_TABLE
-        */
-       uint8_t  flags;
-       uint8_t  dst[ETH_ALEN];
-       uint8_t  src[ETH_ALEN];
-       /* the ttvn field is:
-        * if TT_REQUEST: ttvn that triggered the
-        *                request
-        * if TT_RESPONSE: new ttvn for the src
-        *                 orig_node
-        */
-       uint8_t  ttvn;
-       /* tt_data field is:
-        * if TT_REQUEST: crc associated with the
-        *                ttvn
-        * if TT_RESPONSE: table_size
-        */
-       __be16 tt_data;
-} __packed;
-
-struct batadv_roam_adv_packet {
-       struct batadv_header header;
-       uint8_t  reserved;
-       uint8_t  dst[ETH_ALEN];
-       uint8_t  src[ETH_ALEN];
-       uint8_t  client[ETH_ALEN];
-} __packed;
-
-struct batadv_tt_change {
-       uint8_t flags;
-       uint8_t addr[ETH_ALEN];
-} __packed;
-
 /**
  * struct batadv_coded_packet - network coded packet
  * @header: common batman packet header and ttl of first included packet
@@ -311,4 +349,96 @@ struct batadv_coded_packet {
        __be16   coded_len;
 };
 
+/**
+ * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
+ * @header: common batman packet header
+ * @reserved: reserved field (for packet alignment)
+ * @src: address of the source
+ * @dst: address of the destination
+ * @tvlv_len: length of tvlv data following the unicast tvlv header
+ * @align: 2 bytes to align the header to a 4 byte boundary
+ */
+struct batadv_unicast_tvlv_packet {
+       struct batadv_header header;
+       uint8_t  reserved;
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  src[ETH_ALEN];
+       __be16   tvlv_len;
+       uint16_t align;
+};
+
+/**
+ * struct batadv_tvlv_hdr - base tvlv header struct
+ * @type: tvlv container type (see batadv_tvlv_type)
+ * @version: tvlv container version
+ * @len: tvlv container length
+ */
+struct batadv_tvlv_hdr {
+       uint8_t type;
+       uint8_t version;
+       __be16  len;
+};
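Assuming TVLV containers are laid out back to back, each header immediately followed by len bytes of data, a receiver can walk a buffer as sketched below. This is illustrative userspace C with a mirrored struct and a hypothetical function name, not the in-kernel batadv_tvlv_containers_process():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct tvlv_hdr {               /* mirrors struct batadv_tvlv_hdr */
        uint8_t  type;
        uint8_t  version;
        uint16_t len;           /* big endian on the wire */
} __attribute__((packed));

static void tvlv_walk(const uint8_t *buff, size_t buff_len)
{
        while (buff_len >= sizeof(struct tvlv_hdr)) {
                const struct tvlv_hdr *hdr = (const void *)buff;
                size_t step = sizeof(*hdr) + ntohs(hdr->len);

                if (step > buff_len)
                        break;  /* truncated container, stop */

                printf("tvlv type=0x%02x version=%u len=%u\n",
                       hdr->type, hdr->version, ntohs(hdr->len));

                buff += step;
                buff_len -= step;
        }
}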
+
+/**
+ * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
+ *  container
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
+ */
+struct batadv_tvlv_gateway_data {
+       __be32 bandwidth_down;
+       __be32 bandwidth_up;
+};
+
+/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ *  one batadv_tvlv_tt_vlan_data object per announced vlan
+ */
+struct batadv_tvlv_tt_data {
+       uint8_t flags;
+       uint8_t ttvn;
+       __be16  num_vlan;
+};
+
+/**
+ * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
+ *  the tt tvlv container
+ * @crc: crc32 checksum of the entries belonging to this vlan
+ * @vid: vlan identifier
+ * @reserved: unused, useful for alignment purposes
+ */
+struct batadv_tvlv_tt_vlan_data {
+       __be32  crc;
+       __be16  vid;
+       uint16_t reserved;
+};
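Since the TT TVLV data is followed by num_vlan of the per-VLAN records above, a receiver has to validate the advertised count against the container length before trusting it. A length-check sketch in userspace C with mirrored structs; tt_data_len() is a hypothetical name:

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

struct tt_data {                /* mirrors struct batadv_tvlv_tt_data */
        uint8_t  flags;
        uint8_t  ttvn;
        uint16_t num_vlan;      /* big endian */
} __attribute__((packed));

struct tt_vlan_data {           /* mirrors struct batadv_tvlv_tt_vlan_data */
        uint32_t crc;           /* big endian */
        uint16_t vid;           /* big endian */
        uint16_t reserved;
} __attribute__((packed));

/* returns the total size of the TT data plus its per-VLAN records, or 0 if
 * the buffer is too short for the advertised num_vlan
 */
static size_t tt_data_len(const uint8_t *buff, size_t buff_len)
{
        const struct tt_data *tt = (const void *)buff;
        size_t needed;

        if (buff_len < sizeof(*tt))
                return 0;

        needed = sizeof(*tt) +
                 (size_t)ntohs(tt->num_vlan) * sizeof(struct tt_vlan_data);

        return needed <= buff_len ? needed : 0;
}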
+
+/**
+ * struct batadv_tvlv_tt_change - translation table diff data
+ * @flags: status indicators concerning the non-mesh client (see
+ *  batadv_tt_client_flags)
+ * @reserved: reserved field
+ * @addr: mac address of non-mesh client that triggered this tt change
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_tt_change {
+       uint8_t flags;
+       uint8_t reserved;
+       uint8_t addr[ETH_ALEN];
+       __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_roam_adv - roaming advertisement
+ * @client: mac address of roaming client
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_roam_adv {
+       uint8_t  client[ETH_ALEN];
+       __be16 vid;
+};
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
index 0439395d7ba5f3f11eea51ebc53e9760ff175111..d4114d775ad61f659b59437c67aa7054d9a88f92 100644 (file)
 #include "icmp_socket.h"
 #include "translation-table.h"
 #include "originator.h"
-#include "vis.h"
-#include "unicast.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
 #include "network-coding.h"
+#include "fragmentation.h"
+
+#include <linux/if_vlan.h>
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
@@ -46,7 +47,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
        if ((curr_router) && (!neigh_node)) {
                batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
                           "Deleting route towards: %pM\n", orig_node->orig);
-               batadv_tt_global_del_orig(bat_priv, orig_node,
+               batadv_tt_global_del_orig(bat_priv, orig_node, -1,
                                          "Deleted route towards originator");
 
        /* route added */
@@ -114,9 +115,19 @@ out:
        return;
 }
 
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+/**
+ * batadv_bonding_candidate_add - consider a new link for bonding mode towards
+ *  the given originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the target node
+ * @neigh_node: the neighbor representing the new link to consider for bonding
+ *  mode
+ */
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+                                 struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node)
 {
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
        uint8_t interference_candidate = 0;
 
@@ -131,8 +142,9 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
        if (!router)
                goto candidate_del;
 
+
        /* ... and is good enough to be considered */
-       if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
+       if (bao->bat_neigh_is_equiv_or_better(neigh_node, router))
                goto candidate_del;
 
        /* check if we have another candidate with the same mac address or
@@ -248,46 +260,65 @@ bool batadv_check_management_packet(struct sk_buff *skb,
        return true;
 }
 
+/**
+ * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: icmp packet to process
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
 static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
-                                     struct sk_buff *skb, size_t icmp_len)
+                                     struct sk_buff *skb)
 {
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_orig_node *orig_node = NULL;
-       struct batadv_icmp_packet_rr *icmp_packet;
-       int ret = NET_RX_DROP;
+       struct batadv_icmp_header *icmph;
+       int res, ret = NET_RX_DROP;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+       icmph = (struct batadv_icmp_header *)skb->data;
 
-       /* add data to device queue */
-       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
-               batadv_socket_receive_packet(icmp_packet, icmp_len);
-               goto out;
-       }
+       switch (icmph->msg_type) {
+       case BATADV_ECHO_REPLY:
+       case BATADV_DESTINATION_UNREACHABLE:
+       case BATADV_TTL_EXCEEDED:
+               /* receive the packet */
+               if (skb_linearize(skb) < 0)
+                       break;
 
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
+               batadv_socket_receive_packet(icmph, skb->len);
+               break;
+       case BATADV_ECHO_REQUEST:
+               /* answer echo request (ping) */
+               primary_if = batadv_primary_if_get_selected(bat_priv);
+               if (!primary_if)
+                       goto out;
 
-       /* answer echo request (ping) */
-       /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
-       if (!orig_node)
-               goto out;
+               /* get routing information */
+               orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
+               if (!orig_node)
+                       goto out;
 
-       /* create a copy of the skb, if needed, to modify it. */
-       if (skb_cow(skb, ETH_HLEN) < 0)
-               goto out;
+               /* create a copy of the skb, if needed, to modify it. */
+               if (skb_cow(skb, ETH_HLEN) < 0)
+                       goto out;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+               icmph = (struct batadv_icmp_header *)skb->data;
 
-       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       icmp_packet->msg_type = BATADV_ECHO_REPLY;
-       icmp_packet->header.ttl = BATADV_TTL;
+               memcpy(icmph->dst, icmph->orig, ETH_ALEN);
+               memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+               icmph->msg_type = BATADV_ECHO_REPLY;
+               icmph->header.ttl = BATADV_TTL;
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = NET_RX_SUCCESS;
+               res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+               if (res != NET_XMIT_DROP)
+                       ret = NET_RX_SUCCESS;
 
+               break;
+       default:
+               /* drop unknown type */
+               goto out;
+       }
 out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -307,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
        /* send TTL exceeded if packet is an echo request (traceroute) */
-       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+       if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-                        icmp_packet->orig, icmp_packet->dst);
+                        icmp_packet->icmph.orig, icmp_packet->icmph.dst);
                goto out;
        }
 
@@ -318,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
                goto out;
 
        /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
        if (!orig_node)
                goto out;
 
@@ -328,10 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
-       icmp_packet->header.ttl = BATADV_TTL;
+       memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
+       memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+              ETH_ALEN);
+       icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
+       icmp_packet->icmph.header.ttl = BATADV_TTL;
 
        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
@@ -349,16 +381,13 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
 {
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_icmp_packet_rr *icmp_packet;
+       struct batadv_icmp_header *icmph;
+       struct batadv_icmp_packet_rr *icmp_packet_rr;
        struct ethhdr *ethhdr;
        struct batadv_orig_node *orig_node = NULL;
-       int hdr_size = sizeof(struct batadv_icmp_packet);
+       int hdr_size = sizeof(struct batadv_icmp_header);
        int ret = NET_RX_DROP;
 
-       /* we truncate all incoming icmp packets if they don't match our size */
-       if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
-               hdr_size = sizeof(struct batadv_icmp_packet_rr);
-
        /* drop packet if it has not necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                goto out;
@@ -377,26 +406,39 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                goto out;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+       icmph = (struct batadv_icmp_header *)skb->data;
 
        /* add record route information if not full */
-       if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
-           (icmp_packet->rr_cur < BATADV_RR_LEN)) {
-               memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
+       if ((icmph->msg_type == BATADV_ECHO_REPLY ||
+            icmph->msg_type == BATADV_ECHO_REQUEST) &&
+           (skb->len >= sizeof(struct batadv_icmp_packet_rr))) {
+               if (skb_linearize(skb) < 0)
+                       goto out;
+
+               /* create a copy of the skb, if needed, to modify it. */
+               if (skb_cow(skb, ETH_HLEN) < 0)
+                       goto out;
+
+               icmph = (struct batadv_icmp_header *)skb->data;
+               icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
+               if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
+                       goto out;
+
+               memcpy(&(icmp_packet_rr->rr[icmp_packet_rr->rr_cur]),
                       ethhdr->h_dest, ETH_ALEN);
-               icmp_packet->rr_cur++;
+               icmp_packet_rr->rr_cur++;
        }
 
        /* packet for me */
-       if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
-               return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
+       if (batadv_is_my_mac(bat_priv, icmph->dst))
+               return batadv_recv_my_icmp_packet(bat_priv, skb);
 
        /* TTL exceeded */
-       if (icmp_packet->header.ttl < 2)
+       if (icmph->header.ttl < 2)
                return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
 
        /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
+       orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
        if (!orig_node)
                goto out;
 
@@ -404,10 +446,10 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        if (skb_cow(skb, ETH_HLEN) < 0)
                goto out;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+       icmph = (struct batadv_icmp_header *)skb->data;
 
        /* decrement ttl */
-       icmp_packet->header.ttl--;
+       icmph->header.ttl--;
 
        /* route it */
        if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -474,18 +516,25 @@ out:
        return router;
 }
 
-/* Interface Alternating: Use the best of the
- * remaining candidates which are not using
- * this interface.
+/**
+ * batadv_find_ifalter_router - find the best of the remaining candidates which
+ *  are not using this interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_orig: the destination
+ * @recv_if: the interface that the router returned by this function must not
+ *  use
  *
- * Increases the returned router's refcount
+ * Returns the best candidate towards primary_orig that is not using recv_if.
+ * Increases the returned neighbor's refcount
  */
 static struct batadv_neigh_node *
-batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
+batadv_find_ifalter_router(struct batadv_priv *bat_priv,
+                          struct batadv_orig_node *primary_orig,
                           const struct batadv_hard_iface *recv_if)
 {
-       struct batadv_neigh_node *tmp_neigh_node;
        struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
+       struct batadv_neigh_node *tmp_neigh_node;
 
        rcu_read_lock();
        list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -497,7 +546,7 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
                if (tmp_neigh_node->if_incoming == recv_if)
                        continue;
 
-               if (router && tmp_neigh_node->tq_avg <= router->tq_avg)
+               if (router && bao->bat_neigh_cmp(tmp_neigh_node, router))
                        continue;
 
                if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
@@ -557,126 +606,6 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_tt_query_packet *tt_query;
-       uint16_t tt_size;
-       int hdr_size = sizeof(*tt_query);
-       char tt_flag;
-       size_t packet_size;
-
-       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
-               return NET_RX_DROP;
-
-       /* I could need to modify it */
-       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-               goto out;
-
-       tt_query = (struct batadv_tt_query_packet *)skb->data;
-
-       switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
-       case BATADV_TT_REQUEST:
-               batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
-
-               /* If we cannot provide an answer the tt_request is
-                * forwarded
-                */
-               if (!batadv_send_tt_response(bat_priv, tt_query)) {
-                       if (tt_query->flags & BATADV_TT_FULL_TABLE)
-                               tt_flag = 'F';
-                       else
-                               tt_flag = '.';
-
-                       batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Routing TT_REQUEST to %pM [%c]\n",
-                                  tt_query->dst,
-                                  tt_flag);
-                       return batadv_route_unicast_packet(skb, recv_if);
-               }
-               break;
-       case BATADV_TT_RESPONSE:
-               batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
-
-               if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
-                       /* packet needs to be linearized to access the TT
-                        * changes
-                        */
-                       if (skb_linearize(skb) < 0)
-                               goto out;
-                       /* skb_linearize() possibly changed skb->data */
-                       tt_query = (struct batadv_tt_query_packet *)skb->data;
-
-                       tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
-
-                       /* Ensure we have all the claimed data */
-                       packet_size = sizeof(struct batadv_tt_query_packet);
-                       packet_size += tt_size;
-                       if (unlikely(skb_headlen(skb) < packet_size))
-                               goto out;
-
-                       batadv_handle_tt_response(bat_priv, tt_query);
-               } else {
-                       if (tt_query->flags & BATADV_TT_FULL_TABLE)
-                               tt_flag =  'F';
-                       else
-                               tt_flag = '.';
-                       batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Routing TT_RESPONSE to %pM [%c]\n",
-                                  tt_query->dst,
-                                  tt_flag);
-                       return batadv_route_unicast_packet(skb, recv_if);
-               }
-               break;
-       }
-
-out:
-       /* returning NET_RX_DROP will make the caller function kfree the skb */
-       return NET_RX_DROP;
-}
-
-int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_roam_adv_packet *roam_adv_packet;
-       struct batadv_orig_node *orig_node;
-
-       if (batadv_check_unicast_packet(bat_priv, skb,
-                                       sizeof(*roam_adv_packet)) < 0)
-               goto out;
-
-       batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
-
-       roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
-
-       if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
-               return batadv_route_unicast_packet(skb, recv_if);
-
-       /* check if it is a backbone gateway. we don't accept
-        * roaming advertisement from it, as it has the same
-        * entries as we have.
-        */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
-               goto out;
-
-       orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
-       if (!orig_node)
-               goto out;
-
-       batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Received ROAMING_ADV from %pM (client %pM)\n",
-                  roam_adv_packet->src, roam_adv_packet->client);
-
-       batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
-                            BATADV_TT_CLIENT_ROAM,
-                            atomic_read(&orig_node->last_ttvn) + 1);
-
-       batadv_orig_node_free_ref(orig_node);
-out:
-       /* returning NET_RX_DROP will make the caller function kfree the skb */
-       return NET_RX_DROP;
-}
-
 /* find a suitable router for this originator, and use
  * bonding if possible. increases the found neighbors
  * refcount.
@@ -751,7 +680,8 @@ batadv_find_router(struct batadv_priv *bat_priv,
        if (bonding_enabled)
                router = batadv_find_bond_router(primary_orig_node, recv_if);
        else
-               router = batadv_find_ifalter_router(primary_orig_node, recv_if);
+               router = batadv_find_ifalter_router(bat_priv, primary_orig_node,
+                                                   recv_if);
 
 return_router:
        if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
@@ -772,11 +702,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 {
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct batadv_orig_node *orig_node = NULL;
-       struct batadv_neigh_node *neigh_node = NULL;
        struct batadv_unicast_packet *unicast_packet;
        struct ethhdr *ethhdr = eth_hdr(skb);
        int res, hdr_len, ret = NET_RX_DROP;
-       struct sk_buff *new_skb;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
@@ -793,46 +721,12 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        if (!orig_node)
                goto out;
 
-       /* find_router() increases neigh_nodes refcount if found. */
-       neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
-
-       if (!neigh_node)
-               goto out;
-
        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, ETH_HLEN) < 0)
                goto out;
 
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
-       if (unicast_packet->header.packet_type == BATADV_UNICAST &&
-           atomic_read(&bat_priv->fragmentation) &&
-           skb->len > neigh_node->if_incoming->net_dev->mtu) {
-               ret = batadv_frag_send_skb(skb, bat_priv,
-                                          neigh_node->if_incoming,
-                                          neigh_node->addr);
-               goto out;
-       }
-
-       if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
-           batadv_frag_can_reassemble(skb,
-                                      neigh_node->if_incoming->net_dev->mtu)) {
-               ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
-
-               if (ret == NET_RX_DROP)
-                       goto out;
-
-               /* packet was buffered for late merge */
-               if (!new_skb) {
-                       ret = NET_RX_SUCCESS;
-                       goto out;
-               }
-
-               skb = new_skb;
-               unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       }
-
        /* decrement ttl */
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_packet->header.ttl--;
 
        switch (unicast_packet->header.packet_type) {
@@ -867,8 +761,6 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        }
 
 out:
-       if (neigh_node)
-               batadv_neigh_node_free_ref(neigh_node);
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
        return ret;
@@ -879,6 +771,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
+ * @vid: VLAN identifier
  *
  * Search the translation table for dst_addr and update the unicast header with
  * the new corresponding information (originator address where the destination
@@ -889,21 +782,22 @@ out:
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
                              struct batadv_unicast_packet *unicast_packet,
-                             uint8_t *dst_addr)
+                             uint8_t *dst_addr, unsigned short vid)
 {
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_hard_iface *primary_if = NULL;
        bool ret = false;
        uint8_t *orig_addr, orig_ttvn;
 
-       if (batadv_is_my_client(bat_priv, dst_addr)) {
+       if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
                primary_if = batadv_primary_if_get_selected(bat_priv);
                if (!primary_if)
                        goto out;
                orig_addr = primary_if->net_dev->dev_addr;
                orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        } else {
-               orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr);
+               orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+                                                    vid);
                if (!orig_node)
                        goto out;
 
@@ -930,11 +824,12 @@ out:
 
 static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                                     struct sk_buff *skb, int hdr_len) {
-       uint8_t curr_ttvn, old_ttvn;
+       struct batadv_unicast_packet *unicast_packet;
+       struct batadv_hard_iface *primary_if;
        struct batadv_orig_node *orig_node;
+       uint8_t curr_ttvn, old_ttvn;
        struct ethhdr *ethhdr;
-       struct batadv_hard_iface *primary_if;
-       struct batadv_unicast_packet *unicast_packet;
+       unsigned short vid;
        int is_old_ttvn;
 
        /* check if there is enough data before accessing it */
@@ -946,6 +841,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                return 0;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       vid = batadv_get_vid(skb, hdr_len);
        ethhdr = (struct ethhdr *)(skb->data + hdr_len);
 
        /* check if the destination client was served by this node and it is now
@@ -953,9 +849,9 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * message and that it knows the new destination in the mesh to re-route
         * the packet to
         */
-       if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) {
+       if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
                if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
-                                                 ethhdr->h_dest))
+                                                 ethhdr->h_dest, vid))
                        net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
                                                 bat_priv,
                                                 "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
@@ -1001,7 +897,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * target host
         */
        if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
-                                         ethhdr->h_dest)) {
+                                         ethhdr->h_dest, vid)) {
                net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
                                         "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
                                         unicast_packet->dest, ethhdr->h_dest,
@@ -1013,7 +909,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * currently served by this node or there is no destination at all and
         * it is possible to drop the packet
         */
-       if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
+       if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
                return 0;
 
        /* update the header in order to let the packet be delivered to this
@@ -1032,6 +928,34 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
        return 1;
 }
 
+/**
+ * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ *     are in the unicast number space but not yet known to the implementation
+ * @skb: unicast packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+                                        struct batadv_hard_iface *recv_if)
+{
+       struct batadv_unicast_packet *unicast_packet;
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       int check, hdr_size = sizeof(*unicast_packet);
+
+       check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+       if (check < 0)
+               return NET_RX_DROP;
+
+       /* we don't know about this type, drop it. */
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
+               return NET_RX_DROP;
+
+       return batadv_route_unicast_packet(skb, recv_if);
+}
+
 int batadv_recv_unicast_packet(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if)
 {
@@ -1094,51 +1018,112 @@ rx_success:
        return batadv_route_unicast_packet(skb, recv_if);
 }
 
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-                                 struct batadv_hard_iface *recv_if)
+/**
+ * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * @skb: unicast tvlv packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+                            struct batadv_hard_iface *recv_if)
 {
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_unicast_frag_packet *unicast_packet;
-       int hdr_size = sizeof(*unicast_packet);
-       struct sk_buff *new_skb = NULL;
-       int ret;
+       struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+       unsigned char *tvlv_buff;
+       uint16_t tvlv_buff_len;
+       int hdr_size = sizeof(*unicast_tvlv_packet);
+       int ret = NET_RX_DROP;
 
        if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
 
-       if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
+       /* the header is likely to be modified while forwarding */
+       if (skb_cow(skb, hdr_size) < 0)
                return NET_RX_DROP;
 
-       unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
+       /* packet needs to be linearized to access the tvlv content */
+       if (skb_linearize(skb) < 0)
+               return NET_RX_DROP;
 
-       /* packet for me */
-       if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
-               ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
+       unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
 
-               if (ret == NET_RX_DROP)
-                       return NET_RX_DROP;
+       tvlv_buff = (unsigned char *)(skb->data + hdr_size);
+       tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
 
-               /* packet was buffered for late merge */
-               if (!new_skb)
-                       return NET_RX_SUCCESS;
+       if (tvlv_buff_len > skb->len - hdr_size)
+               return NET_RX_DROP;
 
-               if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
-                                                         hdr_size))
-                       goto rx_success;
-               if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
-                                                       hdr_size))
-                       goto rx_success;
+       ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
+                                            unicast_tvlv_packet->src,
+                                            unicast_tvlv_packet->dst,
+                                            tvlv_buff, tvlv_buff_len);
 
-               batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-                                   sizeof(struct batadv_unicast_packet), NULL);
+       if (ret != NET_RX_SUCCESS)
+               ret = batadv_route_unicast_packet(skb, recv_if);
 
-rx_success:
-               return NET_RX_SUCCESS;
+       return ret;
+}
+
+/**
+ * batadv_recv_frag_packet - process received fragment
+ * @skb: the received fragment
+ * @recv_if: interface that the skb is received on
+ *
+ * This function does one of three things: 1) forward the fragment, if the
+ * assembled packet would exceed our MTU; 2) buffer the fragment, if we still
+ * lack further fragments; 3) merge the fragments, if we have all needed parts.
+ *
+ * Return NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ */
+int batadv_recv_frag_packet(struct sk_buff *skb,
+                           struct batadv_hard_iface *recv_if)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_orig_node *orig_node_src = NULL;
+       struct batadv_frag_packet *frag_packet;
+       int ret = NET_RX_DROP;
+
+       if (batadv_check_unicast_packet(bat_priv, skb,
+                                       sizeof(*frag_packet)) < 0)
+               goto out;
+
+       frag_packet = (struct batadv_frag_packet *)skb->data;
+       orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
+       if (!orig_node_src)
+               goto out;
+
+       /* Route the fragment if it is not for us and too big to be merged. */
+       if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
+           batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
+               ret = NET_RX_SUCCESS;
+               goto out;
        }
 
-       return batadv_route_unicast_packet(skb, recv_if);
-}
+       batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+
+       /* Add fragment to buffer and merge if possible. */
+       if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+               goto out;
 
+       /* Deliver merged packet to the appropriate handler, if it was
+        * merged
+        */
+       if (skb)
+               batadv_batman_skb_recv(skb, recv_if->net_dev,
+                                      &recv_if->batman_adv_ptype, NULL);
+
+       ret = NET_RX_SUCCESS;
+
+out:
+       if (orig_node_src)
+               batadv_orig_node_free_ref(orig_node_src);
+
+       return ret;
+}
 
 int batadv_recv_bcast_packet(struct sk_buff *skb,
                             struct batadv_hard_iface *recv_if)
@@ -1240,53 +1225,3 @@ out:
                batadv_orig_node_free_ref(orig_node);
        return ret;
 }
-
-int batadv_recv_vis_packet(struct sk_buff *skb,
-                          struct batadv_hard_iface *recv_if)
-{
-       struct batadv_vis_packet *vis_packet;
-       struct ethhdr *ethhdr;
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       int hdr_size = sizeof(*vis_packet);
-
-       /* keep skb linear */
-       if (skb_linearize(skb) < 0)
-               return NET_RX_DROP;
-
-       if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return NET_RX_DROP;
-
-       vis_packet = (struct batadv_vis_packet *)skb->data;
-       ethhdr = eth_hdr(skb);
-
-       /* not for me */
-       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
-               return NET_RX_DROP;
-
-       /* ignore own packets */
-       if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
-               return NET_RX_DROP;
-
-       if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
-               return NET_RX_DROP;
-
-       switch (vis_packet->vis_type) {
-       case BATADV_VIS_TYPE_SERVER_SYNC:
-               batadv_receive_server_sync_packet(bat_priv, vis_packet,
-                                                 skb_headlen(skb));
-               break;
-
-       case BATADV_VIS_TYPE_CLIENT_UPDATE:
-               batadv_receive_client_update_packet(bat_priv, vis_packet,
-                                                   skb_headlen(skb));
-               break;
-
-       default:        /* ignore unknown packet */
-               break;
-       }
-
-       /* We take a copy of the data in the packet, so we should
-        * always free the skbuf.
-        */
-       return NET_RX_DROP;
-}
index 72a29bde201022300194422a9caff389ab409731..19544ddb81b5abf417b69484c096aff239989a10 100644 (file)
@@ -30,23 +30,26 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if);
 int batadv_recv_unicast_packet(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if);
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-                                 struct batadv_hard_iface *recv_if);
+int batadv_recv_frag_packet(struct sk_buff *skb,
+                           struct batadv_hard_iface *iface);
 int batadv_recv_bcast_packet(struct sk_buff *skb,
                             struct batadv_hard_iface *recv_if);
-int batadv_recv_vis_packet(struct sk_buff *skb,
-                          struct batadv_hard_iface *recv_if);
 int batadv_recv_tt_query(struct sk_buff *skb,
                         struct batadv_hard_iface *recv_if);
 int batadv_recv_roam_adv(struct sk_buff *skb,
                         struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+                            struct batadv_hard_iface *recv_if);
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+                                        struct batadv_hard_iface *recv_if);
 struct batadv_neigh_node *
 batadv_find_router(struct batadv_priv *bat_priv,
                   struct batadv_orig_node *orig_node,
                   const struct batadv_hard_iface *recv_if);
 void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node);
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+                                 struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node);
 void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
                                 struct batadv_orig_node *orig_neigh_node,
index 0266edd0fa7f7acb0be56f896de95ba7775108f6..c83be5ebaa285dffe64d051850e87f2fca02624b 100644 (file)
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "vis.h"
 #include "gateway_common.h"
+#include "gateway_client.h"
 #include "originator.h"
 #include "network-coding.h"
-
-#include <linux/if_ether.h>
+#include "fragmentation.h"
 
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -64,10 +63,10 @@ int batadv_send_skb_packet(struct sk_buff *skb,
        ethhdr = eth_hdr(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
-       ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+       ethhdr->h_proto = htons(ETH_P_BATMAN);
 
        skb_set_network_header(skb, ETH_HLEN);
-       skb->protocol = __constant_htons(ETH_P_BATMAN);
+       skb->protocol = htons(ETH_P_BATMAN);
 
        skb->dev = hard_iface->net_dev;
 
@@ -109,7 +108,19 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
        /* batadv_find_router() increases neigh_nodes refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
-               return ret;
+               goto out;
+
+       /* Check if the skb is too large to send in one piece and fragment
+        * it if needed.
+        */
+       if (atomic_read(&bat_priv->fragmentation) &&
+           skb->len > neigh_node->if_incoming->net_dev->mtu) {
+               /* Fragment and send packet. */
+               if (batadv_frag_send_packet(skb, orig_node, neigh_node))
+                       ret = NET_XMIT_SUCCESS;
+
+               goto out;
+       }
 
        /* try to network code the packet, if it is received on an interface
         * (i.e. being forwarded). If the packet originates from this node or if
@@ -123,11 +134,225 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
                ret = NET_XMIT_SUCCESS;
        }
 
-       batadv_neigh_node_free_ref(neigh_node);
+out:
+       if (neigh_node)
+               batadv_neigh_node_free_ref(neigh_node);
+
+       return ret;
+}
+
+/**
+ * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ *  common fields for unicast packets
+ * @skb: the skb carrying the unicast header to initialize
+ * @hdr_size: amount of bytes to push at the beginning of the skb
+ * @orig_node: the destination node
+ *
+ * Returns false if the buffer extension was not possible or true otherwise.
+ */
+static bool
+batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
+                                 struct batadv_orig_node *orig_node)
+{
+       struct batadv_unicast_packet *unicast_packet;
+       uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+
+       if (batadv_skb_head_push(skb, hdr_size) < 0)
+               return false;
+
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       unicast_packet->header.version = BATADV_COMPAT_VERSION;
+       /* batman packet type: unicast */
+       unicast_packet->header.packet_type = BATADV_UNICAST;
+       /* set unicast ttl */
+       unicast_packet->header.ttl = BATADV_TTL;
+       /* copy the destination for faster routing */
+       memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+       /* set the destination tt version number */
+       unicast_packet->ttvn = ttvn;
+
+       return true;
+}
+
+/**
+ * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
+                                           struct batadv_orig_node *orig_node)
+{
+       size_t uni_size = sizeof(struct batadv_unicast_packet);
+
+       return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
+}
+
+/**
+ * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ *  unicast 4addr header
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+                                          struct sk_buff *skb,
+                                          struct batadv_orig_node *orig,
+                                          int packet_subtype)
+{
+       struct batadv_hard_iface *primary_if;
+       struct batadv_unicast_4addr_packet *uc_4addr_packet;
+       bool ret = false;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out;
+
+       /* Push the needed header space and fill the unicast_packet substructure.
+        * We can do that because the first member of the uc_4addr_packet
+        * is of type struct batadv_unicast_packet
+        */
+       if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
+                                              orig))
+               goto out;
+
+       uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+       uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+       memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+       uc_4addr_packet->subtype = packet_subtype;
+       uc_4addr_packet->reserved = 0;
+
+       ret = true;
+out:
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+       return ret;
+}
+
+/**
+ * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ *  4addr packets)
+ * @orig_node: the originator to send the packet to
+ * @vid: the vid to be used to search the translation table
+ *
+ * Wrap the given skb into a batman-adv unicast or unicast-4addr header
+ * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
+ * as packet_type. Then send this frame to the given orig_node and release a
+ * reference to this orig_node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+                                  struct sk_buff *skb, int packet_type,
+                                  int packet_subtype,
+                                  struct batadv_orig_node *orig_node,
+                                  unsigned short vid)
+{
+       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+       struct batadv_unicast_packet *unicast_packet;
+       int ret = NET_XMIT_DROP;
+
+       if (!orig_node)
+               goto out;
+
+       switch (packet_type) {
+       case BATADV_UNICAST:
+               if (!batadv_send_skb_prepare_unicast(skb, orig_node))
+                       goto out;
+               break;
+       case BATADV_UNICAST_4ADDR:
+               if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
+                                                          orig_node,
+                                                          packet_subtype))
+                       goto out;
+               break;
+       default:
+               /* this function supports UNICAST and UNICAST_4ADDR only. It
+                * should never be invoked with any other packet type
+                */
+               goto out;
+       }
+
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+       /* inform the destination node that we are still missing a correct route
+        * for this client. The destination will receive this packet and will
+        * try to reroute it because the ttvn contained in the header is less
+        * than the current one
+        */
+       if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
+               unicast_packet->ttvn = unicast_packet->ttvn - 1;
 
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+               ret = NET_XMIT_SUCCESS;
+
+out:
+       if (orig_node)
+               batadv_orig_node_free_ref(orig_node);
+       if (ret == NET_XMIT_DROP)
+               kfree_skb(skb);
        return ret;
 }
 
+/**
+ * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ *  4addr packets)
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
+ * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
+ * to the corresponding destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+                                  struct sk_buff *skb, int packet_type,
+                                  int packet_subtype, unsigned short vid)
+{
+       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+       struct batadv_orig_node *orig_node;
+
+       orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+                                            ethhdr->h_dest, vid);
+       return batadv_send_skb_unicast(bat_priv, skb, packet_type,
+                                      packet_subtype, orig_node, vid);
+}
+
+/**
+ * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the currently selected gateway. Wrap the given skb into a batman-adv
+ * unicast header and send this frame to this gateway node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                          unsigned short vid)
+{
+       struct batadv_orig_node *orig_node;
+
+       orig_node = batadv_gw_get_selected_orig(bat_priv);
+       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+                                      orig_node, vid);
+}
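
Taken together, batadv_send_skb_via_tt() and batadv_send_skb_via_gw() become the two unicast entry points used by the soft-interface transmit path. A minimal sketch of how a caller picks between them, mirroring the batadv_interface_tx() hunk further down in this diff (counters and error paths omitted; ethhdr, vid and ret are assumed to be set up as in that hunk):

	if (is_multicast_ether_addr(ethhdr->h_dest))
		/* multicast destination: hand the frame to the selected gateway */
		ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
	else
		/* unicast destination: resolve the recipient via the translation table */
		ret = batadv_send_skb_via_tt(bat_priv, skb, vid);

	if (ret == NET_XMIT_DROP)
		goto dropped_freed;	/* the skb was already consumed by batadv_send_skb_unicast() */
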
+
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
index e7b17880fca4f46ef77783034e58ae1eeb00b641..aa2e2537a739297f76afa54b30d87d69976d35d2 100644 (file)
@@ -34,5 +34,58 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
 void
 batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface);
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+                                          struct sk_buff *skb,
+                                          struct batadv_orig_node *orig_node,
+                                          int packet_subtype);
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+                                  struct sk_buff *skb, int packet_type,
+                                  int packet_subtype, unsigned short vid);
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                          unsigned short vid);
+
+/**
+ * batadv_send_skb_via_tt - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast header. Then send this frame to the corresponding destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
+                                        struct sk_buff *skb,
+                                        unsigned short vid)
+{
+       return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
+                                             vid);
+}
+
+/**
+ * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast-4addr header. Then send this frame to the corresponding destination
+ * node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
+                                              struct sk_buff *skb,
+                                              int packet_subtype,
+                                              unsigned short vid)
+{
+       return batadv_send_skb_via_tt_generic(bat_priv, skb,
+                                             BATADV_UNICAST_4ADDR,
+                                             packet_subtype, vid);
+}
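
A hypothetical caller of the 4addr variant would look much the same; BATADV_P_DATA is used here purely as an example subtype and is an assumption about packet.h, not something introduced by this patch:

	/* illustrative only: the subtype constant is assumed, not defined here */
	ret = batadv_send_skb_via_tt_4addr(bat_priv, skb, BATADV_P_DATA, vid);
	if (ret == NET_XMIT_DROP)
		return NETDEV_TX_OK;	/* skb already freed by batadv_send_skb_unicast() */
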
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
index 813db4e646021dea4c089d53ea65504a63ff0d2d..36f050876f8260245a008de079cf0974fa97b76e 100644 (file)
@@ -34,8 +34,6 @@
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
-#include <linux/if_ether.h>
-#include "unicast.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 
@@ -120,9 +118,10 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
 
        /* only modify transtable if it has been initialized before */
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
-               batadv_tt_local_remove(bat_priv, old_addr,
+               batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
                                       "mac address changed", false);
-               batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
+               batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS,
+                                   BATADV_NULL_IFINDEX);
        }
 
        return 0;
@@ -139,36 +138,48 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+/**
+ * batadv_interface_set_rx_mode - set the rx mode of a device
+ * @dev: registered network device to modify
+ *
+ * We do not actually need to set any rx filters for the virtual batman
+ * soft interface. However, a dummy handler lets users configure static
+ * multicast listeners, for instance.
+ */
+static void batadv_interface_set_rx_mode(struct net_device *dev)
+{
+}
+
 static int batadv_interface_tx(struct sk_buff *skb,
                               struct net_device *soft_iface)
 {
-       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+       struct ethhdr *ethhdr;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_bcast_packet *bcast_packet;
-       struct vlan_ethhdr *vhdr;
-       __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+       __be16 ethertype = htons(ETH_P_BATMAN);
        static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
                                                   0x00, 0x00};
        static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
                                                    0x00, 0x00};
+       struct vlan_ethhdr *vhdr;
        unsigned int header_len = 0;
        int data_len = skb->len, ret;
-       unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
-       bool do_bcast = false;
-       uint32_t seqno;
        unsigned long brd_delay = 1;
+       bool do_bcast = false, client_added;
+       unsigned short vid;
+       uint32_t seqno;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
 
        soft_iface->trans_start = jiffies;
+       vid = batadv_get_vid(skb, 0);
+       ethhdr = (struct ethhdr *)skb->data;
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
                vhdr = (struct vlan_ethhdr *)skb->data;
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
                        break;
@@ -185,8 +196,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
        ethhdr = (struct ethhdr *)skb->data;
 
        /* Register the client MAC in the transtable */
-       if (!is_multicast_ether_addr(ethhdr->h_source))
-               batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+       if (!is_multicast_ether_addr(ethhdr->h_source)) {
+               client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
+                                                  vid, skb->skb_iif);
+               if (!client_added)
+                       goto dropped;
+       }
 
        /* don't accept stp packets. STP does not help in meshes.
         * better use the bridge loop avoidance ...
@@ -286,8 +301,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
 
                batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
 
-               ret = batadv_unicast_send_skb(bat_priv, skb);
-               if (ret != 0)
+               if (is_multicast_ether_addr(ethhdr->h_dest))
+                       ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+               else
+                       ret = batadv_send_skb_via_tt(bat_priv, skb, vid);
+
+               if (ret == NET_XMIT_DROP)
                        goto dropped_freed;
        }
 
@@ -309,12 +328,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
                         int hdr_size, struct batadv_orig_node *orig_node)
 {
+       struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-       struct ethhdr *ethhdr;
+       __be16 ethertype = htons(ETH_P_BATMAN);
        struct vlan_ethhdr *vhdr;
-       struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
-       unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
-       __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+       struct ethhdr *ethhdr;
+       unsigned short vid;
        bool is_bcast;
 
        is_bcast = (batadv_header->packet_type == BATADV_BCAST);
@@ -326,13 +345,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
        skb_pull_rcsum(skb, hdr_size);
        skb_reset_mac_header(skb);
 
+       vid = batadv_get_vid(skb, hdr_size);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
                vhdr = (struct vlan_ethhdr *)skb->data;
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
                        break;
@@ -368,9 +386,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        if (orig_node)
                batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
-                                                    ethhdr->h_source);
+                                                    ethhdr->h_source, vid);
 
-       if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+       if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest,
+                                 vid))
                goto dropped;
 
        netif_rx(skb);
@@ -382,6 +401,177 @@ out:
        return;
 }
 
+/**
+ * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and
+ *  possibly free it
+ * @softif_vlan: the vlan object to release
+ */
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+{
+       if (atomic_dec_and_test(&softif_vlan->refcount))
+               kfree_rcu(softif_vlan, rcu);
+}
+
+/**
+ * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the identifier of the vlan object to retrieve
+ *
+ * Returns the private data of the vlan matching the vid passed as argument or
+ * NULL otherwise. The refcounter of the returned object is incremented by 1.
+ */
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+                                                 unsigned short vid)
+{
+       struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+               if (vlan_tmp->vid != vid)
+                       continue;
+
+               if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+                       continue;
+
+               vlan = vlan_tmp;
+               break;
+       }
+       rcu_read_unlock();
+
+       return vlan;
+}
+
+/**
+ * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Returns 0 on success, a negative error otherwise.
+ */
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
+{
+       struct batadv_softif_vlan *vlan;
+       int err;
+
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (vlan) {
+               batadv_softif_vlan_free_ref(vlan);
+               return -EEXIST;
+       }
+
+       vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+       if (!vlan)
+               return -ENOMEM;
+
+       vlan->vid = vid;
+       atomic_set(&vlan->refcount, 1);
+
+       atomic_set(&vlan->ap_isolation, 0);
+
+       err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+       if (err) {
+               kfree(vlan);
+               return err;
+       }
+
+       /* add a new TT local entry. This one will be marked with the NOPURGE
+        * flag
+        */
+       batadv_tt_local_add(bat_priv->soft_iface,
+                           bat_priv->soft_iface->dev_addr, vid,
+                           BATADV_NULL_IFINDEX);
+
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+       return 0;
+}
+
+/**
+ * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the object to remove
+ */
+static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
+                                      struct batadv_softif_vlan *vlan)
+{
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_del_rcu(&vlan->list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+       batadv_sysfs_del_vlan(bat_priv, vlan);
+
+       /* explicitly remove the associated TT local entry because it is marked
+        * with the NOPURGE flag
+        */
+       batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
+                              vlan->vid, "vlan interface destroyed", false);
+
+       batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_interface_add_vid - ndo_vlan_rx_add_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id (only ETH_P_8021Q is supported)
+ * @vid: identifier of the new vlan
+ *
+ * Set up all the internal structures for handling the new vlan on top of the
+ * mesh interface.
+ *
+ * Returns 0 on success or a negative error code in case of failure.
+ */
+static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
+                                   unsigned short vid)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+
+       /* only 802.1Q vlans are supported.
+        * batman-adv does not know how to handle other types
+        */
+       if (proto != htons(ETH_P_8021Q))
+               return -EINVAL;
+
+       vid |= BATADV_VLAN_HAS_TAG;
+
+       return batadv_softif_create_vlan(bat_priv, vid);
+}
+
+/**
+ * batadv_interface_kill_vid - ndo_vlan_rx_kill_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id (only ETH_P_8021Q is supported)
+ * @vid: identifier of the deleted vlan
+ *
+ * Destroy all the internal structures used to handle the vlan identified by
+ * vid on top of the mesh interface.
+ *
+ * Returns 0 on success, -EINVAL if the specified protocol is not ETH_P_8021Q
+ * or -ENOENT if the specified vlan id wasn't registered.
+ */
+static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+                                    unsigned short vid)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_softif_vlan *vlan;
+
+       /* only 802.1Q vlans are supported. batman-adv does not know how to
+        * handle other types
+        */
+       if (proto != htons(ETH_P_8021Q))
+               return -EINVAL;
+
+       vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+       if (!vlan)
+               return -ENOENT;
+
+       batadv_softif_destroy_vlan(bat_priv, vlan);
+
+       /* finally free the vlan object */
+       batadv_softif_vlan_free_ref(vlan);
+
+       return 0;
+}
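
The two batadv_softif_vlan_free_ref() calls in batadv_interface_kill_vid() are intentional. A short annotated sketch of the reference flow, reconstructed from the functions above (nothing new is introduced here):

	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);	/* refcount +1 */
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);	/* drops the softif_vlan_list reference */
		batadv_softif_vlan_free_ref(vlan);		/* drops the reference taken by _get() */
	}

Note also that the 802.1Q vid handed in by the networking core is OR'ed with BATADV_VLAN_HAS_TAG before it reaches the softif_vlan code, while untagged traffic uses BATADV_NO_FLAGS (see batadv_softif_destroy_finish() below).
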
+
 /* batman-adv network devices have devices nesting below it and are a special
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
@@ -421,6 +611,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
  */
 static void batadv_softif_destroy_finish(struct work_struct *work)
 {
+       struct batadv_softif_vlan *vlan;
        struct batadv_priv *bat_priv;
        struct net_device *soft_iface;
 
@@ -428,6 +619,13 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
                                cleanup_work);
        soft_iface = bat_priv->soft_iface;
 
+       /* destroy the "untagged" VLAN */
+       vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+       if (vlan) {
+               batadv_softif_destroy_vlan(bat_priv, vlan);
+               batadv_softif_vlan_free_ref(vlan);
+       }
+
        batadv_sysfs_del_meshif(soft_iface);
 
        rtnl_lock();
@@ -444,6 +642,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
 static int batadv_softif_init_late(struct net_device *dev)
 {
        struct batadv_priv *bat_priv;
+       uint32_t random_seqno;
        int ret;
        size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
 
@@ -468,17 +667,17 @@ static int batadv_softif_init_late(struct net_device *dev)
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_set(&bat_priv->distributed_arp_table, 1);
 #endif
-       atomic_set(&bat_priv->ap_isolation, 0);
-       atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
        atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
        atomic_set(&bat_priv->gw_sel_class, 20);
-       atomic_set(&bat_priv->gw_bandwidth, 41);
+       atomic_set(&bat_priv->gw.bandwidth_down, 100);
+       atomic_set(&bat_priv->gw.bandwidth_up, 20);
        atomic_set(&bat_priv->orig_interval, 1000);
        atomic_set(&bat_priv->hop_penalty, 30);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
        atomic_set(&bat_priv->log_level, 0);
 #endif
        atomic_set(&bat_priv->fragmentation, 1);
+       atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
        atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
        atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
@@ -493,6 +692,10 @@ static int batadv_softif_init_late(struct net_device *dev)
        bat_priv->tt.last_changeset = NULL;
        bat_priv->tt.last_changeset_len = 0;
 
+       /* randomize initial seqno to avoid collision */
+       get_random_bytes(&random_seqno, sizeof(random_seqno));
+       atomic_set(&bat_priv->frag_seqno, random_seqno);
+
        bat_priv->primary_if = NULL;
        bat_priv->num_ifaces = 0;
 
@@ -578,8 +781,11 @@ static const struct net_device_ops batadv_netdev_ops = {
        .ndo_open = batadv_interface_open,
        .ndo_stop = batadv_interface_release,
        .ndo_get_stats = batadv_interface_stats,
+       .ndo_vlan_rx_add_vid = batadv_interface_add_vid,
+       .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
        .ndo_set_mac_address = batadv_interface_set_mac_addr,
        .ndo_change_mtu = batadv_interface_change_mtu,
+       .ndo_set_rx_mode = batadv_interface_set_rx_mode,
        .ndo_start_xmit = batadv_interface_tx,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_add_slave = batadv_softif_slave_add,
@@ -616,6 +822,7 @@ static void batadv_softif_init_early(struct net_device *dev)
 
        dev->netdev_ops = &batadv_netdev_ops;
        dev->destructor = batadv_softif_free;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        dev->tx_queue_len = 0;
 
        /* can't call min_mtu, because the needed variables
@@ -623,7 +830,7 @@ static void batadv_softif_init_early(struct net_device *dev)
         */
        dev->mtu = ETH_DATA_LEN;
        /* reserve more space in the skbuff for our header */
-       dev->hard_header_len = BATADV_HEADER_LEN;
+       dev->hard_header_len = batadv_max_header_len();
 
        /* generate random address */
        eth_hw_addr_random(dev);
@@ -760,6 +967,12 @@ static const struct {
        { "mgmt_tx_bytes" },
        { "mgmt_rx" },
        { "mgmt_rx_bytes" },
+       { "frag_tx" },
+       { "frag_tx_bytes" },
+       { "frag_rx" },
+       { "frag_rx_bytes" },
+       { "frag_fwd" },
+       { "frag_fwd_bytes" },
        { "tt_request_tx" },
        { "tt_request_rx" },
        { "tt_response_tx" },
index 2f2472c2ea0d91aae4b50314ee1171928c5024ee..06fc91ff5a021bf584db73e75624e7d13d2560f7 100644 (file)
@@ -28,5 +28,9 @@ struct net_device *batadv_softif_create(const char *name);
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
 extern struct rtnl_link_ops batadv_link_ops;
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan);
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+                                                 unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
index 4114b961bc2c8400acc6bd776e2732313674b0e4..6335433310aff37a0438bc61367ce142d96cb07f 100644 (file)
 #include "sysfs.h"
 #include "translation-table.h"
 #include "distributed-arp-table.h"
+#include "network-coding.h"
 #include "originator.h"
 #include "hard-interface.h"
+#include "soft-interface.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
-#include "vis.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
@@ -39,6 +40,53 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
        return netdev_priv(net_dev);
 }
 
+/**
+ * batadv_vlan_kobj_to_batpriv - convert a vlan kobj into the associated batpriv
+ * @obj: kobject to convert
+ *
+ * Returns the associated batadv_priv struct.
+ */
+static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
+{
+       /* VLAN specific attributes are located in the root sysfs folder if they
+        * refer to the untagged VLAN..
+        */
+       if (!strcmp(BATADV_SYSFS_IF_MESH_SUBDIR, obj->name))
+               return batadv_kobj_to_batpriv(obj);
+
+       /* ..while the attributes for the tagged vlans are located in
+        * the corresponding "vlan%VID" subfolder
+        */
+       return batadv_kobj_to_batpriv(obj->parent);
+}
+
+/**
+ * batadv_kobj_to_vlan - convert a kobj into the associated softif_vlan struct
+ * @bat_priv: the bat priv with all the soft interface information
+ * @obj: kobject to convert
+ *
+ * Returns the associated softif_vlan struct if found, NULL otherwise.
+ */
+static struct batadv_softif_vlan *
+batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
+{
+       struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+               if (vlan_tmp->kobj != obj)
+                       continue;
+
+               if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+                       continue;
+
+               vlan = vlan_tmp;
+               break;
+       }
+       rcu_read_unlock();
+
+       return vlan;
+}
+
 #define BATADV_UEV_TYPE_VAR    "BATTYPE="
 #define BATADV_UEV_ACTION_VAR  "BATACTION="
 #define BATADV_UEV_DATA_VAR    "BATDATA="
@@ -53,6 +101,15 @@ static char *batadv_uev_type_str[] = {
        "gw"
 };
 
+/* Use this, if you have customized show and store functions for vlan attrs */
+#define BATADV_ATTR_VLAN(_name, _mode, _show, _store)  \
+struct batadv_attribute batadv_attr_vlan_##_name = {   \
+       .attr = {.name = __stringify(_name),            \
+                .mode = _mode },                       \
+       .show   = _show,                                \
+       .store  = _store,                               \
+};
+
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)       \
 struct batadv_attribute batadv_attr_##_name = {                \
@@ -122,6 +179,41 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                  \
        static BATADV_ATTR(_name, _mode, batadv_show_##_name,           \
                           batadv_store_##_name)
 
+#define BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)                 \
+ssize_t batadv_store_vlan_##_name(struct kobject *kobj,                        \
+                                 struct attribute *attr, char *buff,   \
+                                 size_t count)                         \
+{                                                                      \
+       struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+       struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+                                                             kobj);    \
+       size_t res = __batadv_store_bool_attr(buff, count, _post_func,  \
+                                             attr, &vlan->_name,       \
+                                             bat_priv->soft_iface);    \
+       batadv_softif_vlan_free_ref(vlan);                              \
+       return res;                                                     \
+}
+
+#define BATADV_ATTR_VLAN_SHOW_BOOL(_name)                              \
+ssize_t batadv_show_vlan_##_name(struct kobject *kobj,                 \
+                                struct attribute *attr, char *buff)    \
+{                                                                      \
+       struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+       struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+                                                             kobj);    \
+       size_t res = sprintf(buff, "%s\n",                              \
+                            atomic_read(&vlan->_name) == 0 ?           \
+                            "disabled" : "enabled");                   \
+       batadv_softif_vlan_free_ref(vlan);                              \
+       return res;                                                     \
+}
+
+/* Use this, if you are going to turn a [name] in the vlan struct on or off */
+#define BATADV_ATTR_VLAN_BOOL(_name, _mode, _post_func)                        \
+       static BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)           \
+       static BATADV_ATTR_VLAN_SHOW_BOOL(_name)                        \
+       static BATADV_ATTR_VLAN(_name, _mode, batadv_show_vlan_##_name, \
+                               batadv_store_vlan_##_name)
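
To make the macro layering concrete, this is roughly what BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL) (used further below) expands to. The expansion is reconstructed by hand from the three macros above, so treat it as a sketch rather than exact preprocessor output:

	static ssize_t batadv_store_vlan_ap_isolation(struct kobject *kobj,
						      struct attribute *attr,
						      char *buff, size_t count)
	{
		struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);
		struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv,
								      kobj);
		size_t res = __batadv_store_bool_attr(buff, count, NULL, attr,
						      &vlan->ap_isolation,
						      bat_priv->soft_iface);

		batadv_softif_vlan_free_ref(vlan);
		return res;
	}

	/* batadv_show_vlan_ap_isolation() is generated analogously, and both are
	 * wired into a struct batadv_attribute named batadv_attr_vlan_ap_isolation
	 * with .attr.name = "ap_isolation" and .attr.mode = S_IRUGO | S_IWUSR.
	 */
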
 
 static int batadv_store_bool_attr(char *buff, size_t count,
                                  struct net_device *net_dev,
@@ -230,74 +322,6 @@ __batadv_store_uint_attr(const char *buff, size_t count,
        return ret;
 }
 
-static ssize_t batadv_show_vis_mode(struct kobject *kobj,
-                                   struct attribute *attr, char *buff)
-{
-       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-       int vis_mode = atomic_read(&bat_priv->vis_mode);
-       const char *mode;
-
-       if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               mode = "client";
-       else
-               mode = "server";
-
-       return sprintf(buff, "%s\n", mode);
-}
-
-static ssize_t batadv_store_vis_mode(struct kobject *kobj,
-                                    struct attribute *attr, char *buff,
-                                    size_t count)
-{
-       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
-       struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       unsigned long val;
-       int ret, vis_mode_tmp = -1;
-       const char *old_mode, *new_mode;
-
-       ret = kstrtoul(buff, 10, &val);
-
-       if (((count == 2) && (!ret) &&
-            (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
-           (strncmp(buff, "client", 6) == 0) ||
-           (strncmp(buff, "off", 3) == 0))
-               vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
-
-       if (((count == 2) && (!ret) &&
-            (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
-           (strncmp(buff, "server", 6) == 0))
-               vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
-
-       if (vis_mode_tmp < 0) {
-               if (buff[count - 1] == '\n')
-                       buff[count - 1] = '\0';
-
-               batadv_info(net_dev,
-                           "Invalid parameter for 'vis mode' setting received: %s\n",
-                           buff);
-               return -EINVAL;
-       }
-
-       if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
-               return count;
-
-       if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               old_mode =  "client";
-       else
-               old_mode = "server";
-
-       if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               new_mode =  "client";
-       else
-               new_mode = "server";
-
-       batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
-                   new_mode);
-
-       atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
-       return count;
-}
-
 static ssize_t batadv_show_bat_algo(struct kobject *kobj,
                                    struct attribute *attr, char *buff)
 {
@@ -390,6 +414,7 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
         */
        batadv_gw_check_client_stop(bat_priv);
        atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
+       batadv_gw_tvlv_container_update(bat_priv);
        return count;
 }
 
@@ -397,15 +422,13 @@ static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
                                     struct attribute *attr, char *buff)
 {
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-       int down, up;
-       int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
-
-       batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
-       return sprintf(buff, "%i%s/%i%s\n",
-                      (down > 2048 ? down / 1024 : down),
-                      (down > 2048 ? "MBit" : "KBit"),
-                      (up > 2048 ? up / 1024 : up),
-                      (up > 2048 ? "MBit" : "KBit"));
+       uint32_t down, up;
+
+       down = atomic_read(&bat_priv->gw.bandwidth_down);
+       up = atomic_read(&bat_priv->gw.bandwidth_up);
+
+       return sprintf(buff, "%u.%u/%u.%u MBit\n", down / 10,
+                      down % 10, up / 10, up % 10);
 }
 
 static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
@@ -426,12 +449,10 @@ BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
 BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
-BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR,
+                    batadv_dat_status_update);
 #endif
 BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
-BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
-static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
-                  batadv_store_vis_mode);
 static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
 static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
                   batadv_store_gw_mode);
@@ -447,7 +468,8 @@ static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
 BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_NC
-BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR,
+                    batadv_nc_status_update);
 #endif
 
 static struct batadv_attribute *batadv_mesh_attrs[] = {
@@ -460,8 +482,6 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
        &batadv_attr_distributed_arp_table,
 #endif
        &batadv_attr_fragmentation,
-       &batadv_attr_ap_isolation,
-       &batadv_attr_vis_mode,
        &batadv_attr_routing_algo,
        &batadv_attr_gw_mode,
        &batadv_attr_orig_interval,
@@ -477,6 +497,16 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
        NULL,
 };
 
+BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+
+/**
+ * batadv_vlan_attrs - array of vlan specific sysfs attributes
+ */
+static struct batadv_attribute *batadv_vlan_attrs[] = {
+       &batadv_attr_vlan_ap_isolation,
+       NULL,
+};
+
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
        struct kobject *batif_kobject = &dev->dev.kobj;
@@ -527,6 +557,80 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
        bat_priv->mesh_obj = NULL;
 }
 
+/**
+ * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * @dev: netdev of the mesh interface
+ * @vlan: private data of the newly added VLAN interface
+ *
+ * Returns 0 on success and -ENOMEM if any of the structure allocations fails.
+ */
+int batadv_sysfs_add_vlan(struct net_device *dev,
+                         struct batadv_softif_vlan *vlan)
+{
+       char vlan_subdir[sizeof(BATADV_SYSFS_VLAN_SUBDIR_PREFIX) + 5];
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_attribute **bat_attr;
+       int err;
+
+       if (vlan->vid & BATADV_VLAN_HAS_TAG) {
+               sprintf(vlan_subdir, BATADV_SYSFS_VLAN_SUBDIR_PREFIX "%hu",
+                       vlan->vid & VLAN_VID_MASK);
+
+               vlan->kobj = kobject_create_and_add(vlan_subdir,
+                                                   bat_priv->mesh_obj);
+               if (!vlan->kobj) {
+                       batadv_err(dev, "Can't add sysfs directory: %s/%s\n",
+                                  dev->name, vlan_subdir);
+                       goto out;
+               }
+       } else {
+               /* the untagged VLAN uses the root folder to store its "VLAN
+                * specific attributes"
+                */
+               vlan->kobj = bat_priv->mesh_obj;
+               kobject_get(bat_priv->mesh_obj);
+       }
+
+       for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) {
+               err = sysfs_create_file(vlan->kobj,
+                                       &((*bat_attr)->attr));
+               if (err) {
+                       batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+                                  dev->name, vlan_subdir,
+                                  ((*bat_attr)->attr).name);
+                       goto rem_attr;
+               }
+       }
+
+       return 0;
+
+rem_attr:
+       for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+               sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+       kobject_put(vlan->kobj);
+       vlan->kobj = NULL;
+out:
+       return -ENOMEM;
+}
+
+/**
+ * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the private data of the VLAN to destroy
+ */
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+                          struct batadv_softif_vlan *vlan)
+{
+       struct batadv_attribute **bat_attr;
+
+       for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+               sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+       kobject_put(vlan->kobj);
+       vlan->kobj = NULL;
+}
+
 static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
                                      struct attribute *attr, char *buff)
 {
index 479acf4c16f47d1068698d15362099459544af79..c7d725de50ade81593c0913ab5d868b23383058c 100644 (file)
 
 #define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
 #define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
+/**
+ * BATADV_SYSFS_VLAN_SUBDIR_PREFIX - prefix of the subfolder that will be
+ *  created in the sysfs hierarchy for each VLAN interface. The subfolder will
+ *  be named "BATADV_SYSFS_VLAN_SUBDIR_PREFIX%vid".
+ */
+#define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
 struct batadv_attribute {
        struct attribute attr;
@@ -36,6 +42,10 @@ void batadv_sysfs_del_meshif(struct net_device *dev);
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
                            struct net_device *dev);
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_sysfs_add_vlan(struct net_device *dev,
+                         struct batadv_softif_vlan *vlan);
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+                          struct batadv_softif_vlan *vlan);
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
                        enum batadv_uev_action action, const char *data);
 
index 34510f38708fbd192ac5be8d4911e456c5d8ab50..4add57d4857f11e5ea9edfa13aae1f46dab3e4f9 100644 (file)
 #include "routing.h"
 #include "bridge_loop_avoidance.h"
 
-#include <linux/crc16.h>
+#include <linux/crc32c.h>
 
 /* hash class keys */
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
 static struct lock_class_key batadv_tt_global_hash_lock_class_key;
 
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+                                unsigned short vid,
                                 struct batadv_orig_node *orig_node);
 static void batadv_tt_purge(struct work_struct *work);
 static void
@@ -41,7 +42,8 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
                                 const unsigned char *addr,
-                                const char *message, bool roaming);
+                                unsigned short vid, const char *message,
+                                bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -52,43 +54,93 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+/**
+ * batadv_choose_tt - return the index of the tt entry in the hash table
+ * @data: pointer to the tt_common_entry object to map
+ * @size: the size of the hash table
+ *
+ * Returns the hash index at which the object represented by 'data' should be
+ * stored.
+ */
+static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+{
+       struct batadv_tt_common_entry *tt;
+       uint32_t hash = 0;
+
+       tt = (struct batadv_tt_common_entry *)data;
+       hash = batadv_hash_bytes(hash, &tt->addr, ETH_ALEN);
+       hash = batadv_hash_bytes(hash, &tt->vid, sizeof(tt->vid));
+
+       hash += (hash << 3);
+       hash ^= (hash >> 11);
+       hash += (hash << 15);
+
+       return hash % size;
+}
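
Because both the MAC address and the VLAN id feed into the hash, the same client learned on two different VLANs will usually land in different buckets. A small standalone sketch of the index computation; the byte-mixing helper below is an assumed stand-in for batadv_hash_bytes() (defined in hash.h, not part of this diff), and only the overall structure mirrors batadv_choose_tt():

	/* assumed stand-in for batadv_hash_bytes(): one-at-a-time byte mixing */
	static uint32_t sketch_hash_bytes(uint32_t hash, const void *data,
					  uint32_t size)
	{
		const unsigned char *key = data;
		uint32_t i;

		for (i = 0; i < size; i++) {
			hash += key[i];
			hash += (hash << 10);
			hash ^= (hash >> 6);
		}
		return hash;
	}

	static uint32_t sketch_choose_tt(const uint8_t *addr, unsigned short vid,
					 uint32_t table_size)
	{
		uint32_t hash = 0;

		hash = sketch_hash_bytes(hash, addr, 6 /* ETH_ALEN */);
		hash = sketch_hash_bytes(hash, &vid, sizeof(vid));

		/* final avalanche, as in batadv_choose_tt() above */
		hash += (hash << 3);
		hash ^= (hash >> 11);
		hash += (hash << 15);

		return hash % table_size;
	}
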
+
+/**
+ * batadv_tt_hash_find - look for a client in the given hash table
+ * @hash: the hash table to search
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the tt_common struct belonging to the searched client if
+ * found, NULL otherwise.
+ */
 static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
+batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+                   unsigned short vid)
 {
        struct hlist_head *head;
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
+       struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
        uint32_t index;
 
        if (!hash)
                return NULL;
 
-       index = batadv_choose_orig(data, hash->size);
+       memcpy(to_search.addr, addr, ETH_ALEN);
+       to_search.vid = vid;
+
+       index = batadv_choose_tt(&to_search, hash->size);
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
-               if (!batadv_compare_eth(tt_common_entry, data))
+       hlist_for_each_entry_rcu(tt, head, hash_entry) {
+               if (!batadv_compare_eth(tt, addr))
+                       continue;
+
+               if (tt->vid != vid)
                        continue;
 
-               if (!atomic_inc_not_zero(&tt_common_entry->refcount))
+               if (!atomic_inc_not_zero(&tt->refcount))
                        continue;
 
-               tt_common_entry_tmp = tt_common_entry;
+               tt_tmp = tt;
                break;
        }
        rcu_read_unlock();
 
-       return tt_common_entry_tmp;
+       return tt_tmp;
 }
 
+/**
+ * batadv_tt_local_hash_find - search the local table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_local_entry struct if the client is
+ * found, NULL otherwise.
+ */
 static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+                         unsigned short vid)
 {
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr,
+                                             vid);
        if (tt_common_entry)
                tt_local_entry = container_of(tt_common_entry,
                                              struct batadv_tt_local_entry,
@@ -96,13 +148,24 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
        return tt_local_entry;
 }
 
+/**
+ * batadv_tt_global_hash_find - search the global table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_global_entry struct if the client
+ * is found, NULL otherwise.
+ */
 static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+                          unsigned short vid)
 {
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr,
+                                             vid);
        if (tt_common_entry)
                tt_global_entry = container_of(tt_common_entry,
                                               struct batadv_tt_global_entry,
@@ -117,25 +180,17 @@ batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
                kfree_rcu(tt_local_entry, common.rcu);
 }
 
-static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
-{
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_global_entry *tt_global_entry;
-
-       tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
-       tt_global_entry = container_of(tt_common_entry,
-                                      struct batadv_tt_global_entry, common);
-
-       kfree(tt_global_entry);
-}
-
+/**
+ * batadv_tt_global_entry_free_ref - decrement the refcounter for a
+ *  tt_global_entry and possibly free it
+ * @tt_global_entry: the object to free
+ */
 static void
 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
 {
        if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
                batadv_tt_global_del_orig_list(tt_global_entry);
-               call_rcu(&tt_global_entry->common.rcu,
-                        batadv_tt_global_entry_free_rcu);
+               kfree_rcu(tt_global_entry, common.rcu);
        }
 }
 
@@ -153,13 +208,107 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        kfree(orig_entry);
 }
 
+/**
+ * batadv_tt_local_size_mod - add v to the size of the local table identified
+ *  by vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier of the sub-table to change
+ * @v: the amount to add to the local table size
+ */
+static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
+                                    unsigned short vid, int v)
+{
+       struct batadv_softif_vlan *vlan;
+
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               return;
+
+       atomic_add(v, &vlan->tt.num_entries);
+
+       batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_local_size_inc - increase by one the local table size for the given
+ *  vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
+                                    unsigned short vid)
+{
+       batadv_tt_local_size_mod(bat_priv, vid, 1);
+}
+
+/**
+ * batadv_tt_local_size_dec - decrease by one the local table size for the given
+ *  vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
+                                    unsigned short vid)
+{
+       batadv_tt_local_size_mod(bat_priv, vid, -1);
+}
+
+/**
+ * batadv_tt_global_size_mod - add v to the size of the global table
+ *  identified by vid for the given originator
+ * @orig_node: the originator whose global table size has to be changed
+ * @vid: the VLAN identifier
+ * @v: the amount to add to the global table size
+ */
+static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
+                                     unsigned short vid, int v)
+{
+       struct batadv_orig_node_vlan *vlan;
+
+       vlan = batadv_orig_node_vlan_new(orig_node, vid);
+       if (!vlan)
+               return;
+
+       if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
+               spin_lock_bh(&orig_node->vlan_list_lock);
+               list_del_rcu(&vlan->list);
+               spin_unlock_bh(&orig_node->vlan_list_lock);
+               batadv_orig_node_vlan_free_ref(vlan);
+       }
+
+       batadv_orig_node_vlan_free_ref(vlan);
+}
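
The two batadv_orig_node_vlan_free_ref() calls above are deliberate: batadv_orig_node_vlan_new() returns the per-originator vlan object with an extra reference for the caller, and once the entry count drops to zero the reference held by the originator's vlan_list is released as well. An annotated sketch of that flow, restating the function above rather than adding anything new:

	vlan = batadv_orig_node_vlan_new(orig_node, vid);	/* +1 reference for the caller */

	if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
		/* last TT entry for this vlan is gone: unlink it and drop
		 * the reference held by orig_node->vlan_list
		 */
		list_del_rcu(&vlan->list);
		batadv_orig_node_vlan_free_ref(vlan);
	}

	batadv_orig_node_vlan_free_ref(vlan);			/* drop the caller's reference */
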
+
+/**
+ * batadv_tt_global_size_inc - increase by one the global table size for the
+ *  given vid
+ * @orig_node: the originator whose global table size has to be increased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
+                                     unsigned short vid)
+{
+       batadv_tt_global_size_mod(orig_node, vid, 1);
+}
+
+/**
+ * batadv_tt_global_size_dec - decrease by one the global table size for the
+ *  given vid
+ * @orig_node: the originator whose global table size has to be decreased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
+                                     unsigned short vid)
+{
+       batadv_tt_global_size_mod(orig_node, vid, -1);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
        if (!atomic_dec_and_test(&orig_entry->refcount))
                return;
-       /* to avoid race conditions, immediately decrease the tt counter */
-       atomic_dec(&orig_entry->orig_node->tt_size);
+
        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
 }
 
@@ -180,12 +329,13 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        bool del_op_requested, del_op_entry;
 
        tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
-
        if (!tt_change_node)
                return;
 
        tt_change_node->change.flags = flags;
+       tt_change_node->change.reserved = 0;
        memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
+       tt_change_node->change.vid = htons(common->vid);
 
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
@@ -208,6 +358,13 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
                        goto del;
                if (del_op_requested && !del_op_entry)
                        goto del;
+
+               /* this is a second add in the same originator interval. It
+                * means that flags have been changed: update them!
+                */
+               if (!del_op_requested && !del_op_entry)
+                       entry->change.flags = flags;
+
                continue;
 del:
                list_del(&entry->list);
@@ -229,9 +386,55 @@ unlock:
                atomic_inc(&bat_priv->tt.local_changes);
 }
 
-int batadv_tt_len(int changes_num)
+/**
+ * batadv_tt_len - compute length in bytes of given number of tt changes
+ * @changes_num: number of tt changes
+ *
+ * Returns computed length in bytes.
+ */
+static int batadv_tt_len(int changes_num)
+{
+       return changes_num * sizeof(struct batadv_tvlv_tt_change);
+}
+
+/**
+ * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * @tt_len: available space
+ *
+ * Returns the number of entries.
+ */
+static uint16_t batadv_tt_entries(uint16_t tt_len)
+{
+       return tt_len / batadv_tt_len(1);
+}
+
+/**
+ * batadv_tt_local_table_transmit_size - calculates the local translation table
+ *  size when transmitted over the air
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns local translation table size in bytes.
+ */
+static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
 {
-       return changes_num * sizeof(struct batadv_tt_change);
+       uint16_t num_vlan = 0, tt_local_entries = 0;
+       struct batadv_softif_vlan *vlan;
+       int hdr_size;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               num_vlan++;
+               tt_local_entries += atomic_read(&vlan->tt.num_entries);
+       }
+       rcu_read_unlock();
+
+       /* header size of tvlv encapsulated tt response payload */
+       hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
+       hdr_size += sizeof(struct batadv_tvlv_hdr);
+       hdr_size += sizeof(struct batadv_tvlv_tt_data);
+       hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);
+
+       return hdr_size + batadv_tt_len(tt_local_entries);
 }
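
As a rough sanity check of what this computation yields, here is a self-contained sketch with assumed struct sizes; the real sizeof() values come from packet.h and will differ:

	/* all sizes below are illustrative assumptions, not the real layouts */
	static int sketch_tt_transmit_size(uint16_t num_vlan,
					   uint16_t tt_local_entries)
	{
		const int unicast_tvlv = 20, tvlv_hdr = 4, tt_data = 4;
		const int tt_vlan_data = 8, tt_change = 12;
		int hdr_size;

		hdr_size = unicast_tvlv + tvlv_hdr + tt_data;
		hdr_size += num_vlan * tt_vlan_data;

		return hdr_size + tt_local_entries * tt_change;
	}

With two VLANs and ten local clients this sketch gives 28 + 16 + 120 = 164 bytes, the figure that batadv_tt_local_add() below compares against packet_size_max before accepting another client.
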
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
@@ -255,33 +458,51 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
                                  const char *message)
 {
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Deleting global tt entry %pM: %s\n",
-                  tt_global->common.addr, message);
+                  "Deleting global tt entry %pM (vid: %d): %s\n",
+                  tt_global->common.addr,
+                  BATADV_PRINT_VID(tt_global->common.vid), message);
 
        batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-                          batadv_choose_orig, tt_global->common.addr);
+                          batadv_choose_tt, &tt_global->common);
        batadv_tt_global_entry_free_ref(tt_global);
 }
 
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-                        int ifindex)
+/**
+ * batadv_tt_local_add - add a new client to the local table or update an
+ *  existing client
+ * @soft_iface: netdev struct of the mesh interface
+ * @addr: the mac address of the client to add
+ * @vid: VLAN identifier
+ * @ifindex: index of the interface the client is connected to (useful to
+ *  identify wireless clients)
+ *
+ * Returns true if the client was successfully added, false otherwise.
+ */
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+                        unsigned short vid, int ifindex)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global;
+       struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
-       int hash_added;
-       bool roamed_back = false;
+       int hash_added, table_size, packet_size_max;
+       bool ret = false, roamed_back = false;
+       uint8_t remote_flags;
 
-       tt_local = batadv_tt_local_hash_find(bat_priv, addr);
-       tt_global = batadv_tt_global_hash_find(bat_priv, addr);
+       if (ifindex != BATADV_NULL_IFINDEX)
+               in_dev = dev_get_by_index(&init_net, ifindex);
+
+       tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
+       tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
 
        if (tt_local) {
                tt_local->last_seen = jiffies;
                if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Re-adding pending client %pM\n", addr);
+                                  "Re-adding pending client %pM (vid: %d)\n",
+                                  addr, BATADV_PRINT_VID(vid));
                        /* whatever the reason why the PENDING flag was set,
                         * this is a client which was enqueued to be removed in
                         * this orig_interval. Since it popped up again, the
@@ -293,8 +514,8 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
                if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Roaming client %pM came back to its original location\n",
-                                  addr);
+                                  "Roaming client %pM (vid: %d) came back to its original location\n",
+                                  addr, BATADV_PRINT_VID(vid));
                        /* the ROAM flag is set because this client roamed away
                         * and the node got a roaming_advertisement message. Now
                         * that the client popped up again at its original
@@ -306,12 +527,24 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                goto check_roaming;
        }
 
+       /* Ignore the client if we cannot send it in a full table response. */
+       table_size = batadv_tt_local_table_transmit_size(bat_priv);
+       table_size += batadv_tt_len(1);
+       packet_size_max = atomic_read(&bat_priv->packet_size_max);
+       if (table_size > packet_size_max) {
+               net_ratelimited_function(batadv_info, soft_iface,
+                                        "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
+                                        table_size, packet_size_max, addr);
+               goto out;
+       }
+
        tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
        if (!tt_local)
                goto out;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+                  "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+                  addr, BATADV_PRINT_VID(vid),
                   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        memcpy(tt_local->common.addr, addr, ETH_ALEN);
@@ -320,7 +553,8 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
         * (consistency check)
         */
        tt_local->common.flags = BATADV_TT_CLIENT_NEW;
-       if (batadv_is_wifi_iface(ifindex))
+       tt_local->common.vid = vid;
+       if (batadv_is_wifi_netdev(in_dev))
                tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
        atomic_set(&tt_local->common.refcount, 2);
        tt_local->last_seen = jiffies;
@@ -331,7 +565,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
        hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
-                                    batadv_choose_orig, &tt_local->common,
+                                    batadv_choose_tt, &tt_local->common,
                                     &tt_local->common.hash_entry);
 
        if (unlikely(hash_added != 0)) {
@@ -353,6 +587,7 @@ check_roaming:
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_entry, head, list) {
                        batadv_send_roam_adv(bat_priv, tt_global->common.addr,
+                                            tt_global->common.vid,
                                             orig_entry->orig_node);
                }
                rcu_read_unlock();
@@ -369,78 +604,219 @@ check_roaming:
                }
        }
 
+       /* store the current remote flags before altering them. This helps
+        * to understand whether the flags are changing or not
+        */
+       remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
+
+       if (batadv_is_wifi_netdev(in_dev))
+               tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+       else
+               tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI;
+
+       /* if any "dynamic" flag has been modified, resend an ADD event for this
+        * entry so that all the nodes can get the new flags
+        */
+       if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
+               batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
+       ret = true;
 out:
+       if (in_dev)
+               dev_put(in_dev);
        if (tt_local)
                batadv_tt_local_entry_free_ref(tt_local);
        if (tt_global)
                batadv_tt_global_entry_free_ref(tt_global);
+       return ret;
 }
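
A small sketch of the flag-resync step closing batadv_tt_local_add() above: snapshot the remotely visible flags, update the WIFI bit, and report whether an ADD event is needed. The bit values and the mask below are illustrative assumptions, not the kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative bit values; the kernel defines the real ones in packet.h */
#define TT_CLIENT_WIFI (1 << 4)
#define TT_REMOTE_MASK 0x00ff

static bool resync_flags(uint16_t *flags, bool is_wifi)
{
        uint16_t remote_flags = *flags & TT_REMOTE_MASK;

        if (is_wifi)
                *flags |= TT_CLIENT_WIFI;
        else
                *flags &= ~TT_CLIENT_WIFI;

        /* an ADD event is needed only if a remotely visible bit changed */
        return remote_flags != (*flags & TT_REMOTE_MASK);
}

int main(void)
{
        uint16_t flags = 0;

        printf("event needed: %d\n", resync_flags(&flags, true)); /* 1 */
        printf("event needed: %d\n", resync_flags(&flags, true)); /* 0 */
        return 0;
}
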
 
-static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
-                                         int *packet_buff_len,
-                                         int min_packet_len,
-                                         int new_packet_len)
+/**
+ * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ *  within a TT Response directed to another node
+ * @orig_node: originator for which the TT data has to be prepared
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ *  changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If negative, this
+ *  function reserves the amount of space needed to send the entire global TT
+ *  table. In case of success the value is updated with the real amount of
+ *  reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN served by the originator node.
+ *
+ * Return the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+                                  struct batadv_tvlv_tt_data **tt_data,
+                                  struct batadv_tvlv_tt_change **tt_change,
+                                  int32_t *tt_len)
 {
-       unsigned char *new_buff;
+       uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       struct batadv_orig_node_vlan *vlan;
+       uint8_t *tt_change_ptr;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+               num_vlan++;
+               num_entries += atomic_read(&vlan->tt.num_entries);
+       }
+
+       change_offset = sizeof(**tt_data);
+       change_offset += num_vlan * sizeof(*tt_vlan);
 
-       new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+       /* if tt_len is negative, allocate the space needed by the full table */
+       if (*tt_len < 0)
+               *tt_len = batadv_tt_len(num_entries);
 
-       /* keep old buffer if kmalloc should fail */
-       if (new_buff) {
-               memcpy(new_buff, *packet_buff, min_packet_len);
-               kfree(*packet_buff);
-               *packet_buff = new_buff;
-               *packet_buff_len = new_packet_len;
+       tvlv_len = *tt_len;
+       tvlv_len += change_offset;
+
+       *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+       if (!*tt_data) {
+               *tt_len = 0;
+               goto out;
+       }
+
+       (*tt_data)->flags = BATADV_NO_FLAGS;
+       (*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
+       (*tt_data)->num_vlan = htons(num_vlan);
+
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+       list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+               tt_vlan->vid = htons(vlan->vid);
+               tt_vlan->crc = htonl(vlan->tt.crc);
+
+               tt_vlan++;
        }
+
+       tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+       *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+       rcu_read_unlock();
+       return tvlv_len;
 }
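
A self-contained sketch of the buffer layout the two prepare helpers write: one tt_data header, num_vlan VLAN descriptors right behind it, then the change area at change_offset. The struct definitions are simplified stand-ins for the batadv_tvlv_tt_* structures, not their real layouts.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified stand-ins for the batadv_tvlv_tt_* structures */
struct tvlv_tt_data {
        uint8_t  flags;
        uint8_t  ttvn;
        uint16_t num_vlan; /* network byte order in the real code */
};

struct tvlv_tt_vlan_data {
        uint32_t crc;
        uint16_t vid;
        uint16_t reserved;
};

struct tvlv_tt_change {
        uint8_t  flags;
        uint8_t  reserved;
        uint8_t  addr[6];
        uint16_t vid;
};

int main(void)
{
        uint16_t num_vlan = 2, num_entries = 3;
        size_t change_offset, tvlv_len;
        struct tvlv_tt_data *tt_data;
        struct tvlv_tt_vlan_data *tt_vlan;
        struct tvlv_tt_change *tt_change;

        /* header made up by one tt_data object plus one vlan_data per VLAN */
        change_offset = sizeof(*tt_data) + num_vlan * sizeof(*tt_vlan);
        tvlv_len = change_offset + num_entries * sizeof(*tt_change);

        tt_data = calloc(1, tvlv_len);
        if (!tt_data)
                return 1;

        tt_data->num_vlan = num_vlan;

        /* the VLAN descriptors start right after the tt_data header ... */
        tt_vlan = (struct tvlv_tt_vlan_data *)(tt_data + 1);
        tt_vlan[0].vid = 1;
        tt_vlan[1].vid = 2;

        /* ... and the change area starts after the last VLAN descriptor */
        tt_change = (struct tvlv_tt_change *)((uint8_t *)tt_data + change_offset);
        memset(tt_change[0].addr, 0xaa, sizeof(tt_change[0].addr));

        printf("tvlv_len=%zu change_offset=%zu\n", tvlv_len, change_offset);
        free(tt_data);
        return 0;
}
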
 
-static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
-                                         unsigned char **packet_buff,
-                                         int *packet_buff_len,
-                                         int min_packet_len)
-{
-       int req_len;
+/**
+ * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
+ *  node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ *  changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If negative, this
+ *  function reserves the amount of space needed to send the entire local TT
+ *  table. In case of success the value is updated with the real amount of
+ *  reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN.
+ *
+ * Return the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+                                 struct batadv_tvlv_tt_data **tt_data,
+                                 struct batadv_tvlv_tt_change **tt_change,
+                                 int32_t *tt_len)
+{
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       struct batadv_softif_vlan *vlan;
+       uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
+       uint8_t *tt_change_ptr;
+       int change_offset;
 
-       req_len = min_packet_len;
-       req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               num_vlan++;
+               num_entries += atomic_read(&vlan->tt.num_entries);
+       }
 
-       /* if we have too many changes for one packet don't send any
-        * and wait for the tt table request which will be fragmented
-        */
-       if (req_len > bat_priv->soft_iface->mtu)
-               req_len = min_packet_len;
+       change_offset = sizeof(**tt_data);
+       change_offset += num_vlan * sizeof(*tt_vlan);
+
+       /* if tt_len is negative, allocate the space needed by the full table */
+       if (*tt_len < 0)
+               *tt_len = batadv_tt_len(num_entries);
+
+       tvlv_len = *tt_len;
+       tvlv_len += change_offset;
+
+       *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+       if (!*tt_data) {
+               tvlv_len = 0;
+               goto out;
+       }
+
+       (*tt_data)->flags = BATADV_NO_FLAGS;
+       (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
+       (*tt_data)->num_vlan = htons(num_vlan);
+
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               tt_vlan->vid = htons(vlan->vid);
+               tt_vlan->crc = htonl(vlan->tt.crc);
 
-       batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
-                                     min_packet_len, req_len);
+               tt_vlan++;
+       }
+
+       tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+       *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+       rcu_read_unlock();
+       return tvlv_len;
 }
 
-static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
-                                      unsigned char **packet_buff,
-                                      int *packet_buff_len,
-                                      int min_packet_len)
+/**
+ * batadv_tt_tvlv_container_update - update the translation table tvlv container
+ *  after local tt changes have been committed
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_change_node *entry, *safe;
-       int count = 0, tot_changes = 0, new_len;
-       unsigned char *tt_buff;
+       struct batadv_tvlv_tt_data *tt_data;
+       struct batadv_tvlv_tt_change *tt_change;
+       int tt_diff_len, tt_change_len = 0;
+       int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
+       uint16_t tvlv_len;
+
+       tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+       tt_diff_len = batadv_tt_len(tt_diff_entries_num);
+
+       /* if we have too many changes for one packet don't send any
+        * and wait for the tt table request which will be fragmented
+        */
+       if (tt_diff_len > bat_priv->soft_iface->mtu)
+               tt_diff_len = 0;
 
-       batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
-                                     packet_buff_len, min_packet_len);
+       tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+                                                    &tt_change, &tt_diff_len);
+       if (!tvlv_len)
+               return;
 
-       new_len = *packet_buff_len - min_packet_len;
-       tt_buff = *packet_buff + min_packet_len;
+       tt_data->flags = BATADV_TT_OGM_DIFF;
 
-       if (new_len > 0)
-               tot_changes = new_len / batadv_tt_len(1);
+       if (tt_diff_len == 0)
+               goto container_register;
 
        spin_lock_bh(&bat_priv->tt.changes_list_lock);
        atomic_set(&bat_priv->tt.local_changes, 0);
 
        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
-               if (count < tot_changes) {
-                       memcpy(tt_buff + batadv_tt_len(count),
-                              &entry->change, sizeof(struct batadv_tt_change));
-                       count++;
+               if (tt_diff_entries_count < tt_diff_entries_num) {
+                       memcpy(tt_change + tt_diff_entries_count,
+                              &entry->change,
+                              sizeof(struct batadv_tvlv_tt_change));
+                       tt_diff_entries_count++;
                }
                list_del(&entry->list);
                kfree(entry);
@@ -452,20 +828,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
        kfree(bat_priv->tt.last_changeset);
        bat_priv->tt.last_changeset_len = 0;
        bat_priv->tt.last_changeset = NULL;
+       tt_change_len = batadv_tt_len(tt_diff_entries_count);
        /* check whether this new OGM has no changes due to size problems */
-       if (new_len > 0) {
+       if (tt_diff_entries_count > 0) {
                /* if kmalloc() fails we will reply with the full table
                 * instead of providing the diff
                 */
-               bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+               bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC);
                if (bat_priv->tt.last_changeset) {
-                       memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
-                       bat_priv->tt.last_changeset_len = new_len;
+                       memcpy(bat_priv->tt.last_changeset,
+                              tt_change, tt_change_len);
+                       bat_priv->tt.last_changeset_len = tt_diff_len;
                }
        }
        spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
-       return count;
+container_register:
+       batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+                                      tvlv_len);
+       kfree(tt_data);
 }
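
A minimal sketch, under assumed sizes, of the policy applied above: if the pending diff would not fit into one packet, no diff is advertised in the OGM container and neighbours have to fall back to a (possibly fragmented) full-table request.

#include <stdio.h>

#define TT_CHANGE_SIZE 12 /* assumed size of one change entry */

int main(void)
{
        int mtu = 1500;          /* assumed soft interface MTU */
        int local_changes = 200; /* pending tt changes in this interval */
        int tt_diff_len = local_changes * TT_CHANGE_SIZE;

        /* same policy as batadv_tt_tvlv_container_update(): too many changes
         * for one packet -> advertise no diff and let the neighbours ask for
         * the full table via a TT request instead
         */
        if (tt_diff_len > mtu)
                tt_diff_len = 0;

        printf("diff bytes carried in the OGM container: %d\n", tt_diff_len);
        return 0;
}
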
 
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -476,7 +857,9 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
+       struct batadv_softif_vlan *vlan;
        struct hlist_head *head;
+       unsigned short vid;
        uint32_t i;
        int last_seen_secs;
        int last_seen_msecs;
@@ -489,11 +872,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq,
-                  "Locally retrieved addresses (from %s) announced via TT (TTVN: %u CRC: %#.4x):\n",
-                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn),
-                  bat_priv->tt.local_crc);
-       seq_printf(seq, "       %-13s %-7s %-10s\n", "Client", "Flags",
-                  "Last seen");
+                  "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
+                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+       seq_printf(seq, "       %-13s  %s %-7s %-9s (%-10s)\n", "Client", "VID",
+                  "Flags", "Last seen", "CRC");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -504,6 +886,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
+                       vid = tt_common_entry->vid;
                        last_seen_jiffies = jiffies - tt_local->last_seen;
                        last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
                        last_seen_secs = last_seen_msecs / 1000;
@@ -511,8 +894,17 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
                        no_purge = tt_common_entry->flags & np_flag;
 
-                       seq_printf(seq, " * %pM [%c%c%c%c%c] %3u.%03u\n",
+                       vlan = batadv_softif_vlan_get(bat_priv, vid);
+                       if (!vlan) {
+                               seq_printf(seq, "Cannot retrieve VLAN %d\n",
+                                          BATADV_PRINT_VID(vid));
+                               continue;
+                       }
+
+                       seq_printf(seq,
+                                  " * %pM %4i [%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
+                                  BATADV_PRINT_VID(tt_common_entry->vid),
                                   (tt_common_entry->flags &
                                    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
                                   no_purge ? 'P' : '.',
@@ -523,7 +915,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                                   (tt_common_entry->flags &
                                    BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
                                   no_purge ? 0 : last_seen_secs,
-                                  no_purge ? 0 : last_seen_msecs);
+                                  no_purge ? 0 : last_seen_msecs,
+                                  vlan->tt.crc);
+
+                       batadv_softif_vlan_free_ref(vlan);
                }
                rcu_read_unlock();
        }
@@ -547,27 +942,29 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
        tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Local tt entry (%pM) pending to be removed: %s\n",
-                  tt_local_entry->common.addr, message);
+                  "Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
+                  tt_local_entry->common.addr,
+                  BATADV_PRINT_VID(tt_local_entry->common.vid), message);
 }
 
 /**
  * batadv_tt_local_remove - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
+ * @vid: VLAN identifier
  * @message: message to append to the log on deletion
  * @roaming: true if the deletion is due to a roaming event
  *
  * Returns the flags assigned to the local entry before being deleted
  */
 uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-                               const uint8_t *addr, const char *message,
-                               bool roaming)
+                               const uint8_t *addr, unsigned short vid,
+                               const char *message, bool roaming)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
                goto out;
 
@@ -603,8 +1000,16 @@ out:
        return curr_flags;
 }
 
+/**
+ * batadv_tt_local_purge_list - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @head: pointer to the list containing the local tt entries
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ *  inactive or not
+ */
 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
-                                      struct hlist_head *head)
+                                      struct hlist_head *head,
+                                      int timeout)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        struct batadv_tt_common_entry *tt_common_entry;
@@ -622,8 +1027,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
                if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
                        continue;
 
-               if (!batadv_has_timed_out(tt_local_entry->last_seen,
-                                         BATADV_TT_LOCAL_TIMEOUT))
+               if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
                        continue;
 
                batadv_tt_local_set_pending(bat_priv, tt_local_entry,
@@ -631,7 +1035,14 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
        }
 }
 
-static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_purge - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ *  inactive or not
+ */
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
+                                 int timeout)
 {
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct hlist_head *head;
@@ -643,7 +1054,7 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               batadv_tt_local_purge_list(bat_priv, head);
+               batadv_tt_local_purge_list(bat_priv, head, timeout);
                spin_unlock_bh(list_lock);
        }
 }
@@ -784,7 +1195,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 
        INIT_HLIST_NODE(&orig_entry->list);
        atomic_inc(&orig_node->refcount);
-       atomic_inc(&orig_node->tt_size);
+       batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
        atomic_set(&orig_entry->refcount, 2);
@@ -803,6 +1214,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
+ * @vid: VLAN identifier
  * @flags: TT flags that have to be set for this non-mesh client
  * @ttvn: the tt version number ever announcing this non-mesh client
  *
@@ -813,21 +1225,28 @@ out:
  * If a TT local entry exists for this non-mesh client remove it.
  *
  * The caller must hold orig_node refcount.
+ *
+ * Return true if the new entry has been added, false otherwise
  */
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
-                        struct batadv_orig_node *orig_node,
-                        const unsigned char *tt_addr, uint16_t flags,
-                        uint8_t ttvn)
+static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+                                struct batadv_orig_node *orig_node,
+                                const unsigned char *tt_addr,
+                                unsigned short vid, uint16_t flags,
+                                uint8_t ttvn)
 {
        struct batadv_tt_global_entry *tt_global_entry;
        struct batadv_tt_local_entry *tt_local_entry;
-       int ret = 0;
+       bool ret = false;
        int hash_added;
        struct batadv_tt_common_entry *common;
        uint16_t local_flags;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr);
+       /* ignore global entries from backbone nodes */
+       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
+               return true;
+
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid);
 
        /* if the node already has a local client for this entry, it has to wait
         * for a roaming advertisement instead of manually messing up the global
@@ -844,6 +1263,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
 
                common = &tt_global_entry->common;
                memcpy(common->addr, tt_addr, ETH_ALEN);
+               common->vid = vid;
 
                common->flags = flags;
                tt_global_entry->roam_at = 0;
@@ -861,7 +1281,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
 
                hash_added = batadv_hash_add(bat_priv->tt.global_hash,
                                             batadv_compare_tt,
-                                            batadv_choose_orig, common,
+                                            batadv_choose_tt, common,
                                             &common->hash_entry);
 
                if (unlikely(hash_added != 0)) {
@@ -920,14 +1340,15 @@ add_orig_entry:
        batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Creating new global tt entry: %pM (via %pM)\n",
-                  common->addr, orig_node->orig);
-       ret = 1;
+                  "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
+                  common->addr, BATADV_PRINT_VID(common->vid),
+                  orig_node->orig);
+       ret = true;
 
 out_remove:
 
        /* remove address from local hash if present */
-       local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
+       local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
                                             "global tt received",
                                             flags & BATADV_TT_CLIENT_ROAM);
        tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
@@ -947,18 +1368,20 @@ out:
 }
 
 /* batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
 * This function assumes the caller holds rcu_read_lock().
  * Returns best originator list entry or NULL on errors.
  */
 static struct batadv_tt_orig_list_entry *
-batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
+batadv_transtable_best_orig(struct batadv_priv *bat_priv,
+                           struct batadv_tt_global_entry *tt_global_entry)
 {
-       struct batadv_neigh_node *router = NULL;
+       struct batadv_neigh_node *router, *best_router = NULL;
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
-       int best_tq = 0;
 
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_rcu(orig_entry, head, list) {
@@ -966,64 +1389,104 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
                if (!router)
                        continue;
 
-               if (router->tq_avg > best_tq) {
-                       best_entry = orig_entry;
-                       best_tq = router->tq_avg;
+               if (best_router &&
+                   bao->bat_neigh_cmp(router, best_router) <= 0) {
+                       batadv_neigh_node_free_ref(router);
+                       continue;
                }
 
-               batadv_neigh_node_free_ref(router);
+               /* release the refcount for the "old" best */
+               if (best_router)
+                       batadv_neigh_node_free_ref(best_router);
+
+               best_entry = orig_entry;
+               best_router = router;
        }
 
+       if (best_router)
+               batadv_neigh_node_free_ref(best_router);
+
        return best_entry;
 }
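
A sketch of the selection loop above with a hypothetical metric comparator standing in for bao->bat_neigh_cmp(); the kernel version additionally takes and releases a neighbour refcount while swapping the current best.

#include <stdio.h>

struct neigh {
        int metric; /* hypothetical metric; B.A.T.M.A.N. IV would use tq_avg */
};

struct orig_entry {
        const char *name;
        struct neigh router;
};

/* stand-in for bao->bat_neigh_cmp(): > 0 if a is better than b */
static int neigh_cmp(const struct neigh *a, const struct neigh *b)
{
        return a->metric - b->metric;
}

static const struct orig_entry *
best_orig(const struct orig_entry *entries, int n)
{
        const struct orig_entry *best = NULL;
        int i;

        for (i = 0; i < n; i++) {
                /* skip candidates that do not beat the current best */
                if (best && neigh_cmp(&entries[i].router, &best->router) <= 0)
                        continue;

                best = &entries[i];
        }

        return best;
}

int main(void)
{
        struct orig_entry entries[] = {
                { "orig-A", { 120 } },
                { "orig-B", { 200 } },
                { "orig-C", { 180 } },
        };
        const struct orig_entry *best = best_orig(entries, 3);

        printf("best originator: %s\n", best ? best->name : "none");
        return 0;
}
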
 
 /* batadv_tt_global_print_entry - print all orig nodes who announce the address
  * for this global entry
+ * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
  *
 * This function assumes the caller holds rcu_read_lock().
  */
 static void
-batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
+                            struct batadv_tt_global_entry *tt_global_entry,
                             struct seq_file *seq)
 {
-       struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
        struct batadv_tt_common_entry *tt_common_entry;
-       uint16_t flags;
+       struct batadv_orig_node_vlan *vlan;
+       struct hlist_head *head;
        uint8_t last_ttvn;
+       uint16_t flags;
 
        tt_common_entry = &tt_global_entry->common;
        flags = tt_common_entry->flags;
 
-       best_entry = batadv_transtable_best_orig(tt_global_entry);
+       best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
        if (best_entry) {
+               vlan = batadv_orig_node_vlan_get(best_entry->orig_node,
+                                                tt_common_entry->vid);
+               if (!vlan) {
+                       seq_printf(seq,
+                                  " * Cannot retrieve VLAN %d for originator %pM\n",
+                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  best_entry->orig_node->orig);
+                       goto print_list;
+               }
+
                last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
                seq_printf(seq,
-                          " %c %pM  (%3u) via %pM     (%3u)   (%#.4x) [%c%c%c]\n",
+                          " %c %pM %4i   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c]\n",
                           '*', tt_global_entry->common.addr,
+                          BATADV_PRINT_VID(tt_global_entry->common.vid),
                           best_entry->ttvn, best_entry->orig_node->orig,
-                          last_ttvn, best_entry->orig_node->tt_crc,
+                          last_ttvn, vlan->tt.crc,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
                           (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
                           (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+               batadv_orig_node_vlan_free_ref(vlan);
        }
 
+print_list:
        head = &tt_global_entry->orig_list;
 
        hlist_for_each_entry_rcu(orig_entry, head, list) {
                if (best_entry == orig_entry)
                        continue;
 
+               vlan = batadv_orig_node_vlan_get(orig_entry->orig_node,
+                                                tt_common_entry->vid);
+               if (!vlan) {
+                       seq_printf(seq,
+                                  " + Cannot retrieve VLAN %d for originator %pM\n",
+                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  orig_entry->orig_node->orig);
+                       continue;
+               }
+
                last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-               seq_printf(seq, " %c %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
+               seq_printf(seq,
+                          " %c %pM %4d   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c]\n",
                           '+', tt_global_entry->common.addr,
+                          BATADV_PRINT_VID(tt_global_entry->common.vid),
                           orig_entry->ttvn, orig_entry->orig_node->orig,
-                          last_ttvn,
+                          last_ttvn, vlan->tt.crc,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
                           (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
                           (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+               batadv_orig_node_vlan_free_ref(vlan);
        }
 }
 
@@ -1045,9 +1508,9 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Globally announced TT entries received via the mesh %s\n",
                   net_dev->name);
-       seq_printf(seq, "       %-13s %s       %-15s %s (%-6s) %s\n",
-                  "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC",
-                  "Flags");
+       seq_printf(seq, "       %-13s  %s  %s       %-15s %s (%-10s) %s\n",
+                  "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)",
+                  "CRC", "Flags");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1058,7 +1521,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
                                                 common);
-                       batadv_tt_global_print_entry(tt_global, seq);
+                       batadv_tt_global_print_entry(bat_priv, tt_global, seq);
                }
                rcu_read_unlock();
        }
@@ -1080,6 +1543,8 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_safe(orig_entry, safe, head, list) {
                hlist_del_rcu(&orig_entry->list);
+               batadv_tt_global_size_dec(orig_entry->orig_node,
+                                         tt_global_entry->common.vid);
                batadv_tt_orig_list_entry_free_ref(orig_entry);
        }
        spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1094,16 +1559,21 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
        struct hlist_head *head;
        struct hlist_node *safe;
        struct batadv_tt_orig_list_entry *orig_entry;
+       unsigned short vid;
 
        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_safe(orig_entry, safe, head, list) {
                if (orig_entry->orig_node == orig_node) {
+                       vid = tt_global_entry->common.vid;
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Deleting %pM from global tt entry %pM: %s\n",
+                                  "Deleting %pM from global tt entry %pM (vid: %d): %s\n",
                                   orig_node->orig,
-                                  tt_global_entry->common.addr, message);
+                                  tt_global_entry->common.addr,
+                                  BATADV_PRINT_VID(vid), message);
                        hlist_del_rcu(&orig_entry->list);
+                       batadv_tt_global_size_dec(orig_node,
+                                                 tt_global_entry->common.vid);
                        batadv_tt_orig_list_entry_free_ref(orig_entry);
                }
        }
@@ -1150,17 +1620,25 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
                                                orig_node, message);
 }
 
-
-
+/**
+ * batadv_tt_global_del - remove a client from the global table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: an originator serving this client
+ * @addr: the mac address of the client
+ * @vid: VLAN identifier
+ * @message: a message explaining the reason for deleting the client to print
+ *  for debugging purpose
+ * @roaming: true if the deletion has been triggered by a roaming event
+ */
 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
-                                const unsigned char *addr,
+                                const unsigned char *addr, unsigned short vid,
                                 const char *message, bool roaming)
 {
        struct batadv_tt_global_entry *tt_global_entry;
        struct batadv_tt_local_entry *local_entry = NULL;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -1189,7 +1667,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
         *    the global entry, since it is useless now.
         */
        local_entry = batadv_tt_local_hash_find(bat_priv,
-                                               tt_global_entry->common.addr);
+                                               tt_global_entry->common.addr,
+                                               vid);
        if (local_entry) {
                /* local entry exists, case 2: client roamed to us. */
                batadv_tt_global_del_orig_list(tt_global_entry);
@@ -1207,8 +1686,18 @@ out:
                batadv_tt_local_entry_free_ref(local_entry);
 }
 
+/**
+ * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
+ *  given originator matching the provided vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator owning the entries to remove
+ * @match_vid: the VLAN identifier to match. If negative, all the entries will be
+ *  removed
+ * @message: debug message to print as "reason"
+ */
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
+                              int32_t match_vid,
                               const char *message)
 {
        struct batadv_tt_global_entry *tt_global;
@@ -1218,6 +1707,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        struct hlist_node *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       unsigned short vid;
 
        if (!hash)
                return;
@@ -1229,6 +1719,10 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, safe,
                                          head, hash_entry) {
+                       /* remove only matching entries */
+                       if (match_vid >= 0 && tt_common_entry->vid != match_vid)
+                               continue;
+
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
                                                 common);
@@ -1237,9 +1731,11 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                                                        orig_node, message);
 
                        if (hlist_empty(&tt_global->orig_list)) {
+                               vid = tt_global->common.vid;
                                batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                          "Deleting global tt entry %pM: %s\n",
-                                          tt_global->common.addr, message);
+                                          "Deleting global tt entry %pM (vid: %d): %s\n",
+                                          tt_global->common.addr,
+                                          BATADV_PRINT_VID(vid), message);
                                hlist_del_rcu(&tt_common_entry->hash_entry);
                                batadv_tt_global_entry_free_ref(tt_global);
                        }
@@ -1297,8 +1793,10 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
                                continue;
 
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Deleting global tt entry (%pM): %s\n",
-                                  tt_global->common.addr, msg);
+                                  "Deleting global tt entry %pM (vid: %d): %s\n",
+                                  tt_global->common.addr,
+                                  BATADV_PRINT_VID(tt_global->common.vid),
+                                  msg);
 
                        hlist_del_rcu(&tt_common->hash_entry);
 
@@ -1357,23 +1855,49 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
        return ret;
 }
 
+/**
+ * batadv_transtable_search - get the mesh destination for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of the source client
+ * @addr: mac address of the destination client
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the originator that was selected as destination in the
+ * mesh for contacting the client 'addr', NULL otherwise.
+ * In case of multiple originators serving the same client, the function returns
+ * the best one (best in terms of metric towards the destination node).
+ *
+ * If the two clients are AP isolated the function returns NULL.
+ */
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                                                  const uint8_t *src,
-                                                 const uint8_t *addr)
+                                                 const uint8_t *addr,
+                                                 unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry = NULL;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_tt_orig_list_entry *best_entry;
+       bool ap_isolation_enabled = false;
+       struct batadv_softif_vlan *vlan;
+
+       /* if the AP isolation is requested on a VLAN, then check for its
+        * setting in the proper VLAN private data structure
+        */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (vlan) {
+               ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
+               batadv_softif_vlan_free_ref(vlan);
+       }
 
-       if (src && atomic_read(&bat_priv->ap_isolation)) {
-               tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
+       if (src && ap_isolation_enabled) {
+               tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid);
                if (!tt_local_entry ||
                    (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
                        goto out;
        }
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -1385,7 +1909,7 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                goto out;
 
        rcu_read_lock();
-       best_entry = batadv_transtable_best_orig(tt_global_entry);
+       best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
        /* found anything? */
        if (best_entry)
                orig_node = best_entry->orig_node;
@@ -1402,17 +1926,40 @@ out:
        return orig_node;
 }
 
-/* Calculates the checksum of the local table of a given orig_node */
-static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
-                                    struct batadv_orig_node *orig_node)
+/**
+ * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ *  to the given orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator for which the CRC should be computed
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * This function computes the checksum for the global table corresponding to a
+ * specific originator. In particular, the checksum is computed as follows: For
+ * each client connected to the originator the CRC32C of the MAC address and the
+ * VID is computed and then all the CRC32Cs of the various clients are xor'ed
+ * together.
+ *
+ * The idea behind this is that CRC32C should be used as much as possible in
+ * order to produce a unique hash of the table, but since the order in which
+ * entries are fed to the CRC32C function affects the result and since every
+ * node in the network probably sorts the clients differently, the hash cannot
+ * be computed directly over the entire table. Hence the CRC32C is computed
+ * over each single client entry only, and all the per-client results are then
+ * xor'ed together, because the XOR operation can combine them all while
+ * keeping the noise as low as possible.
+ *
+ * Returns the checksum of the global table of a given originator.
+ */
+static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+                                    struct batadv_orig_node *orig_node,
+                                    unsigned short vid)
 {
-       uint16_t total = 0, total_one;
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_head *head;
-       uint32_t i;
-       int j;
+       uint32_t i, crc_tmp, crc = 0;
+       uint8_t flags;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1422,6 +1969,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                        tt_global = container_of(tt_common,
                                                 struct batadv_tt_global_entry,
                                                 common);
+                       /* compute the CRC only for entries belonging to the
+                        * VLAN identified by the vid passed as parameter
+                        */
+                       if (tt_common->vid != vid)
+                               continue;
+
                        /* Roaming clients are in the global table for
                         * consistency only. They must not be
                         * taken into account while computing the
@@ -1443,48 +1996,74 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                                             orig_node))
                                continue;
 
-                       total_one = 0;
-                       for (j = 0; j < ETH_ALEN; j++)
-                               total_one = crc16_byte(total_one,
-                                                      tt_common->addr[j]);
-                       total ^= total_one;
+                       crc_tmp = crc32c(0, &tt_common->vid,
+                                        sizeof(tt_common->vid));
+
+                       /* compute the CRC on flags that have to be kept in sync
+                        * among nodes
+                        */
+                       flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+                       crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+                       crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
                }
                rcu_read_unlock();
        }
 
-       return total;
+       return crc;
 }
 
-/* Calculates the checksum of the local table */
-static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_crc - calculates the checksum of the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * For details about the computation, please refer to the documentation for
+ * batadv_tt_global_crc().
+ *
+ * Returns the checksum of the local table
+ */
+static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
+                                   unsigned short vid)
 {
-       uint16_t total = 0, total_one;
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct hlist_head *head;
-       uint32_t i;
-       int j;
+       uint32_t i, crc_tmp, crc = 0;
+       uint8_t flags;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
+                       /* compute the CRC only for entries belonging to the
+                        * VLAN identified by vid
+                        */
+                       if (tt_common->vid != vid)
+                               continue;
+
                        /* not yet committed clients must not be taken into
                         * account while computing the CRC
                         */
                        if (tt_common->flags & BATADV_TT_CLIENT_NEW)
                                continue;
-                       total_one = 0;
-                       for (j = 0; j < ETH_ALEN; j++)
-                               total_one = crc16_byte(total_one,
-                                                      tt_common->addr[j]);
-                       total ^= total_one;
+
+                       crc_tmp = crc32c(0, &tt_common->vid,
+                                        sizeof(tt_common->vid));
+
+                       /* compute the CRC on flags that have to be kept in sync
+                        * among nodes
+                        */
+                       flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+                       crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+                       crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
                }
                rcu_read_unlock();
        }
 
-       return total;
+       return crc;
 }
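
A self-contained sketch of the checksum scheme described above: per client, a CRC32C is computed over VID, synchronised flags and MAC address, and the per-client values are xor'ed together so the result does not depend on iteration order. The bitwise crc32c_update() below is a plain reference implementation and is not guaranteed to match the kernel's crc32c() helper bit for bit.

#include <stdint.h>
#include <stdio.h>

/* simple bitwise CRC32C (Castagnoli) update, for illustration only */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
        }

        return crc;
}

struct client {
        uint8_t  addr[6];
        uint16_t vid;
        uint8_t  flags; /* only the "sync" flags are fed in the real code */
};

/* per-client CRC32C over vid, flags and MAC, xor'ed into the table CRC */
static uint32_t table_crc(const struct client *clients, int n)
{
        uint32_t crc = 0, crc_tmp;
        int i;

        for (i = 0; i < n; i++) {
                crc_tmp = crc32c_update(0, &clients[i].vid,
                                        sizeof(clients[i].vid));
                crc_tmp = crc32c_update(crc_tmp, &clients[i].flags,
                                        sizeof(clients[i].flags));
                crc ^= crc32c_update(crc_tmp, clients[i].addr, 6);
        }

        return crc;
}

int main(void)
{
        struct client a = { { 1, 2, 3, 4, 5, 6 }, 0, 0 };
        struct client b = { { 6, 5, 4, 3, 2, 1 }, 0, 0 };
        struct client fwd[2], rev[2];

        fwd[0] = a; fwd[1] = b;
        rev[0] = b; rev[1] = a;

        /* the XOR combination makes the result independent of the order */
        printf("%#x == %#x\n", (unsigned)table_crc(fwd, 2),
               (unsigned)table_crc(rev, 2));
        return 0;
}
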
 
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
@@ -1503,11 +2082,9 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
                                       struct batadv_orig_node *orig_node,
-                                      const unsigned char *tt_buff,
-                                      uint8_t tt_num_changes)
+                                      const void *tt_buff,
+                                      uint16_t tt_buff_len)
 {
-       uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
-
        /* Replace the old buffer only if I received something in the
         * last OGM (the OGM could carry no changes)
         */
@@ -1569,9 +2146,14 @@ unlock:
        return tt_req_node;
 }
 
-/* data_ptr is useless here, but has to be kept to respect the prototype */
-static int batadv_tt_local_valid_entry(const void *entry_ptr,
-                                      const void *data_ptr)
+/**
+ * batadv_tt_local_valid - verify that the given tt entry is a valid one
+ * @entry_ptr: the local tt entry to be checked
+ * @data_ptr: not used but definition required to satisfy the callback prototype
+ *
+ * Returns 1 if the entry is valid, 0 otherwise.
+ */
+static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
        const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
@@ -1598,41 +2180,30 @@ static int batadv_tt_global_valid(const void *entry_ptr,
        return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
-static struct sk_buff *
-batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
-                             struct batadv_hashtable *hash,
-                             struct batadv_priv *bat_priv,
-                             int (*valid_cb)(const void *, const void *),
-                             void *cb_data)
+/**
+ * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ *  specified tt hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the tt entries
+ * @tt_len: expected tvlv tt data buffer length in number of bytes
+ * @tvlv_buff: pointer to the buffer to fill with the TT data
+ * @valid_cb: function to filter tt change entries
+ * @cb_data: data passed to the filter function as argument
+ */
+static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+                                   struct batadv_hashtable *hash,
+                                   void *tvlv_buff, uint16_t tt_len,
+                                   int (*valid_cb)(const void *, const void *),
+                                   void *cb_data)
 {
        struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_query_packet *tt_response;
-       struct batadv_tt_change *tt_change;
+       struct batadv_tvlv_tt_change *tt_change;
        struct hlist_head *head;
-       struct sk_buff *skb = NULL;
-       uint16_t tt_tot, tt_count;
-       ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
+       uint16_t tt_tot, tt_num_entries = 0;
        uint32_t i;
-       size_t len;
 
-       if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) {
-               tt_len = bat_priv->soft_iface->mtu - tt_query_size;
-               tt_len -= tt_len % sizeof(struct batadv_tt_change);
-       }
-       tt_tot = tt_len / sizeof(struct batadv_tt_change);
-
-       len = tt_query_size + tt_len;
-       skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-       if (!skb)
-               goto out;
-
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(skb, ETH_HLEN);
-       tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
-       tt_response->ttvn = ttvn;
-
-       tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
-       tt_count = 0;
+       tt_tot = batadv_tt_entries(tt_len);
+       tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
 
        rcu_read_lock();
        for (i = 0; i < hash->size; i++) {
@@ -1640,7 +2211,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 
                hlist_for_each_entry_rcu(tt_common_entry,
                                         head, hash_entry) {
-                       if (tt_count == tt_tot)
+                       if (tt_tot == tt_num_entries)
                                break;
 
                        if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
@@ -1649,33 +2220,123 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                        memcpy(tt_change->addr, tt_common_entry->addr,
                               ETH_ALEN);
                        tt_change->flags = tt_common_entry->flags;
+                       tt_change->vid = htons(tt_common_entry->vid);
+                       tt_change->reserved = 0;
 
-                       tt_count++;
+                       tt_num_entries++;
                        tt_change++;
                }
        }
        rcu_read_unlock();
+}
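For illustration only (not part of the patch): batadv_tt_tvlv_generate() treats the tvlv buffer as a flat array of change records, so a byte length and an entry count are interchangeable via the record size (that is what batadv_tt_entries() computes elsewhere in the patch). Below is a minimal userspace sketch of that relation, using a hypothetical, simplified record layout rather than the kernel's real struct batadv_tvlv_tt_change.

/* Minimal sketch, assuming a simplified stand-in layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tt_change_sketch {           /* hypothetical layout for illustration */
	uint8_t  addr[6];           /* client MAC address */
	uint8_t  flags;             /* TT client flags */
	uint8_t  reserved;
	uint16_t vid;               /* VLAN id, network byte order on the wire */
};

static unsigned int tt_entries(size_t tt_len)
{
	/* byte length -> number of change records */
	return tt_len / sizeof(struct tt_change_sketch);
}

int main(void)
{
	struct tt_change_sketch buf[3];
	unsigned int i, num = tt_entries(sizeof(buf));

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < num; i++)   /* walk the flat buffer like the generate loop */
		printf("entry %u: flags=0x%02x\n", i, buf[i].flags);
	return 0;
}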
 
-       /* store in the message the number of entries we have successfully
-        * copied
-        */
-       tt_response->tt_data = htons(tt_count);
+/**
+ * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * @orig_node: originator for which the CRCs have to be checked
+ * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @num_vlan: number of tvlv VLAN entries
+ *
+ * Returns true if all the received CRCs match the locally stored ones, false
+ * otherwise
+ */
+static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
+                                      struct batadv_tvlv_tt_vlan_data *tt_vlan,
+                                      uint16_t num_vlan)
+{
+       struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
+       struct batadv_orig_node_vlan *vlan;
+       int i;
 
-out:
-       return skb;
+       /* check if each received CRC matches the locally stored one */
+       for (i = 0; i < num_vlan; i++) {
+               tt_vlan_tmp = tt_vlan + i;
+
+               /* if orig_node is a backbone node for this VLAN, don't check
+                * the CRC as we ignore all the global entries over it
+                */
+               if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv,
+                                                  orig_node->orig,
+                                                  ntohs(tt_vlan_tmp->vid)))
+                       continue;
+
+               vlan = batadv_orig_node_vlan_get(orig_node,
+                                                ntohs(tt_vlan_tmp->vid));
+               if (!vlan)
+                       return false;
+
+               if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+                       return false;
+       }
+
+       return true;
+}
+
+/**
+ * batadv_tt_local_update_crc - update all the local CRCs
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
+{
+       struct batadv_softif_vlan *vlan;
+
+       /* recompute the local CRC for each VLAN */
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid);
+       }
+       rcu_read_unlock();
+}
+
+/**
+ * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node for which the CRCs have to be updated
+ */
+static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
+                                       struct batadv_orig_node *orig_node)
+{
+       struct batadv_orig_node_vlan *vlan;
+       uint32_t crc;
+
+       /* recompute the global CRC for each VLAN */
+       rcu_read_lock();
+       list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+               /* if orig_node is a backbone node for this VLAN, don't compute
+                * the CRC as we ignore all the global entries over it
+                */
+               if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig,
+                                                  vlan->vid))
+                       continue;
+
+               crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid);
+               vlan->tt.crc = crc;
+       }
+       rcu_read_unlock();
 }
 
+/**
+ * batadv_send_tt_request - send a TT Request message to a given node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst_orig_node: the destination of the message
+ * @ttvn: the version number that the source of the message is looking for
+ * @tt_vlan: pointer to the first tvlv VLAN object to request
+ * @num_vlan: number of tvlv VLAN entries
+ * @full_table: ask for the entire translation table if true, or only for the
+ *  last TT diff otherwise
+ */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
                                  struct batadv_orig_node *dst_orig_node,
-                                 uint8_t ttvn, uint16_t tt_crc,
-                                 bool full_table)
+                                 uint8_t ttvn,
+                                 struct batadv_tvlv_tt_vlan_data *tt_vlan,
+                                 uint16_t num_vlan, bool full_table)
 {
-       struct sk_buff *skb = NULL;
-       struct batadv_tt_query_packet *tt_request;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
        struct batadv_tt_req_node *tt_req_node = NULL;
-       int ret = 1;
-       size_t tt_req_len;
+       struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
+       struct batadv_hard_iface *primary_if;
+       bool ret = false;
+       int i, size;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
@@ -1688,157 +2349,171 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
        if (!tt_req_node)
                goto out;
 
-       skb = netdev_alloc_skb_ip_align(NULL, sizeof(*tt_request) + ETH_HLEN);
-       if (!skb)
+       size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
+       tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
+       if (!tvlv_tt_data)
                goto out;
 
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(skb, ETH_HLEN);
+       tvlv_tt_data->flags = BATADV_TT_REQUEST;
+       tvlv_tt_data->ttvn = ttvn;
+       tvlv_tt_data->num_vlan = htons(num_vlan);
 
-       tt_req_len = sizeof(*tt_request);
-       tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
+       /* send all the CRCs within the request. This is needed by intermediate
+        * nodes to ensure they have the correct table before replying
+        */
+       tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
+       for (i = 0; i < num_vlan; i++) {
+               tt_vlan_req->vid = tt_vlan->vid;
+               tt_vlan_req->crc = tt_vlan->crc;
 
-       tt_request->header.packet_type = BATADV_TT_QUERY;
-       tt_request->header.version = BATADV_COMPAT_VERSION;
-       memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
-       tt_request->header.ttl = BATADV_TTL;
-       tt_request->ttvn = ttvn;
-       tt_request->tt_data = htons(tt_crc);
-       tt_request->flags = BATADV_TT_REQUEST;
+               tt_vlan_req++;
+               tt_vlan++;
+       }
 
        if (full_table)
-               tt_request->flags |= BATADV_TT_FULL_TABLE;
+               tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
-                  dst_orig_node->orig, (full_table ? 'F' : '.'));
+                  dst_orig_node->orig, full_table ? 'F' : '.');
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
-
-       if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL) != NET_XMIT_DROP)
-               ret = 0;
+       batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+                                dst_orig_node->orig, BATADV_TVLV_TT, 1,
+                                tvlv_tt_data, size);
+       ret = true;
 
 out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
-       if (ret)
-               kfree_skb(skb);
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
                list_del(&tt_req_node->list);
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
+       kfree(tvlv_tt_data);
        return ret;
 }
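For illustration only (not part of the patch): the TT_REQUEST payload built above is a fixed tt_data header followed by one (vid, crc) record per VLAN, so the allocation size is sizeof(header) + num_vlan * sizeof(record), exactly as computed in batadv_send_tt_request(). A minimal sketch under assumed, simplified struct layouts:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct tt_data_hdr_sketch {         /* hypothetical header stand-in */
	uint8_t  flags;
	uint8_t  ttvn;
	uint16_t num_vlan;          /* network byte order on the wire */
};

struct tt_vlan_sketch {             /* hypothetical per-VLAN record stand-in */
	uint32_t crc;
	uint16_t vid;
	uint16_t reserved;
};

int main(void)
{
	uint16_t num_vlan = 2;
	size_t size = sizeof(struct tt_data_hdr_sketch) +
		      (size_t)num_vlan * sizeof(struct tt_vlan_sketch);
	uint8_t *buf = calloc(1, size);

	if (!buf)
		return 1;

	struct tt_data_hdr_sketch *hdr = (struct tt_data_hdr_sketch *)buf;
	struct tt_vlan_sketch *vlan = (struct tt_vlan_sketch *)(hdr + 1);

	hdr->num_vlan = num_vlan;   /* a real implementation would use htons() */
	for (uint16_t i = 0; i < num_vlan; i++)
		vlan[i].vid = i;    /* the CRCs of the known global tables go here */

	printf("request payload: %zu bytes\n", size);
	free(buf);
	return 0;
}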
 
-static bool
-batadv_send_other_tt_response(struct batadv_priv *bat_priv,
-                             struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_other_tt_response - send reply to tt request concerning another
+ *  node's translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+                                         struct batadv_tvlv_tt_data *tt_data,
+                                         uint8_t *req_src, uint8_t *req_dst)
 {
        struct batadv_orig_node *req_dst_orig_node;
        struct batadv_orig_node *res_dst_orig_node = NULL;
-       uint8_t orig_ttvn, req_ttvn, ttvn;
-       int res, ret = false;
-       unsigned char *tt_buff;
-       bool full_table;
-       uint16_t tt_len, tt_tot;
-       struct sk_buff *skb = NULL;
-       struct batadv_tt_query_packet *tt_response;
-       uint8_t *packet_pos;
-       size_t len;
+       struct batadv_tvlv_tt_change *tt_change;
+       struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       bool ret = false, full_table;
+       uint8_t orig_ttvn, req_ttvn;
+       uint16_t tvlv_len;
+       int32_t tt_len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
-                  tt_request->src, tt_request->ttvn, tt_request->dst,
-                  (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+                  req_src, tt_data->ttvn, req_dst,
+                  (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
        /* Let's get the orig node of the REAL destination */
-       req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
+       req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
        if (!req_dst_orig_node)
                goto out;
 
-       res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+       res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src);
        if (!res_dst_orig_node)
                goto out;
 
        orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-       req_ttvn = tt_request->ttvn;
+       req_ttvn = tt_data->ttvn;
 
-       /* I don't have the requested data */
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+       /* this node doesn't have the requested data */
        if (orig_ttvn != req_ttvn ||
-           tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
+           !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
+                                       ntohs(tt_data->num_vlan)))
                goto out;
 
        /* If the full table has been explicitly requested */
-       if (tt_request->flags & BATADV_TT_FULL_TABLE ||
+       if (tt_data->flags & BATADV_TT_FULL_TABLE ||
            !req_dst_orig_node->tt_buff)
                full_table = true;
        else
                full_table = false;
 
-       /* In this version, fragmentation is not implemented, then
-        * I'll send only one packet with as much TT entries as I can
+       /* TT fragmentation hasn't been implemented yet, so send only as many
+        * TT entries as fit into a single packet
         */
        if (!full_table) {
                spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
                tt_len = req_dst_orig_node->tt_buff_len;
-               tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-               len = sizeof(*tt_response) + tt_len;
-               skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-               if (!skb)
+               tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+                                                             &tvlv_tt_data,
+                                                             &tt_change,
+                                                             &tt_len);
+               if (!tt_len)
                        goto unlock;
 
-               skb->priority = TC_PRIO_CONTROL;
-               skb_reserve(skb, ETH_HLEN);
-               packet_pos = skb_put(skb, len);
-               tt_response = (struct batadv_tt_query_packet *)packet_pos;
-               tt_response->ttvn = req_ttvn;
-               tt_response->tt_data = htons(tt_tot);
-
-               tt_buff = skb->data + sizeof(*tt_response);
                /* Copy the last orig_node's OGM buffer */
-               memcpy(tt_buff, req_dst_orig_node->tt_buff,
+               memcpy(tt_change, req_dst_orig_node->tt_buff,
                       req_dst_orig_node->tt_buff_len);
-
                spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
-               tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-
-               skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt.global_hash,
-                                                   bat_priv,
-                                                   batadv_tt_global_valid,
-                                                   req_dst_orig_node);
-               if (!skb)
+               /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+                * in the initial part
+                */
+               tt_len = -1;
+               tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+                                                             &tvlv_tt_data,
+                                                             &tt_change,
+                                                             &tt_len);
+               if (!tt_len)
                        goto out;
 
-               tt_response = (struct batadv_tt_query_packet *)skb->data;
+               /* fill the rest of the tvlv with the real TT entries */
+               batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+                                       tt_change, tt_len,
+                                       batadv_tt_global_valid,
+                                       req_dst_orig_node);
+       }
+
+       /* Don't send the response if it exceeds the maximum packet size. */
+       tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len;
+       if (tt_len > atomic_read(&bat_priv->packet_size_max)) {
+               net_ratelimited_function(batadv_info, bat_priv->soft_iface,
+                                        "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n",
+                                        res_dst_orig_node->orig);
+               goto out;
        }
 
-       tt_response->header.packet_type = BATADV_TT_QUERY;
-       tt_response->header.version = BATADV_COMPAT_VERSION;
-       tt_response->header.ttl = BATADV_TTL;
-       memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
-       memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-       tt_response->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->ttvn = req_ttvn;
 
        if (full_table)
-               tt_response->flags |= BATADV_TT_FULL_TABLE;
+               tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n",
-                  res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn);
+                  "Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n",
+                  res_dst_orig_node->orig, req_dst_orig_node->orig,
+                  full_table ? 'F' : '.', req_ttvn);
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-       res = batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL);
-       if (res != NET_XMIT_DROP)
-               ret = true;
+       batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig,
+                                req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+                                tvlv_len);
 
+       ret = true;
        goto out;
 
 unlock:
@@ -1849,37 +2524,43 @@ out:
                batadv_orig_node_free_ref(res_dst_orig_node);
        if (req_dst_orig_node)
                batadv_orig_node_free_ref(req_dst_orig_node);
-       if (!ret)
-               kfree_skb(skb);
+       kfree(tvlv_tt_data);
        return ret;
 }
 
-static bool
-batadv_send_my_tt_response(struct batadv_priv *bat_priv,
-                          struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_my_tt_response - send reply to tt request concerning this node's
+ *  translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+                                      struct batadv_tvlv_tt_data *tt_data,
+                                      uint8_t *req_src)
 {
-       struct batadv_orig_node *orig_node;
+       struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
        struct batadv_hard_iface *primary_if = NULL;
-       uint8_t my_ttvn, req_ttvn, ttvn;
-       int ret = false;
-       unsigned char *tt_buff;
+       struct batadv_tvlv_tt_change *tt_change;
+       struct batadv_orig_node *orig_node;
+       uint8_t my_ttvn, req_ttvn;
+       uint16_t tvlv_len;
        bool full_table;
-       uint16_t tt_len, tt_tot;
-       struct sk_buff *skb = NULL;
-       struct batadv_tt_query_packet *tt_response;
-       uint8_t *packet_pos;
-       size_t len;
+       int32_t tt_len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
-                  tt_request->src, tt_request->ttvn,
-                  (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+                  req_src, tt_data->ttvn,
+                  (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
+       spin_lock_bh(&bat_priv->tt.commit_lock);
 
        my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-       req_ttvn = tt_request->ttvn;
+       req_ttvn = tt_data->ttvn;
 
-       orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+       orig_node = batadv_orig_hash_find(bat_priv, req_src);
        if (!orig_node)
                goto out;
 
@@ -1890,103 +2571,104 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        /* If the full table has been explicitly requested or the gap
         * is too big send the whole local translation table
         */
-       if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
+       if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
            !bat_priv->tt.last_changeset)
                full_table = true;
        else
                full_table = false;
 
-       /* In this version, fragmentation is not implemented, then
-        * I'll send only one packet with as much TT entries as I can
+       /* TT fragmentation hasn't been implemented yet, so send only as many
+        * TT entries as fit into a single packet
         */
        if (!full_table) {
                spin_lock_bh(&bat_priv->tt.last_changeset_lock);
-               tt_len = bat_priv->tt.last_changeset_len;
-               tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-               len = sizeof(*tt_response) + tt_len;
-               skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-               if (!skb)
+               tt_len = bat_priv->tt.last_changeset_len;
+               tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+                                                            &tvlv_tt_data,
+                                                            &tt_change,
+                                                            &tt_len);
+               if (!tt_len)
                        goto unlock;
 
-               skb->priority = TC_PRIO_CONTROL;
-               skb_reserve(skb, ETH_HLEN);
-               packet_pos = skb_put(skb, len);
-               tt_response = (struct batadv_tt_query_packet *)packet_pos;
-               tt_response->ttvn = req_ttvn;
-               tt_response->tt_data = htons(tt_tot);
-
-               tt_buff = skb->data + sizeof(*tt_response);
-               memcpy(tt_buff, bat_priv->tt.last_changeset,
+               /* Copy the last local changeset */
+               memcpy(tt_change, bat_priv->tt.last_changeset,
                       bat_priv->tt.last_changeset_len);
                spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
-               tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-
-               skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt.local_hash,
-                                                   bat_priv,
-                                                   batadv_tt_local_valid_entry,
-                                                   NULL);
-               if (!skb)
+               req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+
+               /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+                * in the initial part
+                */
+               tt_len = -1;
+               tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+                                                            &tvlv_tt_data,
+                                                            &tt_change,
+                                                            &tt_len);
+               if (!tt_len)
                        goto out;
 
-               tt_response = (struct batadv_tt_query_packet *)skb->data;
+               /* fill the rest of the tvlv with the real TT entries */
+               batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+                                       tt_change, tt_len,
+                                       batadv_tt_local_valid, NULL);
        }
 
-       tt_response->header.packet_type = BATADV_TT_QUERY;
-       tt_response->header.version = BATADV_COMPAT_VERSION;
-       tt_response->header.ttl = BATADV_TTL;
-       memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-       tt_response->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->ttvn = req_ttvn;
 
        if (full_table)
-               tt_response->flags |= BATADV_TT_FULL_TABLE;
+               tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Sending TT_RESPONSE to %pM [%c]\n",
-                  orig_node->orig,
-                  (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+                  "Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n",
+                  orig_node->orig, full_table ? 'F' : '.', req_ttvn);
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = true;
+       batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+                                req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+                                tvlv_len);
+
        goto out;
 
 unlock:
        spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
+       spin_unlock_bh(&bat_priv->tt.commit_lock);
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
-       if (!ret)
-               kfree_skb(skb);
-       /* This packet was for me, so it doesn't need to be re-routed */
+       kfree(tvlv_tt_data);
+       /* The packet was for this host, so it doesn't need to be re-routed */
        return true;
 }
 
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
-                            struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_tt_response - send reply to tt request
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+                                   struct batadv_tvlv_tt_data *tt_data,
+                                   uint8_t *req_src, uint8_t *req_dst)
 {
-       if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
-               /* don't answer backbone gws! */
-               if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
-                       return true;
-
-               return batadv_send_my_tt_response(bat_priv, tt_request);
-       } else {
-               return batadv_send_other_tt_response(bat_priv, tt_request);
-       }
+       if (batadv_is_my_mac(bat_priv, req_dst))
+               return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
+       else
+               return batadv_send_other_tt_response(bat_priv, tt_data,
+                                                    req_src, req_dst);
 }
 
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                      struct batadv_orig_node *orig_node,
-                                     struct batadv_tt_change *tt_change,
+                                     struct batadv_tvlv_tt_change *tt_change,
                                      uint16_t tt_num_changes, uint8_t ttvn)
 {
        int i;
@@ -1997,11 +2679,13 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                        roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
                        batadv_tt_global_del(bat_priv, orig_node,
                                             (tt_change + i)->addr,
+                                            ntohs((tt_change + i)->vid),
                                             "tt removed by changes",
                                             roams);
                } else {
                        if (!batadv_tt_global_add(bat_priv, orig_node,
                                                  (tt_change + i)->addr,
+                                                 ntohs((tt_change + i)->vid),
                                                  (tt_change + i)->flags, ttvn))
                                /* In case of problem while storing a
                                 * global_entry, we stop the updating
@@ -2016,21 +2700,22 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
-                                 struct batadv_tt_query_packet *tt_response)
+                                 struct batadv_tvlv_tt_change *tt_change,
+                                 uint8_t ttvn, uint8_t *resp_src,
+                                 uint16_t num_entries)
 {
        struct batadv_orig_node *orig_node;
 
-       orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+       orig_node = batadv_orig_hash_find(bat_priv, resp_src);
        if (!orig_node)
                goto out;
 
        /* Purge the old table first.. */
-       batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
+       batadv_tt_global_del_orig(bat_priv, orig_node, -1,
+                                 "Received full table");
 
-       _batadv_tt_update_changes(bat_priv, orig_node,
-                                 (struct batadv_tt_change *)(tt_response + 1),
-                                 ntohs(tt_response->tt_data),
-                                 tt_response->ttvn);
+       _batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries,
+                                 ttvn);
 
        spin_lock_bh(&orig_node->tt_buff_lock);
        kfree(orig_node->tt_buff);
@@ -2038,7 +2723,7 @@ static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
        orig_node->tt_buff = NULL;
        spin_unlock_bh(&orig_node->tt_buff_lock);
 
-       atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
+       atomic_set(&orig_node->last_ttvn, ttvn);
 
 out:
        if (orig_node)
@@ -2048,22 +2733,31 @@ out:
 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                     struct batadv_orig_node *orig_node,
                                     uint16_t tt_num_changes, uint8_t ttvn,
-                                    struct batadv_tt_change *tt_change)
+                                    struct batadv_tvlv_tt_change *tt_change)
 {
        _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
                                  tt_num_changes, ttvn);
 
-       batadv_tt_save_orig_buffer(bat_priv, orig_node,
-                                  (unsigned char *)tt_change, tt_num_changes);
+       batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
+                                  batadv_tt_len(tt_num_changes));
        atomic_set(&orig_node->last_ttvn, ttvn);
 }
 
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
+/**
+ * batadv_is_my_client - check if a client is served by the local node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac adress of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if the client is served by this node, false otherwise.
+ */
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+                        unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        bool ret = false;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
                goto out;
        /* Check if the client has been logically deleted (but is kept for
@@ -2079,72 +2773,68 @@ out:
        return ret;
 }
 
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
-                              struct batadv_tt_query_packet *tt_response)
+/**
+ * batadv_handle_tt_response - process incoming tt reply
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @resp_src: mac address of tt reply sender
+ * @num_entries: number of tt change entries appended to the tt data
+ */
+static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+                                     struct batadv_tvlv_tt_data *tt_data,
+                                     uint8_t *resp_src, uint16_t num_entries)
 {
        struct batadv_tt_req_node *node, *safe;
        struct batadv_orig_node *orig_node = NULL;
-       struct batadv_tt_change *tt_change;
+       struct batadv_tvlv_tt_change *tt_change;
+       uint8_t *tvlv_ptr = (uint8_t *)tt_data;
+       uint16_t change_offset;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
-                  tt_response->src, tt_response->ttvn,
-                  ntohs(tt_response->tt_data),
-                  (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
-
-       /* we should have never asked a backbone gw */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
-               goto out;
+                  resp_src, tt_data->ttvn, num_entries,
+                  (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
-       orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+       orig_node = batadv_orig_hash_find(bat_priv, resp_src);
        if (!orig_node)
                goto out;
 
-       if (tt_response->flags & BATADV_TT_FULL_TABLE) {
-               batadv_tt_fill_gtable(bat_priv, tt_response);
+       spin_lock_bh(&orig_node->tt_lock);
+
+       change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
+       change_offset *= ntohs(tt_data->num_vlan);
+       change_offset += sizeof(*tt_data);
+       tvlv_ptr += change_offset;
+
+       tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
+       if (tt_data->flags & BATADV_TT_FULL_TABLE) {
+               batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn,
+                                     resp_src, num_entries);
        } else {
-               tt_change = (struct batadv_tt_change *)(tt_response + 1);
-               batadv_tt_update_changes(bat_priv, orig_node,
-                                        ntohs(tt_response->tt_data),
-                                        tt_response->ttvn, tt_change);
+               batadv_tt_update_changes(bat_priv, orig_node, num_entries,
+                                        tt_data->ttvn, tt_change);
        }
 
+       /* Recalculate the CRC for this orig_node and store it */
+       batadv_tt_global_update_crc(bat_priv, orig_node);
+
+       spin_unlock_bh(&orig_node->tt_lock);
+
        /* Delete the tt_req_node from pending tt_requests list */
        spin_lock_bh(&bat_priv->tt.req_list_lock);
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-               if (!batadv_compare_eth(node->addr, tt_response->src))
+               if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
-       /* Recalculate the CRC for this orig_node and store it */
-       orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 out:
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
 }
 
-int batadv_tt_init(struct batadv_priv *bat_priv)
-{
-       int ret;
-
-       ret = batadv_tt_local_init(bat_priv);
-       if (ret < 0)
-               return ret;
-
-       ret = batadv_tt_global_init(bat_priv);
-       if (ret < 0)
-               return ret;
-
-       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
-                          msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
-
-       return 1;
-}
-
 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
@@ -2225,14 +2915,28 @@ unlock:
        return ret;
 }
 
+/**
+ * batadv_send_roam_adv - send a roaming advertisement message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ * @vid: VLAN identifier
+ * @orig_node: message destination
+ *
+ * Send a ROAMING_ADV message to the node which was previously serving this
+ * client. This is done to inform the node that from now on all traffic destined
+ * for this particular roamed client has to be forwarded to the sender of the
+ * roaming message.
+ */
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+                                unsigned short vid,
                                 struct batadv_orig_node *orig_node)
 {
-       struct sk_buff *skb = NULL;
-       struct batadv_roam_adv_packet *roam_adv_packet;
-       int ret = 1;
        struct batadv_hard_iface *primary_if;
-       size_t len = sizeof(*roam_adv_packet);
+       struct batadv_tvlv_roam_adv tvlv_roam;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out;
 
        /* before going on we have to check whether the client has
         * already roamed to us too many times
@@ -2240,40 +2944,22 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
        if (!batadv_tt_check_roam_count(bat_priv, client))
                goto out;
 
-       skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-       if (!skb)
-               goto out;
-
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(skb, ETH_HLEN);
-
-       roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
-
-       roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
-       roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
-       roam_adv_packet->header.ttl = BATADV_TTL;
-       roam_adv_packet->reserved = 0;
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-       memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-       batadv_hardif_free_ref(primary_if);
-       memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
-       memcpy(roam_adv_packet->client, client, ETH_ALEN);
-
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Sending ROAMING_ADV to %pM (client %pM)\n",
-                  orig_node->orig, client);
+                  "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
+                  orig_node->orig, client, BATADV_PRINT_VID(vid));
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = 0;
+       memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client));
+       tvlv_roam.vid = htons(vid);
+
+       batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+                                orig_node->orig, BATADV_TVLV_ROAM, 1,
+                                &tvlv_roam, sizeof(tvlv_roam));
 
 out:
-       if (ret && skb)
-               kfree_skb(skb);
-       return;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
 }
 
 static void batadv_tt_purge(struct work_struct *work)
@@ -2286,7 +2972,7 @@ static void batadv_tt_purge(struct work_struct *work)
        priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
        bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
-       batadv_tt_local_purge(bat_priv);
+       batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT);
        batadv_tt_global_purge(bat_priv);
        batadv_tt_req_purge(bat_priv);
        batadv_tt_roam_purge(bat_priv);
@@ -2297,6 +2983,9 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
+
        cancel_delayed_work_sync(&bat_priv->tt.work);
 
        batadv_tt_local_table_free(bat_priv);
@@ -2308,19 +2997,25 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
        kfree(bat_priv->tt.last_changeset);
 }
 
-/* This function will enable or disable the specified flags for all the entries
- * in the given hash table and returns the number of modified entries
+/**
+ * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ *  table and possibly count them in the TT size
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: the flags to set or unset
+ * @enable: whether to set or unset the flag
+ * @count: whether to increase the TT size by the number of changed entries
  */
-static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
-                                   uint16_t flags, bool enable)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
+                                     uint16_t flags, bool enable, bool count)
 {
-       uint32_t i;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+       struct batadv_tt_common_entry *tt_common_entry;
        uint16_t changed_num = 0;
        struct hlist_head *head;
-       struct batadv_tt_common_entry *tt_common_entry;
+       uint32_t i;
 
        if (!hash)
-               goto out;
+               return;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -2338,11 +3033,15 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
                                tt_common_entry->flags &= ~flags;
                        }
                        changed_num++;
+
+                       if (!count)
+                               continue;
+
+                       batadv_tt_local_size_inc(bat_priv,
+                                                tt_common_entry->vid);
                }
                rcu_read_unlock();
        }
-out:
-       return changed_num;
 }
 
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
@@ -2370,10 +3069,11 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                continue;
 
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Deleting local tt entry (%pM): pending\n",
-                                  tt_common->addr);
+                                  "Deleting local tt entry (%pM, vid: %d): pending\n",
+                                  tt_common->addr,
+                                  BATADV_PRINT_VID(tt_common->vid));
 
-                       atomic_dec(&bat_priv->tt.local_entry_num);
+                       batadv_tt_local_size_dec(bat_priv, tt_common->vid);
                        hlist_del_rcu(&tt_common->hash_entry);
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
@@ -2384,22 +3084,25 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        }
 }
 
-static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
-                                   unsigned char **packet_buff,
-                                   int *packet_buff_len, int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ *  which have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Caller must hold tt->commit_lock.
+ */
+static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
-       uint16_t changed_num = 0;
-
-       if (atomic_read(&bat_priv->tt.local_changes) < 1)
-               return -ENOENT;
+       if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+               if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
+                       batadv_tt_tvlv_container_update(bat_priv);
+               return;
+       }
 
-       changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
-                                         BATADV_TT_CLIENT_NEW, false);
+       batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true);
 
-       /* all reset entries have to be counted as local entries */
-       atomic_add(changed_num, &bat_priv->tt.local_entry_num);
        batadv_tt_local_purge_pending_clients(bat_priv);
-       bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
+       batadv_tt_local_update_crc(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
        atomic_inc(&bat_priv->tt.vn);
@@ -2409,49 +3112,38 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
 
        /* reset the sending counter */
        atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
-
-       return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
-                                          packet_buff_len, packet_min_len);
+       batadv_tt_tvlv_container_update(bat_priv);
 }
 
-/* when calling this function (hard_iface == primary_if) has to be true */
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
-                         unsigned char **packet_buff, int *packet_buff_len,
-                         int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ *  have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
 {
-       int tt_num_changes;
-
-       /* if at least one change happened */
-       tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
-                                                 packet_buff_len,
-                                                 packet_min_len);
-
-       /* if the changes have been sent often enough */
-       if ((tt_num_changes < 0) &&
-           (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
-               batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
-                                             packet_min_len, packet_min_len);
-               tt_num_changes = 0;
-       }
-
-       return tt_num_changes;
+       spin_lock_bh(&bat_priv->tt.commit_lock);
+       batadv_tt_local_commit_changes_nolock(bat_priv);
+       spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-                          uint8_t *dst)
+                          uint8_t *dst, unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry = NULL;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
+       struct batadv_softif_vlan *vlan;
        bool ret = false;
 
-       if (!atomic_read(&bat_priv->ap_isolation))
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan || !atomic_read(&vlan->ap_isolation))
                goto out;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
        if (!tt_local_entry)
                goto out;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -2461,6 +3153,8 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
        ret = true;
 
 out:
+       if (vlan)
+               batadv_softif_vlan_free_ref(vlan);
        if (tt_global_entry)
                batadv_tt_global_entry_free_ref(tt_global_entry);
        if (tt_local_entry)
@@ -2468,19 +3162,29 @@ out:
        return ret;
 }
 
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
-                          struct batadv_orig_node *orig_node,
-                          const unsigned char *tt_buff, uint8_t tt_num_changes,
-                          uint8_t ttvn, uint16_t tt_crc)
+/**
+ * batadv_tt_update_orig - update global translation table with new tt
+ *  information received via ogms
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node of the ogm
+ * @tt_buff: pointer to the first tvlv VLAN entry in the received tt buffer
+ * @tt_num_vlan: number of tvlv VLAN entries
+ * @tt_change: pointer to the first entry in the TT buffer
+ * @tt_num_changes: number of tt changes inside the tt buffer
+ * @ttvn: translation table version number of this changeset
+ */
+static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+                                 struct batadv_orig_node *orig_node,
+                                 const void *tt_buff, uint16_t tt_num_vlan,
+                                 struct batadv_tvlv_tt_change *tt_change,
+                                 uint16_t tt_num_changes, uint8_t ttvn)
 {
        uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
        bool full_table = true;
-       struct batadv_tt_change *tt_change;
-
-       /* don't care about a backbone gateways updates. */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
-               return;
 
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes
         */
@@ -2496,7 +3200,9 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                        goto request_table;
                }
 
-               tt_change = (struct batadv_tt_change *)tt_buff;
+               spin_lock_bh(&orig_node->tt_lock);
+
+               tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
                batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
                                         ttvn, tt_change);
 
@@ -2504,7 +3210,9 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                 * prefer to recompute it to spot any possible inconsistency
                 * in the global table
                 */
-               orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+               batadv_tt_global_update_crc(bat_priv, orig_node);
+
+               spin_unlock_bh(&orig_node->tt_lock);
 
                /* The ttvn alone is not enough to guarantee consistency
                 * because a single value could represent different states
@@ -2515,37 +3223,46 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                 * checking the CRC value is mandatory to detect the
                 * inconsistency
                 */
-               if (orig_node->tt_crc != tt_crc)
+               if (!batadv_tt_global_check_crc(orig_node, tt_vlan,
+                                               tt_num_vlan))
                        goto request_table;
        } else {
                /* if we missed more than one change or our tables are not
                 * in sync anymore -> request fresh tt data
                 */
                if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
-                   orig_node->tt_crc != tt_crc) {
+                   !batadv_tt_global_check_crc(orig_node, tt_vlan,
+                                               tt_num_vlan)) {
 request_table:
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %#.4x last_crc: %#.4x num_changes: %u)\n",
-                                  orig_node->orig, ttvn, orig_ttvn, tt_crc,
-                                  orig_node->tt_crc, tt_num_changes);
+                                  "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n",
+                                  orig_node->orig, ttvn, orig_ttvn,
+                                  tt_num_changes);
                        batadv_send_tt_request(bat_priv, orig_node, ttvn,
-                                              tt_crc, full_table);
+                                              tt_vlan, tt_num_vlan,
+                                              full_table);
                        return;
                }
        }
 }
 
-/* returns true whether we know that the client has moved from its old
- * originator to another one. This entry is kept is still kept for consistency
- * purposes
+/**
+ * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if we know that the client has moved from its old originator
+ * to another one. This entry is still kept for consistency purposes and will be
+ * deleted later by a DEL or because of timeout
  */
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-                                       uint8_t *addr)
+                                       uint8_t *addr, unsigned short vid)
 {
        struct batadv_tt_global_entry *tt_global_entry;
        bool ret = false;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -2558,19 +3275,20 @@ out:
 /**
  * batadv_tt_local_client_is_roaming - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
- * @addr: the MAC address of the local client to query
+ * @addr: the mac address of the local client to query
+ * @vid: VLAN identifier
  *
  * Returns true if the local client is known to be roaming (it is not served by
  * this node anymore) or not. If yes, the client is still present in the table
  * to keep the latter consistent with the node TTVN
  */
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-                                      uint8_t *addr)
+                                      uint8_t *addr, unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        bool ret = false;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
                goto out;
 
@@ -2582,26 +3300,268 @@ out:
 
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig_node,
-                                         const unsigned char *addr)
+                                         const unsigned char *addr,
+                                         unsigned short vid)
 {
        bool ret = false;
 
-       /* if the originator is a backbone node (meaning it belongs to the same
-        * LAN of this node) the temporary client must not be added because to
-        * reach such destination the node must use the LAN instead of the mesh
-        */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
-               goto out;
-
-       if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+       if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
                                  BATADV_TT_CLIENT_TEMP,
                                  atomic_read(&orig_node->last_ttvn)))
                goto out;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Added temporary global client (addr: %pM orig: %pM)\n",
-                  addr, orig_node->orig);
+                  "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
+                  addr, BATADV_PRINT_VID(vid), orig_node->orig);
        ret = true;
 out:
        return ret;
 }
+
+/**
+ * batadv_tt_local_resize_to_mtu - resize the local translation table to fit the
+ *  maximum packet size that can be transported through the mesh
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * Remove entries older than 'timeout' and halve the timeout if more entries
+ * still need to be removed.
+ */
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+{
+       struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+       int packet_size_max = atomic_read(&bat_priv->packet_size_max);
+       int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2;
+       bool reduced = false;
+
+       spin_lock_bh(&bat_priv->tt.commit_lock);
+
+       while (true) {
+               table_size = batadv_tt_local_table_transmit_size(bat_priv);
+               if (packet_size_max >= table_size)
+                       break;
+
+               batadv_tt_local_purge(bat_priv, timeout);
+               batadv_tt_local_purge_pending_clients(bat_priv);
+
+               timeout /= 2;
+               reduced = true;
+               net_ratelimited_function(batadv_info, soft_iface,
+                                        "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n",
+                                        packet_size_max);
+       }
+
+       /* commit these changes immediately, to avoid synchronization problems
+        * with the TTVN
+        */
+       if (reduced)
+               batadv_tt_local_commit_changes_nolock(bat_priv);
+
+       spin_unlock_bh(&bat_priv->tt.commit_lock);
+}
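
The loop above implements the policy described in the kernel-doc: purge entries older than the timeout and, if the serialized table still does not fit into packet_size_max, halve the timeout and purge again. Below is a minimal userspace model of that policy; table_transmit_size() and purge_older_than() are hypothetical stand-ins for the batman-adv internals, and the per-entry wire size is an arbitrary assumption of the sketch.

/* Minimal model of the "halve the timeout until the table fits" loop. */
#include <stdbool.h>
#include <stdio.h>

#define ENTRY_WIRE_SIZE   10      /* assumed per-entry cost on the wire */
#define LOCAL_TIMEOUT_MS  600000  /* stand-in for BATADV_TT_LOCAL_TIMEOUT */

struct table {
	int num_entries;
	int ages_ms[8];           /* age of each entry in milliseconds */
};

static int table_transmit_size(const struct table *t)
{
	return t->num_entries * ENTRY_WIRE_SIZE;
}

/* Drop every entry older than 'timeout_ms' and compact the array. */
static void purge_older_than(struct table *t, int timeout_ms)
{
	int kept = 0;

	for (int i = 0; i < t->num_entries; i++)
		if (t->ages_ms[i] <= timeout_ms)
			t->ages_ms[kept++] = t->ages_ms[i];
	t->num_entries = kept;
}

/* Returns true when at least one purge round was needed (commit required). */
static bool resize_to_fit(struct table *t, int packet_size_max)
{
	int timeout_ms = LOCAL_TIMEOUT_MS / 2;
	bool reduced = false;

	while (table_transmit_size(t) > packet_size_max && timeout_ms > 0) {
		purge_older_than(t, timeout_ms);
		timeout_ms /= 2;   /* be more aggressive on the next round */
		reduced = true;
	}
	return reduced;
}

int main(void)
{
	struct table t = { 5, { 700000, 400000, 250000, 90000, 1000 } };
	bool reduced = resize_to_fit(&t, 30);

	printf("reduced=%d entries=%d\n", reduced, t.num_entries);
	return 0;
}
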
+
+/**
+ * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t flags, void *tvlv_value,
+                                         uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       struct batadv_tvlv_tt_change *tt_change;
+       struct batadv_tvlv_tt_data *tt_data;
+       uint16_t num_entries, num_vlan;
+
+       if (tvlv_value_len < sizeof(*tt_data))
+               return;
+
+       tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+       tvlv_value_len -= sizeof(*tt_data);
+
+       num_vlan = ntohs(tt_data->num_vlan);
+
+       if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
+               return;
+
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+       tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
+       tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
+
+       num_entries = batadv_tt_entries(tvlv_value_len);
+
+       batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
+                             num_entries, tt_data->ttvn);
+}
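
The handler above walks a buffer laid out as a fixed tt_data header, followed by num_vlan VLAN descriptors, followed by whatever remains interpreted as change entries. The sketch below shows only that length arithmetic; the struct layouts are simplified placeholders and do not match the real on-wire batman-adv formats.

/* Userspace sketch of the container walk performed above:
 * [ tt_data header ][ num_vlan x vlan descriptor ][ N x change entry ].
 */
#include <arpa/inet.h>   /* ntohs(), htons() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tt_data_hdr  { uint8_t flags; uint8_t ttvn; uint16_t num_vlan; };
struct tt_vlan_desc { uint32_t crc; uint16_t vid; uint16_t reserved; };
struct tt_change    { uint8_t flags; uint8_t reserved[3];
		      uint8_t addr[6]; uint16_t vid; };

static int parse_tt_container(const uint8_t *buf, size_t len)
{
	const struct tt_data_hdr *hdr;
	const struct tt_vlan_desc *vlan;
	const struct tt_change *change;
	uint16_t num_vlan;
	size_t num_entries;

	if (len < sizeof(*hdr))
		return -1;
	hdr = (const struct tt_data_hdr *)buf;
	len -= sizeof(*hdr);

	num_vlan = ntohs(hdr->num_vlan);
	if (len < (size_t)num_vlan * sizeof(*vlan))
		return -1;

	/* VLAN descriptors follow the header, change entries follow those */
	vlan = (const struct tt_vlan_desc *)(hdr + 1);
	change = (const struct tt_change *)(vlan + num_vlan);
	len -= (size_t)num_vlan * sizeof(*vlan);

	num_entries = len / sizeof(*change);
	(void)change;
	printf("ttvn=%u vlans=%u changes=%zu\n",
	       (unsigned)hdr->ttvn, (unsigned)num_vlan, num_entries);
	return 0;
}

int main(void)
{
	_Alignas(4) uint8_t buf[sizeof(struct tt_data_hdr) +
				sizeof(struct tt_vlan_desc) +
				2 * sizeof(struct tt_change)] = { 0 };
	struct tt_data_hdr hdr = { .flags = 0, .ttvn = 7, .num_vlan = htons(1) };

	memcpy(buf, &hdr, sizeof(hdr));
	return parse_tt_container(buf, sizeof(buf));
}
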
+
+/**
+ * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ *  container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+                                            uint8_t *src, uint8_t *dst,
+                                            void *tvlv_value,
+                                            uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_tt_data *tt_data;
+       uint16_t tt_vlan_len, tt_num_entries;
+       char tt_flag;
+       bool ret;
+
+       if (tvlv_value_len < sizeof(*tt_data))
+               return NET_RX_SUCCESS;
+
+       tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+       tvlv_value_len -= sizeof(*tt_data);
+
+       tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
+       tt_vlan_len *= ntohs(tt_data->num_vlan);
+
+       if (tvlv_value_len < tt_vlan_len)
+               return NET_RX_SUCCESS;
+
+       tvlv_value_len -= tt_vlan_len;
+       tt_num_entries = batadv_tt_entries(tvlv_value_len);
+
+       switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
+       case BATADV_TT_REQUEST:
+               batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
+
+               /* If this node cannot provide a TT response the tt_request is
+                * forwarded
+                */
+               ret = batadv_send_tt_response(bat_priv, tt_data, src, dst);
+               if (!ret) {
+                       if (tt_data->flags & BATADV_TT_FULL_TABLE)
+                               tt_flag = 'F';
+                       else
+                               tt_flag = '.';
+
+                       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                                  "Routing TT_REQUEST to %pM [%c]\n",
+                                  dst, tt_flag);
+                       /* tvlv API will re-route the packet */
+                       return NET_RX_DROP;
+               }
+               break;
+       case BATADV_TT_RESPONSE:
+               batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+               if (batadv_is_my_mac(bat_priv, dst)) {
+                       batadv_handle_tt_response(bat_priv, tt_data,
+                                                 src, tt_num_entries);
+                       return NET_RX_SUCCESS;
+               }
+
+               if (tt_data->flags & BATADV_TT_FULL_TABLE)
+                       tt_flag =  'F';
+               else
+                       tt_flag = '.';
+
+               batadv_dbg(BATADV_DBG_TT, bat_priv,
+                          "Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag);
+
+               /* tvlv API will re-route the packet */
+               return NET_RX_DROP;
+       }
+
+       return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+                                              uint8_t *src, uint8_t *dst,
+                                              void *tvlv_value,
+                                              uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_roam_adv *roaming_adv;
+       struct batadv_orig_node *orig_node = NULL;
+
+       /* If this node is not the intended recipient of the
+        * roaming advertisement the packet is forwarded
+        * (the tvlv API will re-route the packet).
+        */
+       if (!batadv_is_my_mac(bat_priv, dst))
+               return NET_RX_DROP;
+
+       if (tvlv_value_len < sizeof(*roaming_adv))
+               goto out;
+
+       orig_node = batadv_orig_hash_find(bat_priv, src);
+       if (!orig_node)
+               goto out;
+
+       batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+       roaming_adv = (struct batadv_tvlv_roam_adv *)tvlv_value;
+
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Received ROAMING_ADV from %pM (client %pM)\n",
+                  src, roaming_adv->client);
+
+       batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client,
+                            ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM,
+                            atomic_read(&orig_node->last_ttvn) + 1);
+
+out:
+       if (orig_node)
+               batadv_orig_node_free_ref(orig_node);
+       return NET_RX_SUCCESS;
+}
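
Both unicast handlers above follow the same contract: returning NET_RX_DROP tells the TVLV layer that the packet was not consumed here and should be re-routed towards its destination, while NET_RX_SUCCESS means it was handled locally. A toy illustration of that contract, mirroring the shape of the roaming handler; the constants and the forwarding step are invented for the sketch.

#include <stdio.h>
#include <string.h>

#define RX_SUCCESS 0   /* handler consumed the packet */
#define RX_DROP    1   /* not consumed here: let the caller re-route it */

static const char my_mac[] = "aa:bb:cc:dd:ee:ff";

/* Only the addressed node processes the advertisement,
 * everyone else asks for a re-route. */
static int roam_handler(const char *dst)
{
	if (strcmp(dst, my_mac) != 0)
		return RX_DROP;

	printf("processing roaming advertisement locally\n");
	return RX_SUCCESS;
}

static void deliver_unicast_tvlv(const char *dst)
{
	if (roam_handler(dst) == RX_DROP)
		printf("re-routing packet towards %s\n", dst);
}

int main(void)
{
	deliver_unicast_tvlv(my_mac);                /* handled locally */
	deliver_unicast_tvlv("11:22:33:44:55:66");   /* forwarded on */
	return 0;
}
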
+
+/**
+ * batadv_tt_init - initialise the translation table internals
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return 1 on success or a negative error number in case of failure.
+ */
+int batadv_tt_init(struct batadv_priv *bat_priv)
+{
+       int ret;
+
+       /* synchronized flags must be remote */
+       BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK));
+
+       ret = batadv_tt_local_init(bat_priv);
+       if (ret < 0)
+               return ret;
+
+       ret = batadv_tt_global_init(bat_priv);
+       if (ret < 0)
+               return ret;
+
+       batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+                                    batadv_tt_tvlv_unicast_handler_v1,
+                                    BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
+
+       batadv_tvlv_handler_register(bat_priv, NULL,
+                                    batadv_roam_tvlv_unicast_handler_v1,
+                                    BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
+
+       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+                          msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
+
+       return 1;
+}
index 659a3bb759ce87087c4d5697239f4724c815971c..026b1ffa674699ea7561df5456dd0c91f15d3521 100644 (file)
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-int batadv_tt_len(int changes_num);
 int batadv_tt_init(struct batadv_priv *bat_priv);
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-                        int ifindex);
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+                        unsigned short vid, int ifindex);
 uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-                               const uint8_t *addr, const char *message,
-                               bool roaming);
+                               const uint8_t *addr, unsigned short vid,
+                               const char *message, bool roaming);
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
-                              struct batadv_orig_node *orig_node,
-                              const unsigned char *tt_buff, int tt_buff_len);
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
-                        struct batadv_orig_node *orig_node,
-                        const unsigned char *addr, uint16_t flags,
-                        uint8_t ttvn);
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
-                              const char *message);
+                              int32_t match_vid, const char *message);
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                                                  const uint8_t *src,
-                                                 const uint8_t *addr);
+                                                 const uint8_t *addr,
+                                                 unsigned short vid);
 void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
-                            struct batadv_tt_query_packet *tt_request);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
-                              struct batadv_tt_query_packet *tt_response);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+                        unsigned short vid);
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-                          uint8_t *dst);
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
-                          struct batadv_orig_node *orig_node,
-                          const unsigned char *tt_buff, uint8_t tt_num_changes,
-                          uint8_t ttvn, uint16_t tt_crc);
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
-                         unsigned char **packet_buff, int *packet_buff_len,
-                         int packet_min_len);
+                          uint8_t *dst, unsigned short vid);
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-                                       uint8_t *addr);
+                                       uint8_t *addr, unsigned short vid);
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-                                      uint8_t *addr);
+                                      uint8_t *addr, unsigned short vid);
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig_node,
-                                         const unsigned char *addr);
+                                         const unsigned char *addr,
+                                         unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
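
As the new vid parameters throughout this header show, clients are now identified by the (MAC address, VLAN id) pair rather than by the MAC address alone. Below is a hedged sketch of such a composite-key comparison; the hash table plumbing is omitted and the helper names are invented.

/* A TT entry now matches only if both the MAC address and the VLAN
 * identifier match, mirroring the extra 'vid' argument added above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct tt_entry_key {
	uint8_t addr[ETH_ALEN];
	unsigned short vid;
};

static bool tt_key_equal(const struct tt_entry_key *a,
			 const struct tt_entry_key *b)
{
	return memcmp(a->addr, b->addr, ETH_ALEN) == 0 && a->vid == b->vid;
}

int main(void)
{
	struct tt_entry_key stored = { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, 100 };
	struct tt_entry_key same_mac_other_vlan = stored;

	same_mac_other_vlan.vid = 200;

	printf("same vlan:  %d\n", tt_key_equal(&stored, &stored));
	printf("other vlan: %d\n", tt_key_equal(&stored, &same_mac_other_vlan));
	return 0;
}
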
index b2c94e1393191e00165e42e117ded3dd2374f70f..91dd369b0ff21790b9fae41b88005be95bf0ffe4 100644 (file)
 #include "bitarray.h"
 #include <linux/kernel.h>
 
-/**
- * Maximum overhead for the encapsulation for a payload packet
- */
-#define BATADV_HEADER_LEN \
-       (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
-                       sizeof(struct batadv_bcast_packet)))
-
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
 
 #endif /* CONFIG_BATMAN_ADV_DAT */
 
+/**
+ * BATADV_TT_REMOTE_MASK - bitmask selecting the flags that are sent over the
+ *  wire only
+ */
+#define BATADV_TT_REMOTE_MASK  0x00FF
+
+/**
+ * BATADV_TT_SYNC_MASK - bitmask of the flags that need to be kept in sync
+ *  among the nodes. These flags are used to compute the global/local CRC
+ */
+#define BATADV_TT_SYNC_MASK    0x00F0
+
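
batadv_tt_init() (shown earlier in this diff) guards the relationship between these two masks with BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK)): the flags used for CRC computation must overlap with the flags actually carried on the wire. The same compile-time check can be written as a stand-alone C11 illustration:

/* _Static_assert plays the role of BUILD_BUG_ON here; the values are
 * the same as the definitions above. */
#define BATADV_TT_REMOTE_MASK 0x00FF   /* flags sent over the wire */
#define BATADV_TT_SYNC_MASK   0x00F0   /* flags covered by the CRC */

_Static_assert((BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK) != 0,
	       "synchronized flags must be remote flags");

int main(void) { return 0; }
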
 /**
  * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
  * @ogm_buff: buffer holding the OGM packet
@@ -60,7 +65,6 @@ struct batadv_hard_iface_bat_iv {
  * @if_num: identificator of the interface
  * @if_status: status of the interface for batman-adv
  * @net_dev: pointer to the net_device
- * @frag_seqno: last fragment sequence number sent by this interface
  * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
  * @hardif_obj: kobject of the per interface sysfs "mesh" directory
  * @refcount: number of contexts the object is used
@@ -76,7 +80,6 @@ struct batadv_hard_iface {
        int16_t if_num;
        char if_status;
        struct net_device *net_dev;
-       atomic_t frag_seqno;
        uint8_t num_bcasts;
        struct kobject *hardif_obj;
        atomic_t refcount;
@@ -87,29 +90,98 @@ struct batadv_hard_iface {
        struct work_struct cleanup_work;
 };
 
+/**
+ * struct batadv_frag_table_entry - head in the fragment buffer table
+ * @head: head of list with fragments
+ * @lock: lock to protect the list of fragments
+ * @timestamp: time (jiffie) of last received fragment
+ * @seqno: sequence number of the fragments in the list
+ * @size: accumulated size of packets in list
+ */
+struct batadv_frag_table_entry {
+       struct hlist_head head;
+       spinlock_t lock; /* protects head */
+       unsigned long timestamp;
+       uint16_t seqno;
+       uint16_t size;
+};
+
+/**
+ * struct batadv_frag_list_entry - entry in a list of fragments
+ * @list: list node information
+ * @skb: fragment
+ * @no: fragment number in the set
+ */
+struct batadv_frag_list_entry {
+       struct hlist_node list;
+       struct sk_buff *skb;
+       uint8_t no;
+};
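
The two structures above form the per-originator fragment buffer: one batadv_frag_table_entry per chain (identified by seqno, with the accumulated size of the buffered fragments) holding a list of batadv_frag_list_entry items ordered by fragment number. Here is a simplified userspace model of that buffering; the completion threshold ('total') is assumed to be known from the fragment header and is not part of the structures shown here.

/* One chain of fragments, kept sorted by fragment number, with the
 * accumulated payload size tracked as fragments arrive. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct frag {
	struct frag *next;
	uint8_t no;          /* fragment number within the chain */
	size_t len;          /* payload bytes carried by this fragment */
};

struct frag_chain {
	struct frag *head;   /* sorted by ->no */
	uint16_t seqno;      /* chain identifier */
	size_t size;         /* accumulated payload size */
};

/* Insert a fragment in ->no order; true once 'total' bytes are buffered. */
static bool chain_insert(struct frag_chain *c, struct frag *f, size_t total)
{
	struct frag **pos = &c->head;

	while (*pos && (*pos)->no < f->no)
		pos = &(*pos)->next;

	f->next = *pos;
	*pos = f;
	c->size += f->len;

	return c->size >= total;
}

int main(void)
{
	struct frag_chain chain = { .head = NULL, .seqno = 42, .size = 0 };
	struct frag f2 = { .next = NULL, .no = 2, .len = 600 };
	struct frag f1 = { .next = NULL, .no = 1, .len = 800 };

	printf("complete after no=2: %d\n", chain_insert(&chain, &f2, 1400));
	printf("complete after no=1: %d\n", chain_insert(&chain, &f1, 1400));
	return 0;
}
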
+
+/**
+ * struct batadv_vlan_tt - VLAN specific TT attributes
+ * @crc: CRC32 checksum of the entries belonging to this vlan
+ * @num_entries: number of TT entries for this VLAN
+ */
+struct batadv_vlan_tt {
+       uint32_t crc;
+       atomic_t num_entries;
+};
+
+/**
+ * struct batadv_orig_node_vlan - VLAN specific data per orig_node
+ * @vid: the VLAN identifier
+ * @tt: VLAN specific TT attributes
+ * @list: list node for orig_node::vlan_list
+ * @refcount: number of contexts where this object is currently in use
+ * @rcu: struct used for freeing in a RCU-safe manner
+ */
+struct batadv_orig_node_vlan {
+       unsigned short vid;
+       struct batadv_vlan_tt tt;
+       struct list_head list;
+       atomic_t refcount;
+       struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
+ * @bcast_own: bitfield containing the number of our OGMs this orig_node
+ *  rebroadcasted "back" to us (relative to last_real_seqno)
+ * @bcast_own_sum: counted result of bcast_own
+ * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
+ *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+ */
+struct batadv_orig_bat_iv {
+       unsigned long *bcast_own;
+       uint8_t *bcast_own_sum;
+       /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+        * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+        */
+       spinlock_t ogm_cnt_lock;
+};
+
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
  * @orig: originator ethernet address
  * @primary_addr: hosts primary interface address
  * @router: router that should be used to reach this originator
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- *  rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
  * @batman_seqno_reset: time when the batman seqno window was reset
- * @gw_flags: flags related to gateway class
- * @flags: for now only VIS_SERVER flag
+ * @capabilities: announced capabilities of this originator
  * @last_ttvn: last seen translation table version number
- * @tt_crc: CRC of the translation table
  * @tt_buff: last tt changeset this node received from the orig node
  * @tt_buff_len: length of the last tt changeset this node received from the
  *  orig node
  * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_size: number of global TT entries announced by the orig node
  * @tt_initialised: bool keeping track of whether or not this node have received
  *  any translation table information from the orig node yet
+ * @tt_lock: prevents the table from being updated while it is read. A table
+ *  update consists of two operations (data structure update and metadata
+ *  -CRC/TTVN- recalculation) which have to be executed atomically so that no
+ *  other thread can read the table/metadata in between.
  * @last_real_seqno: last and best known sequence number
  * @last_ttl: ttl of last received packet
  * @bcast_bits: bitfield containing the info which payload broadcast originated
@@ -117,14 +189,9 @@ struct batadv_hard_iface {
  *  last_bcast_seqno)
  * @last_bcast_seqno: last broadcast sequence number received by this host
  * @neigh_list: list of potential next hop neighbor towards this orig node
- * @frag_list: fragmentation buffer list for fragment re-assembly
- * @last_frag_packet: time when last fragmented packet from this node was
- *  received
  * @neigh_list_lock: lock protecting neigh_list, router and bonding_list
  * @hash_entry: hlist node for batadv_priv::orig_hash
  * @bat_priv: pointer to soft_iface this orig node belongs to
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->real_bits & neigh_node->real_packet_count
  * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
  * @bond_candidates: how many candidates are available
  * @bond_list: list of bonding candidates
@@ -134,6 +201,11 @@ struct batadv_hard_iface {
  * @out_coding_list: list of nodes that can hear this orig
  * @in_coding_list_lock: protects in_coding_list
  * @out_coding_list_lock: protects out_coding_list
+ * @fragments: array with heads for fragment chains
+ * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
+ *  originator represented by this object
+ * @vlan_list_lock: lock protecting vlan_list
+ * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
        uint8_t orig[ETH_ALEN];
@@ -142,35 +214,26 @@ struct batadv_orig_node {
 #ifdef CONFIG_BATMAN_ADV_DAT
        batadv_dat_addr_t dat_addr;
 #endif
-       unsigned long *bcast_own;
-       uint8_t *bcast_own_sum;
        unsigned long last_seen;
        unsigned long bcast_seqno_reset;
        unsigned long batman_seqno_reset;
-       uint8_t gw_flags;
-       uint8_t flags;
+       uint8_t capabilities;
        atomic_t last_ttvn;
-       uint16_t tt_crc;
        unsigned char *tt_buff;
        int16_t tt_buff_len;
        spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-       atomic_t tt_size;
        bool tt_initialised;
+       /* prevents from changing the table while reading it */
+       spinlock_t tt_lock;
        uint32_t last_real_seqno;
        uint8_t last_ttl;
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
        uint32_t last_bcast_seqno;
        struct hlist_head neigh_list;
-       struct list_head frag_list;
-       unsigned long last_frag_packet;
        /* neigh_list_lock protects: neigh_list, router & bonding_list */
        spinlock_t neigh_list_lock;
        struct hlist_node hash_entry;
        struct batadv_priv *bat_priv;
-       /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
-        * neigh_node->real_bits & neigh_node->real_packet_count
-        */
-       spinlock_t ogm_cnt_lock;
        /* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
        spinlock_t bcast_seqno_lock;
        atomic_t bond_candidates;
@@ -183,12 +246,28 @@ struct batadv_orig_node {
        spinlock_t in_coding_list_lock; /* Protects in_coding_list */
        spinlock_t out_coding_list_lock; /* Protects out_coding_list */
 #endif
+       struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+       struct list_head vlan_list;
+       spinlock_t vlan_list_lock; /* protects vlan_list */
+       struct batadv_orig_bat_iv bat_iv;
+};
+
+/**
+ * enum batadv_orig_capabilities - orig node capabilities
+ * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
+ * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
+ */
+enum batadv_orig_capabilities {
+       BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
+       BATADV_ORIG_CAPA_HAS_NC = BIT(1),
 };
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
  * @list: list node for batadv_priv_gw::list
  * @orig_node: pointer to corresponding orig node
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
  * @deleted: this struct is scheduled for deletion
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -196,46 +275,57 @@ struct batadv_orig_node {
 struct batadv_gw_node {
        struct hlist_node list;
        struct batadv_orig_node *orig_node;
+       uint32_t bandwidth_down;
+       uint32_t bandwidth_up;
        unsigned long deleted;
        atomic_t refcount;
        struct rcu_head rcu;
 };
 
 /**
- * struct batadv_neigh_node - structure for single hop neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @addr: mac address of neigh node
+ * struct batadv_neigh_bat_iv - B.A.T.M.A.N. IV specific structure for single
+ *  hop neighbors
  * @tq_recv: ring buffer of received TQ values from this neigh node
  * @tq_index: ring buffer index
  * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @last_ttl: last received ttl from this neigh node
- * @bonding_list: list node for batadv_orig_node::bond_list
- * @last_seen: when last packet via this neighbor was received
  * @real_bits: bitfield containing the number of OGMs received from this neigh
  *  node (relative to orig_node->last_real_seqno)
  * @real_packet_count: counted result of real_bits
+ * @lq_update_lock: lock protecting tq_recv & tq_index
+ */
+struct batadv_neigh_bat_iv {
+       uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+       uint8_t tq_index;
+       uint8_t tq_avg;
+       DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+       uint8_t real_packet_count;
+       spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
+};
+
+/**
+ * struct batadv_neigh_node - structure for single hop neighbors
+ * @list: list node for batadv_orig_node::neigh_list
  * @orig_node: pointer to corresponding orig_node
+ * @addr: the MAC address of the neighboring interface
  * @if_incoming: pointer to incoming hard interface
- * @lq_update_lock: lock protecting tq_recv & tq_index
+ * @last_seen: when last packet via this neighbor was received
+ * @last_ttl: last received ttl from this neigh node
+ * @bonding_list: list node for batadv_orig_node::bond_list
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
+ * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_neigh_node {
        struct hlist_node list;
+       struct batadv_orig_node *orig_node;
        uint8_t addr[ETH_ALEN];
-       uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
-       uint8_t tq_index;
-       uint8_t tq_avg;
+       struct batadv_hard_iface *if_incoming;
+       unsigned long last_seen;
        uint8_t last_ttl;
        struct list_head bonding_list;
-       unsigned long last_seen;
-       DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-       uint8_t real_packet_count;
-       struct batadv_orig_node *orig_node;
-       struct batadv_hard_iface *if_incoming;
-       spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
        atomic_t refcount;
        struct rcu_head rcu;
+       struct batadv_neigh_bat_iv bat_iv;
 };
 
 /**
@@ -265,6 +355,12 @@ struct batadv_bcast_duplist_entry {
  * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
  * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
  * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
+ * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
+ * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
+ * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
+ * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
  * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
  * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
  * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
@@ -302,6 +398,12 @@ enum batadv_counters {
        BATADV_CNT_MGMT_TX_BYTES,
        BATADV_CNT_MGMT_RX,
        BATADV_CNT_MGMT_RX_BYTES,
+       BATADV_CNT_FRAG_TX,
+       BATADV_CNT_FRAG_TX_BYTES,
+       BATADV_CNT_FRAG_RX,
+       BATADV_CNT_FRAG_RX_BYTES,
+       BATADV_CNT_FRAG_FWD,
+       BATADV_CNT_FRAG_FWD_BYTES,
        BATADV_CNT_TT_REQUEST_TX,
        BATADV_CNT_TT_REQUEST_RX,
        BATADV_CNT_TT_RESPONSE_TX,
@@ -343,11 +445,14 @@ enum batadv_counters {
  * @changes_list_lock: lock protecting changes_list
  * @req_list_lock: lock protecting req_list
  * @roam_list_lock: lock protecting roam_list
- * @local_entry_num: number of entries in the local hash table
- * @local_crc: Checksum of the local table, recomputed before sending a new OGM
  * @last_changeset: last tt changeset this host has generated
  * @last_changeset_len: length of last tt changeset this host has generated
  * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
+ * @commit_lock: prevents a local TT commit from being executed while the
+ *  local table is read. The local TT commit consists of two operations (data
+ *  structure update and metadata -CRC/TTVN- recalculation) which have to be
+ *  executed atomically so that no other thread can read the table/metadata
+ *  in between.
  * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
@@ -362,12 +467,12 @@ struct batadv_priv_tt {
        spinlock_t changes_list_lock; /* protects changes */
        spinlock_t req_list_lock; /* protects req_list */
        spinlock_t roam_list_lock; /* protects roam_list */
-       atomic_t local_entry_num;
-       uint16_t local_crc;
        unsigned char *last_changeset;
        int16_t last_changeset_len;
        /* protects last_changeset & last_changeset_len */
        spinlock_t last_changeset_lock;
+       /* prevents from executing a commit while reading the table */
+       spinlock_t commit_lock;
        struct delayed_work work;
 };
 
@@ -420,31 +525,31 @@ struct batadv_priv_debug_log {
  * @list: list of available gateway nodes
  * @list_lock: lock protecting gw_list & curr_gw
  * @curr_gw: pointer to currently selected gateway node
+ * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
+ * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
  * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
        struct hlist_head list;
        spinlock_t list_lock; /* protects gw_list & curr_gw */
        struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+       atomic_t bandwidth_down;
+       atomic_t bandwidth_up;
        atomic_t reselect;
 };
 
 /**
- * struct batadv_priv_vis - per mesh interface vis data
- * @send_list: list of batadv_vis_info packets to sent
- * @hash: hash table containing vis data from other nodes in the network
- * @hash_lock: lock protecting the hash table
- * @list_lock: lock protecting my_info::recv_list
- * @work: work queue callback item for vis packet sending
- * @my_info: holds this node's vis data sent on a regular basis
+ * struct batadv_priv_tvlv - per mesh interface tvlv data
+ * @container_list: list of registered tvlv containers to be sent with each OGM
+ * @handler_list: list of the various tvlv content handlers
+ * @container_list_lock: protects tvlv container list access
+ * @handler_list_lock: protects handler list access
  */
-struct batadv_priv_vis {
-       struct list_head send_list;
-       struct batadv_hashtable *hash;
-       spinlock_t hash_lock; /* protects hash */
-       spinlock_t list_lock; /* protects my_info::recv_list */
-       struct delayed_work work;
-       struct batadv_vis_info *my_info;
+struct batadv_priv_tvlv {
+       struct hlist_head container_list;
+       struct hlist_head handler_list;
+       spinlock_t container_list_lock; /* protects container_list */
+       spinlock_t handler_list_lock; /* protects handler_list */
 };
 
 /**
@@ -490,6 +595,26 @@ struct batadv_priv_nc {
        struct batadv_hashtable *decoding_hash;
 };
 
+/**
+ * struct batadv_softif_vlan - per VLAN attributes set
+ * @vid: VLAN identifier
+ * @kobj: kobject for sysfs vlan subdirectory
+ * @ap_isolation: AP isolation state
+ * @tt: TT private attributes (VLAN specific)
+ * @list: list node for bat_priv::softif_vlan_list
+ * @refcount: number of contexts where this object is currently in use
+ * @rcu: struct used for freeing in a RCU-safe manner
+ */
+struct batadv_softif_vlan {
+       unsigned short vid;
+       struct kobject *kobj;
+       atomic_t ap_isolation;          /* boolean */
+       struct batadv_vlan_tt tt;
+       struct hlist_node list;
+       atomic_t refcount;
+       struct rcu_head rcu;
+};
+
 /**
  * struct batadv_priv - per mesh interface data
  * @mesh_state: current status of the mesh (inactive/active/deactivating)
@@ -499,15 +624,15 @@ struct batadv_priv_nc {
  * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
  * @bonding: bool indicating whether traffic bonding is enabled
  * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @ap_isolation: bool indicating whether ap isolation is enabled
+ * @packet_size_max: max packet size that can be transmitted via
+ *  multiple fragmented skbs or a single frame if fragmentation is disabled
+ * @frag_seqno: incremental counter to identify chains of egress fragments
  * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
  *  enabled
  * @distributed_arp_table: bool indicating whether distributed ARP table is
  *  enabled
- * @vis_mode: vis operation: client or server (see batadv_vis_packettype)
  * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
  * @gw_sel_class: gateway selection class (applies if gw_mode client)
- * @gw_bandwidth: gateway announced bandwidth (applies if gw_mode server)
  * @orig_interval: OGM broadcast interval in milliseconds
  * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
  * @log_level: configured log level (see batadv_dbg_level)
@@ -527,11 +652,14 @@ struct batadv_priv_nc {
  * @primary_if: one of the hard interfaces assigned to this mesh interface
  *  becomes the primary interface
  * @bat_algo_ops: routing algorithm used by this mesh interface
+ * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
+ *  of the mesh interface represented by this object
+ * @softif_vlan_list_lock: lock protecting softif_vlan_list
  * @bla: bridge loop avoidance data
  * @debug_log: holding debug logging relevant data
  * @gw: gateway data
  * @tt: translation table data
- * @vis: vis data
+ * @tvlv: type-version-length-value data
  * @dat: distributed arp table data
  * @network_coding: bool indicating whether network coding is enabled
  * @batadv_priv_nc: network coding data
@@ -544,17 +672,16 @@ struct batadv_priv {
        atomic_t aggregated_ogms;
        atomic_t bonding;
        atomic_t fragmentation;
-       atomic_t ap_isolation;
+       atomic_t packet_size_max;
+       atomic_t frag_seqno;
 #ifdef CONFIG_BATMAN_ADV_BLA
        atomic_t bridge_loop_avoidance;
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_t distributed_arp_table;
 #endif
-       atomic_t vis_mode;
        atomic_t gw_mode;
        atomic_t gw_sel_class;
-       atomic_t gw_bandwidth;
        atomic_t orig_interval;
        atomic_t hop_penalty;
 #ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -575,6 +702,8 @@ struct batadv_priv {
        struct work_struct cleanup_work;
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
        struct batadv_algo_ops *bat_algo_ops;
+       struct hlist_head softif_vlan_list;
+       spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
 #ifdef CONFIG_BATMAN_ADV_BLA
        struct batadv_priv_bla bla;
 #endif
@@ -583,7 +712,7 @@ struct batadv_priv {
 #endif
        struct batadv_priv_gw gw;
        struct batadv_priv_tt tt;
-       struct batadv_priv_vis vis;
+       struct batadv_priv_tvlv tvlv;
 #ifdef CONFIG_BATMAN_ADV_DAT
        struct batadv_priv_dat dat;
 #endif
@@ -620,7 +749,7 @@ struct batadv_socket_client {
 struct batadv_socket_packet {
        struct list_head list;
        size_t icmp_len;
-       struct batadv_icmp_packet_rr icmp_packet;
+       uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
 /**
@@ -677,6 +806,7 @@ struct batadv_bla_claim {
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
  * @addr: mac address of non-mesh client
+ * @vid: VLAN identifier
  * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
  *  batadv_priv_tt::global_hash
  * @flags: various state handling flags (see batadv_tt_client_flags)
@@ -686,6 +816,7 @@ struct batadv_bla_claim {
  */
 struct batadv_tt_common_entry {
        uint8_t addr[ETH_ALEN];
+       unsigned short vid;
        struct hlist_node hash_entry;
        uint16_t flags;
        unsigned long added_at;
@@ -740,7 +871,7 @@ struct batadv_tt_orig_list_entry {
  */
 struct batadv_tt_change_node {
        struct list_head list;
-       struct batadv_tt_change change;
+       struct batadv_tvlv_tt_change change;
 };
 
 /**
@@ -865,78 +996,6 @@ struct batadv_forw_packet {
        struct batadv_hard_iface *if_incoming;
 };
 
-/**
- * struct batadv_frag_packet_list_entry - storage for fragment packet
- * @list: list node for orig_node::frag_list
- * @seqno: sequence number of the fragment
- * @skb: fragment's skb buffer
- */
-struct batadv_frag_packet_list_entry {
-       struct list_head list;
-       uint16_t seqno;
-       struct sk_buff *skb;
-};
-
-/**
- * struct batadv_vis_info - local data for vis information
- * @first_seen: timestamp used for purging stale vis info entries
- * @recv_list: List of server-neighbors we have received this packet from. This
- *  packet should not be re-forward to them again. List elements are struct
- *  batadv_vis_recvlist_node
- * @send_list: list of packets to be forwarded
- * @refcount: number of contexts the object is used
- * @hash_entry: hlist node for batadv_priv_vis::hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @skb_packet: contains the vis packet
- */
-struct batadv_vis_info {
-       unsigned long first_seen;
-       struct list_head recv_list;
-       struct list_head send_list;
-       struct kref refcount;
-       struct hlist_node hash_entry;
-       struct batadv_priv *bat_priv;
-       struct sk_buff *skb_packet;
-} __packed;
-
-/**
- * struct batadv_vis_info_entry - contains link information for vis
- * @src: source MAC of the link, all zero for local TT entry
- * @dst: destination MAC of the link, client mac address for local TT entry
- * @quality: transmission quality of the link, or 0 for local TT entry
- */
-struct batadv_vis_info_entry {
-       uint8_t  src[ETH_ALEN];
-       uint8_t  dest[ETH_ALEN];
-       uint8_t  quality;
-} __packed;
-
-/**
- * struct batadv_vis_recvlist_node - list entry for batadv_vis_info::recv_list
- * @list: list node for batadv_vis_info::recv_list
- * @mac: MAC address of the originator from where the vis_info was received
- */
-struct batadv_vis_recvlist_node {
-       struct list_head list;
-       uint8_t mac[ETH_ALEN];
-};
-
-/**
- * struct batadv_vis_if_list_entry - auxiliary data for vis data generation
- * @addr: MAC address of the interface
- * @primary: true if this interface is the primary interface
- * @list: list node the interface list
- *
- * While scanning for vis-entries of a particular vis-originator
- * this list collects its interfaces to create a subgraph/cluster
- * out of them later
- */
-struct batadv_vis_if_list_entry {
-       uint8_t addr[ETH_ALEN];
-       bool primary;
-       struct hlist_node list;
-};
-
 /**
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
@@ -948,6 +1007,16 @@ struct batadv_vis_if_list_entry {
  * @bat_primary_iface_set: called when primary interface is selected / changed
  * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
  * @bat_ogm_emit: send scheduled OGM
+ * @bat_neigh_cmp: compare the metrics of two neighbors
+ * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or
+ *  better than neigh2 from the metric perspective
+ * @bat_orig_print: print the originator table (optional)
+ * @bat_orig_free: free the resources allocated by the routing algorithm for an
+ *  orig_node object
+ * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to
+ *  the orig_node due to a new hard-interface being added into the mesh
+ * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to
+ *  the orig_node due to a hard-interface being removed from the mesh
  */
 struct batadv_algo_ops {
        struct hlist_node list;
@@ -958,6 +1027,17 @@ struct batadv_algo_ops {
        void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
        void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
        void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
+       int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
+                            struct batadv_neigh_node *neigh2);
+       bool (*bat_neigh_is_equiv_or_better)(struct batadv_neigh_node *neigh1,
+                                            struct batadv_neigh_node *neigh2);
+       /* orig_node handling API */
+       void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq);
+       void (*bat_orig_free)(struct batadv_orig_node *orig_node);
+       int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
+                              int max_if_num);
+       int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
+                              int max_if_num, int del_if_num);
 };
 
 /**
@@ -965,6 +1045,7 @@ struct batadv_algo_ops {
  * is used to stored ARP entries needed for the global DAT cache
  * @ip: the IPv4 corresponding to this DAT/ARP entry
  * @mac_addr: the MAC address associated to the stored IPv4
+ * @vid: the vlan ID associated to this entry
  * @last_update: time in jiffies when this entry was refreshed last time
  * @hash_entry: hlist node for batadv_priv_dat::hash
  * @refcount: number of contexts the object is used
@@ -973,6 +1054,7 @@ struct batadv_algo_ops {
 struct batadv_dat_entry {
        __be32 ip;
        uint8_t mac_addr[ETH_ALEN];
+       unsigned short vid;
        unsigned long last_update;
        struct hlist_node hash_entry;
        atomic_t refcount;
@@ -992,4 +1074,60 @@ struct batadv_dat_candidate {
        struct batadv_orig_node *orig_node;
 };
 
+/**
+ * struct batadv_tvlv_container - container for tvlv appended to OGMs
+ * @list: hlist node for batadv_priv_tvlv::container_list
+ * @tvlv_hdr: tvlv header information needed to construct the tvlv
+ * @value_len: length of the buffer following this struct which contains
+ *  the actual tvlv payload
+ * @refcount: number of contexts the object is used
+ */
+struct batadv_tvlv_container {
+       struct hlist_node list;
+       struct batadv_tvlv_hdr tvlv_hdr;
+       atomic_t refcount;
+};
+
+/**
+ * struct batadv_tvlv_handler - handler for specific tvlv type and version
+ * @list: hlist node for batadv_priv_tvlv::handler_list
+ * @ogm_handler: handler callback which is given the tvlv payload to process on
+ *  incoming OGM packets
+ * @unicast_handler: handler callback which is given the tvlv payload to process
+ *  on incoming unicast tvlv packets
+ * @type: tvlv type this handler feels responsible for
+ * @version: tvlv version this handler feels responsible for
+ * @flags: tvlv handler flags
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_tvlv_handler {
+       struct hlist_node list;
+       void (*ogm_handler)(struct batadv_priv *bat_priv,
+                           struct batadv_orig_node *orig,
+                           uint8_t flags,
+                           void *tvlv_value, uint16_t tvlv_value_len);
+       int (*unicast_handler)(struct batadv_priv *bat_priv,
+                              uint8_t *src, uint8_t *dst,
+                              void *tvlv_value, uint16_t tvlv_value_len);
+       uint8_t type;
+       uint8_t version;
+       uint8_t flags;
+       atomic_t refcount;
+       struct rcu_head rcu;
+};
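
Each handler registered through batadv_tvlv_handler_register() (see batadv_tt_init earlier in this diff) is matched against incoming containers by type and version before its callback runs. Below is a userspace sketch of that dispatch; the registry, the callback signature and the type value 4 are illustrative only and not the real batman-adv API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tvlv_handler {
	void (*ogm_handler)(void *value, uint16_t len);
	uint8_t type;
	uint8_t version;
};

static void tt_ogm_handler(void *value, uint16_t len)
{
	(void)value;
	printf("TT container, %u bytes\n", (unsigned)len);
}

static struct tvlv_handler handlers[] = {
	{ .ogm_handler = tt_ogm_handler, .type = 4, .version = 1 },
};

/* Find the handler registered for this container and invoke it. */
static void tvlv_dispatch(uint8_t type, uint8_t version,
			  void *value, uint16_t len)
{
	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (handlers[i].type != type || handlers[i].version != version)
			continue;
		handlers[i].ogm_handler(value, len);
		return;
	}
	printf("no handler for type=%u version=%u\n",
	       (unsigned)type, (unsigned)version);
}

int main(void)
{
	uint8_t payload[16] = { 0 };

	tvlv_dispatch(4, 1, payload, sizeof(payload));  /* handled */
	tvlv_dispatch(9, 1, payload, sizeof(payload));  /* unknown type */
	return 0;
}
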
+
+/**
+ * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
+ * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
+ *  this handler even if its type was not found (with no data)
+ * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the API marks
+ *  a handler as being called, so it won't be called if the
+ *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+ */
+enum batadv_tvlv_handler_flags {
+       BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+       BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
+};
+
 #endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
deleted file mode 100644 (file)
index 48b31d3..0000000
+++ /dev/null
@@ -1,491 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "unicast.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "gateway_client.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-
-static struct sk_buff *
-batadv_frag_merge_packet(struct list_head *head,
-                        struct batadv_frag_packet_list_entry *tfp,
-                        struct sk_buff *skb)
-{
-       struct batadv_unicast_frag_packet *up;
-       struct sk_buff *tmp_skb;
-       struct batadv_unicast_packet *unicast_packet;
-       int hdr_len = sizeof(*unicast_packet);
-       int uni_diff = sizeof(*up) - hdr_len;
-       uint8_t *packet_pos;
-
-       up = (struct batadv_unicast_frag_packet *)skb->data;
-       /* set skb to the first part and tmp_skb to the second part */
-       if (up->flags & BATADV_UNI_FRAG_HEAD) {
-               tmp_skb = tfp->skb;
-       } else {
-               tmp_skb = skb;
-               skb = tfp->skb;
-       }
-
-       if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
-               goto err;
-
-       skb_pull(tmp_skb, sizeof(*up));
-       if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
-               goto err;
-
-       /* move free entry to end */
-       tfp->skb = NULL;
-       tfp->seqno = 0;
-       list_move_tail(&tfp->list, head);
-
-       memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
-       kfree_skb(tmp_skb);
-
-       memmove(skb->data + uni_diff, skb->data, hdr_len);
-       packet_pos = skb_pull(skb, uni_diff);
-       unicast_packet = (struct batadv_unicast_packet *)packet_pos;
-       unicast_packet->header.packet_type = BATADV_UNICAST;
-
-       return skb;
-
-err:
-       /* free buffered skb, skb will be freed later */
-       kfree_skb(tfp->skb);
-       return NULL;
-}
-
-static void batadv_frag_create_entry(struct list_head *head,
-                                    struct sk_buff *skb)
-{
-       struct batadv_frag_packet_list_entry *tfp;
-       struct batadv_unicast_frag_packet *up;
-
-       up = (struct batadv_unicast_frag_packet *)skb->data;
-
-       /* free and oldest packets stand at the end */
-       tfp = list_entry((head)->prev, typeof(*tfp), list);
-       kfree_skb(tfp->skb);
-
-       tfp->seqno = ntohs(up->seqno);
-       tfp->skb = skb;
-       list_move(&tfp->list, head);
-       return;
-}
-
-static int batadv_frag_create_buffer(struct list_head *head)
-{
-       int i;
-       struct batadv_frag_packet_list_entry *tfp;
-
-       for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
-               tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
-               if (!tfp) {
-                       batadv_frag_list_free(head);
-                       return -ENOMEM;
-               }
-               tfp->skb = NULL;
-               tfp->seqno = 0;
-               INIT_LIST_HEAD(&tfp->list);
-               list_add(&tfp->list, head);
-       }
-
-       return 0;
-}
-
-static struct batadv_frag_packet_list_entry *
-batadv_frag_search_packet(struct list_head *head,
-                         const struct batadv_unicast_frag_packet *up)
-{
-       struct batadv_frag_packet_list_entry *tfp;
-       struct batadv_unicast_frag_packet *tmp_up = NULL;
-       bool is_head_tmp, is_head;
-       uint16_t search_seqno;
-
-       if (up->flags & BATADV_UNI_FRAG_HEAD)
-               search_seqno = ntohs(up->seqno)+1;
-       else
-               search_seqno = ntohs(up->seqno)-1;
-
-       is_head = up->flags & BATADV_UNI_FRAG_HEAD;
-
-       list_for_each_entry(tfp, head, list) {
-               if (!tfp->skb)
-                       continue;
-
-               if (tfp->seqno == ntohs(up->seqno))
-                       goto mov_tail;
-
-               tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
-
-               if (tfp->seqno == search_seqno) {
-                       is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
-                       if (is_head_tmp != is_head)
-                               return tfp;
-                       else
-                               goto mov_tail;
-               }
-       }
-       return NULL;
-
-mov_tail:
-       list_move_tail(&tfp->list, head);
-       return NULL;
-}
-
-void batadv_frag_list_free(struct list_head *head)
-{
-       struct batadv_frag_packet_list_entry *pf, *tmp_pf;
-
-       if (!list_empty(head)) {
-               list_for_each_entry_safe(pf, tmp_pf, head, list) {
-                       kfree_skb(pf->skb);
-                       list_del(&pf->list);
-                       kfree(pf);
-               }
-       }
-       return;
-}
-
-/* frag_reassemble_skb():
- * returns NET_RX_DROP if the operation failed - skb is left intact
- * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
- * or the skb could be reassembled (skb_new will point to the new packet and
- * skb was freed)
- */
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
-                              struct batadv_priv *bat_priv,
-                              struct sk_buff **new_skb)
-{
-       struct batadv_orig_node *orig_node;
-       struct batadv_frag_packet_list_entry *tmp_frag_entry;
-       int ret = NET_RX_DROP;
-       struct batadv_unicast_frag_packet *unicast_packet;
-
-       unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-       *new_skb = NULL;
-
-       orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
-       if (!orig_node)
-               goto out;
-
-       orig_node->last_frag_packet = jiffies;
-
-       if (list_empty(&orig_node->frag_list) &&
-           batadv_frag_create_buffer(&orig_node->frag_list)) {
-               pr_debug("couldn't create frag buffer\n");
-               goto out;
-       }
-
-       tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
-                                                  unicast_packet);
-
-       if (!tmp_frag_entry) {
-               batadv_frag_create_entry(&orig_node->frag_list, skb);
-               ret = NET_RX_SUCCESS;
-               goto out;
-       }
-
-       *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
-                                           tmp_frag_entry, skb);
-       /* if not, merge failed */
-       if (*new_skb)
-               ret = NET_RX_SUCCESS;
-
-out:
-       if (orig_node)
-               batadv_orig_node_free_ref(orig_node);
-       return ret;
-}
-
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
-                        struct batadv_hard_iface *hard_iface,
-                        const uint8_t dstaddr[])
-{
-       struct batadv_unicast_packet tmp_uc, *unicast_packet;
-       struct batadv_hard_iface *primary_if;
-       struct sk_buff *frag_skb;
-       struct batadv_unicast_frag_packet *frag1, *frag2;
-       int uc_hdr_len = sizeof(*unicast_packet);
-       int ucf_hdr_len = sizeof(*frag1);
-       int data_len = skb->len - uc_hdr_len;
-       int large_tail = 0, ret = NET_RX_DROP;
-       uint16_t seqno;
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto dropped;
-
-       frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
-       if (!frag_skb)
-               goto dropped;
-
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(frag_skb, ucf_hdr_len);
-
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
-       skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
-
-       if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
-           batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
-               goto drop_frag;
-
-       frag1 = (struct batadv_unicast_frag_packet *)skb->data;
-       frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
-
-       memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
-
-       frag1->header.ttl--;
-       frag1->header.version = BATADV_COMPAT_VERSION;
-       frag1->header.packet_type = BATADV_UNICAST_FRAG;
-
-       memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(frag2, frag1, sizeof(*frag2));
-
-       if (data_len & 1)
-               large_tail = BATADV_UNI_FRAG_LARGETAIL;
-
-       frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
-       frag2->flags = large_tail;
-
-       seqno = atomic_add_return(2, &hard_iface->frag_seqno);
-       frag1->seqno = htons(seqno - 1);
-       frag2->seqno = htons(seqno);
-
-       batadv_send_skb_packet(skb, hard_iface, dstaddr);
-       batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
-       ret = NET_RX_SUCCESS;
-       goto out;
-
-drop_frag:
-       kfree_skb(frag_skb);
-dropped:
-       kfree_skb(skb);
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return ret;
-}
-
-/**
- * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
- * common fields for unicast packets
- * @skb: packet
- * @hdr_size: amount of bytes to push at the beginning of the skb
- * @orig_node: the destination node
- *
- * Returns false if the buffer extension was not possible or true otherwise
- */
-static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
-                                            struct batadv_orig_node *orig_node)
-{
-       struct batadv_unicast_packet *unicast_packet;
-       uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-
-       if (batadv_skb_head_push(skb, hdr_size) < 0)
-               return false;
-
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.version = BATADV_COMPAT_VERSION;
-       /* batman packet type: unicast */
-       unicast_packet->header.packet_type = BATADV_UNICAST;
-       /* set unicast ttl */
-       unicast_packet->header.ttl = BATADV_TTL;
-       /* copy the destination for faster routing */
-       memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-       /* set the destination tt version number */
-       unicast_packet->ttvn = ttvn;
-
-       return true;
-}
-
-/**
- * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
-                                      struct batadv_orig_node *orig_node)
-{
-       size_t uni_size = sizeof(struct batadv_unicast_packet);
-       return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
-}
-
-/**
- * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
- * header
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- * @packet_subtype: the batman 4addr packet subtype to use
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
-                                     struct sk_buff *skb,
-                                     struct batadv_orig_node *orig,
-                                     int packet_subtype)
-{
-       struct batadv_hard_iface *primary_if;
-       struct batadv_unicast_4addr_packet *unicast_4addr_packet;
-       bool ret = false;
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       /* pull the header space and fill the unicast_packet substructure.
-        * We can do that because the first member of the unicast_4addr_packet
-        * is of type struct unicast_packet
-        */
-       if (!batadv_unicast_push_and_fill_skb(skb,
-                                             sizeof(*unicast_4addr_packet),
-                                             orig))
-               goto out;
-
-       unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-       unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
-       memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
-              ETH_ALEN);
-       unicast_4addr_packet->subtype = packet_subtype;
-       unicast_4addr_packet->reserved = 0;
-
-       ret = true;
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return ret;
-}
-
-/**
- * batadv_unicast_generic_send_skb - send an skb as unicast
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: payload to send
- * @packet_type: the batman unicast packet type to use
- * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
- *                 not BATADV_UNICAT_4ADDR
- *
- * Returns 1 in case of error or 0 otherwise
- */
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
-                                   struct sk_buff *skb, int packet_type,
-                                   int packet_subtype)
-{
-       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-       struct batadv_unicast_packet *unicast_packet;
-       struct batadv_orig_node *orig_node;
-       struct batadv_neigh_node *neigh_node;
-       int data_len = skb->len;
-       int ret = NET_RX_DROP;
-       unsigned int dev_mtu, header_len;
-
-       /* get routing information */
-       if (is_multicast_ether_addr(ethhdr->h_dest)) {
-               orig_node = batadv_gw_get_selected_orig(bat_priv);
-               if (orig_node)
-                       goto find_router;
-       }
-
-       /* check for tt host - increases orig_node refcount.
-        * returns NULL in case of AP isolation
-        */
-       orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
-                                            ethhdr->h_dest);
-
-find_router:
-       /* find_router():
-        *  - if orig_node is NULL it returns NULL
-        *  - increases neigh_nodes refcount if found.
-        */
-       neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
-
-       if (!neigh_node)
-               goto out;
-
-       switch (packet_type) {
-       case BATADV_UNICAST:
-               if (!batadv_unicast_prepare_skb(skb, orig_node))
-                       goto out;
-
-               header_len = sizeof(struct batadv_unicast_packet);
-               break;
-       case BATADV_UNICAST_4ADDR:
-               if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-                                                     packet_subtype))
-                       goto out;
-
-               header_len = sizeof(struct batadv_unicast_4addr_packet);
-               break;
-       default:
-               /* this function supports UNICAST and UNICAST_4ADDR only. It
-                * should never be invoked with any other packet type
-                */
-               goto out;
-       }
-
-       ethhdr = (struct ethhdr *)(skb->data + header_len);
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
-       /* inform the destination node that we are still missing a correct route
-        * for this client. The destination will receive this packet and will
-        * try to reroute it because the ttvn contained in the header is less
-        * than the current one
-        */
-       if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
-               unicast_packet->ttvn = unicast_packet->ttvn - 1;
-
-       dev_mtu = neigh_node->if_incoming->net_dev->mtu;
-       /* fragmentation mechanism only works for UNICAST (now) */
-       if (packet_type == BATADV_UNICAST &&
-           atomic_read(&bat_priv->fragmentation) &&
-           data_len + sizeof(*unicast_packet) > dev_mtu) {
-               /* send frag skb decreases ttl */
-               unicast_packet->header.ttl++;
-               ret = batadv_frag_send_skb(skb, bat_priv,
-                                          neigh_node->if_incoming,
-                                          neigh_node->addr);
-               goto out;
-       }
-
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = 0;
-
-out:
-       if (neigh_node)
-               batadv_neigh_node_free_ref(neigh_node);
-       if (orig_node)
-               batadv_orig_node_free_ref(orig_node);
-       if (ret == NET_RX_DROP)
-               kfree_skb(skb);
-       return ret;
-}
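
The removed send path above fragments a frame only for plain BATADV_UNICAST traffic, only while the fragmentation knob is enabled, and only when the payload plus the unicast header no longer fits the outgoing interface MTU. A minimal standalone sketch of that decision follows; the helper name and the plain scalar parameters are illustrative, not part of the batman-adv API.

#include <stdbool.h>
#include <stddef.h>

/* Mirrors the deleted condition:
 * packet_type == BATADV_UNICAST && atomic_read(&bat_priv->fragmentation) &&
 * data_len + sizeof(*unicast_packet) > dev_mtu
 */
static bool needs_legacy_fragmentation(bool is_plain_unicast, bool frag_enabled,
                                       size_t data_len, size_t unicast_hdr_len,
                                       size_t dev_mtu)
{
        if (!is_plain_unicast || !frag_enabled)
                return false;

        return data_len + unicast_hdr_len > dev_mtu;
}
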
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
deleted file mode 100644 (file)
index 429cf8a..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_UNICAST_H_
-#define _NET_BATMAN_ADV_UNICAST_H_
-
-#include "packet.h"
-
-#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
-                              struct batadv_priv *bat_priv,
-                              struct sk_buff **new_skb);
-void batadv_frag_list_free(struct list_head *head);
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
-                        struct batadv_hard_iface *hard_iface,
-                        const uint8_t dstaddr[]);
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
-                                     struct sk_buff *skb,
-                                     struct batadv_orig_node *orig_node,
-                                     int packet_subtype);
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
-                                   struct sk_buff *skb, int packet_type,
-                                   int packet_subtype);
-
-
-/**
- * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- */
-static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
-                                         struct sk_buff *skb)
-{
-       return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
-                                              0);
-}
-
-/**
- * batadv_unicast_4addr_send_skb - send the skb encapsulated in a unicast4addr packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- * @packet_subtype: the batman 4addr packet subtype to use
- */
-static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
-                                               struct sk_buff *skb,
-                                               int packet_subtype)
-{
-       return batadv_unicast_generic_send_skb(bat_priv, skb,
-                                              BATADV_UNICAST_4ADDR,
-                                              packet_subtype);
-}
-
-static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
-{
-       const struct batadv_unicast_frag_packet *unicast_packet;
-       int uneven_correction = 0;
-       unsigned int merged_size;
-
-       unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-
-       if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
-               if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
-                       uneven_correction = 1;
-               else
-                       uneven_correction = -1;
-       }
-
-       merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
-       merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
-
-       return merged_size <= mtu;
-}
-
-#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
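
batadv_frag_can_reassemble() above estimates the merged packet size from a single fragment: each of the two fragments carries a fragment header, so the reassembled frame is roughly twice the per-fragment payload plus one unicast header, corrected by one byte when the original length was odd (the LARGETAIL/HEAD flag combination). A standalone restatement of the same arithmetic, using hypothetical scalar parameters instead of the skb and packet structs:

#include <stdbool.h>
#include <stddef.h>

/* frag_len is the on-wire length of one fragment including its fragment
 * header; uneven_correction is +1, 0 or -1 exactly as in the code above.
 */
static bool can_reassemble(size_t frag_len, size_t frag_hdr_len,
                           size_t unicast_hdr_len, int uneven_correction,
                           size_t mtu)
{
        size_t merged_size;

        merged_size = (frag_len - frag_hdr_len) * 2;
        merged_size += unicast_hdr_len + uneven_correction;

        return merged_size <= mtu;
}
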
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
deleted file mode 100644 (file)
index d8ea31a..0000000
+++ /dev/null
@@ -1,938 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "send.h"
-#include "translation-table.h"
-#include "vis.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "hash.h"
-#include "originator.h"
-
-#define BATADV_MAX_VIS_PACKET_SIZE 1000
-
-/* hash class keys */
-static struct lock_class_key batadv_vis_hash_lock_class_key;
-
-/* free the info */
-static void batadv_free_info(struct kref *ref)
-{
-       struct batadv_vis_info *info;
-       struct batadv_priv *bat_priv;
-       struct batadv_vis_recvlist_node *entry, *tmp;
-
-       info = container_of(ref, struct batadv_vis_info, refcount);
-       bat_priv = info->bat_priv;
-
-       list_del_init(&info->send_list);
-       spin_lock_bh(&bat_priv->vis.list_lock);
-       list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-
-       spin_unlock_bh(&bat_priv->vis.list_lock);
-       kfree_skb(info->skb_packet);
-       kfree(info);
-}
-
-/* Compare two vis packets, used by the hashing algorithm */
-static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
-{
-       const struct batadv_vis_info *d1, *d2;
-       const struct batadv_vis_packet *p1, *p2;
-
-       d1 = container_of(node, struct batadv_vis_info, hash_entry);
-       d2 = data2;
-       p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
-       p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
-       return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
-}
-
-/* hash function to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
- */
-static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
-{
-       const struct batadv_vis_info *vis_info = data;
-       const struct batadv_vis_packet *packet;
-       const unsigned char *key;
-       uint32_t hash = 0;
-       size_t i;
-
-       packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
-       key = packet->vis_orig;
-       for (i = 0; i < ETH_ALEN; i++) {
-               hash += key[i];
-               hash += (hash << 10);
-               hash ^= (hash >> 6);
-       }
-
-       hash += (hash << 3);
-       hash ^= (hash >> 11);
-       hash += (hash << 15);
-
-       return hash % size;
-}
-
-static struct batadv_vis_info *
-batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-       struct batadv_hashtable *hash = bat_priv->vis.hash;
-       struct hlist_head *head;
-       struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
-       uint32_t index;
-
-       if (!hash)
-               return NULL;
-
-       index = batadv_vis_info_choose(data, hash->size);
-       head = &hash->table[index];
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
-               if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
-                       continue;
-
-               vis_info_tmp = vis_info;
-               break;
-       }
-       rcu_read_unlock();
-
-       return vis_info_tmp;
-}
-
-/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list
- */
-static void batadv_vis_data_insert_interface(const uint8_t *interface,
-                                            struct hlist_head *if_list,
-                                            bool primary)
-{
-       struct batadv_vis_if_list_entry *entry;
-
-       hlist_for_each_entry(entry, if_list, list) {
-               if (batadv_compare_eth(entry->addr, interface))
-                       return;
-       }
-
-       /* it's a new address, add it to the list */
-       entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-       if (!entry)
-               return;
-       memcpy(entry->addr, interface, ETH_ALEN);
-       entry->primary = primary;
-       hlist_add_head(&entry->list, if_list);
-}
-
-static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
-                                         const struct hlist_head *if_list)
-{
-       struct batadv_vis_if_list_entry *entry;
-
-       hlist_for_each_entry(entry, if_list, list) {
-               if (entry->primary)
-                       seq_puts(seq, "PRIMARY, ");
-               else
-                       seq_printf(seq,  "SEC %pM, ", entry->addr);
-       }
-}
-
-/* read an entry  */
-static ssize_t
-batadv_vis_data_read_entry(struct seq_file *seq,
-                          const struct batadv_vis_info_entry *entry,
-                          const uint8_t *src, bool primary)
-{
-       if (primary && entry->quality == 0)
-               return seq_printf(seq, "TT %pM, ", entry->dest);
-       else if (batadv_compare_eth(entry->src, src))
-               return seq_printf(seq, "TQ %pM %d, ", entry->dest,
-                                 entry->quality);
-
-       return 0;
-}
-
-static void
-batadv_vis_data_insert_interfaces(struct hlist_head *list,
-                                 struct batadv_vis_packet *packet,
-                                 struct batadv_vis_info_entry *entries)
-{
-       int i;
-
-       for (i = 0; i < packet->entries; i++) {
-               if (entries[i].quality == 0)
-                       continue;
-
-               if (batadv_compare_eth(entries[i].src, packet->vis_orig))
-                       continue;
-
-               batadv_vis_data_insert_interface(entries[i].src, list, false);
-       }
-}
-
-static void batadv_vis_data_read_entries(struct seq_file *seq,
-                                        struct hlist_head *list,
-                                        struct batadv_vis_packet *packet,
-                                        struct batadv_vis_info_entry *entries)
-{
-       int i;
-       struct batadv_vis_if_list_entry *entry;
-
-       hlist_for_each_entry(entry, list, list) {
-               seq_printf(seq, "%pM,", entry->addr);
-
-               for (i = 0; i < packet->entries; i++)
-                       batadv_vis_data_read_entry(seq, &entries[i],
-                                                  entry->addr, entry->primary);
-
-               /* add primary/secondary records */
-               if (batadv_compare_eth(entry->addr, packet->vis_orig))
-                       batadv_vis_data_read_prim_sec(seq, list);
-
-               seq_puts(seq, "\n");
-       }
-}
-
-static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
-                                            const struct hlist_head *head)
-{
-       struct batadv_vis_info *info;
-       struct batadv_vis_packet *packet;
-       uint8_t *entries_pos;
-       struct batadv_vis_info_entry *entries;
-       struct batadv_vis_if_list_entry *entry;
-       struct hlist_node *n;
-
-       HLIST_HEAD(vis_if_list);
-
-       hlist_for_each_entry_rcu(info, head, hash_entry) {
-               packet = (struct batadv_vis_packet *)info->skb_packet->data;
-               entries_pos = (uint8_t *)packet + sizeof(*packet);
-               entries = (struct batadv_vis_info_entry *)entries_pos;
-
-               batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
-                                                true);
-               batadv_vis_data_insert_interfaces(&vis_if_list, packet,
-                                                 entries);
-               batadv_vis_data_read_entries(seq, &vis_if_list, packet,
-                                            entries);
-
-               hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
-                       hlist_del(&entry->list);
-                       kfree(entry);
-               }
-       }
-}
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
-{
-       struct batadv_hard_iface *primary_if;
-       struct hlist_head *head;
-       struct net_device *net_dev = (struct net_device *)seq->private;
-       struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->vis.hash;
-       uint32_t i;
-       int ret = 0;
-       int vis_server = atomic_read(&bat_priv->vis_mode);
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               goto out;
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-               batadv_vis_seq_print_text_bucket(seq, head);
-       }
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return ret;
-}
-
-/* add the info packet to the send list, if it was not
- * already linked in.
- */
-static void batadv_send_list_add(struct batadv_priv *bat_priv,
-                                struct batadv_vis_info *info)
-{
-       if (list_empty(&info->send_list)) {
-               kref_get(&info->refcount);
-               list_add_tail(&info->send_list, &bat_priv->vis.send_list);
-       }
-}
-
-/* delete the info packet from the send list, if it was
- * linked in.
- */
-static void batadv_send_list_del(struct batadv_vis_info *info)
-{
-       if (!list_empty(&info->send_list)) {
-               list_del_init(&info->send_list);
-               kref_put(&info->refcount, batadv_free_info);
-       }
-}
-
-/* tries to add one entry to the receive list. */
-static void batadv_recv_list_add(struct batadv_priv *bat_priv,
-                                struct list_head *recv_list, const char *mac)
-{
-       struct batadv_vis_recvlist_node *entry;
-
-       entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-       if (!entry)
-               return;
-
-       memcpy(entry->mac, mac, ETH_ALEN);
-       spin_lock_bh(&bat_priv->vis.list_lock);
-       list_add_tail(&entry->list, recv_list);
-       spin_unlock_bh(&bat_priv->vis.list_lock);
-}
-
-/* returns 1 if this mac is in the recv_list */
-static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
-                                 const struct list_head *recv_list,
-                                 const char *mac)
-{
-       const struct batadv_vis_recvlist_node *entry;
-
-       spin_lock_bh(&bat_priv->vis.list_lock);
-       list_for_each_entry(entry, recv_list, list) {
-               if (batadv_compare_eth(entry->mac, mac)) {
-                       spin_unlock_bh(&bat_priv->vis.list_lock);
-                       return 1;
-               }
-       }
-       spin_unlock_bh(&bat_priv->vis.list_lock);
-       return 0;
-}
-
-/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
- * broken.. ). vis hash must be locked outside.  is_new is set when the packet
- * is newer than old entries in the hash.
- */
-static struct batadv_vis_info *
-batadv_add_packet(struct batadv_priv *bat_priv,
-                 struct batadv_vis_packet *vis_packet, int vis_info_len,
-                 int *is_new, int make_broadcast)
-{
-       struct batadv_vis_info *info, *old_info;
-       struct batadv_vis_packet *search_packet, *old_packet;
-       struct batadv_vis_info search_elem;
-       struct batadv_vis_packet *packet;
-       struct sk_buff *tmp_skb;
-       int hash_added;
-       size_t len;
-       size_t max_entries;
-
-       *is_new = 0;
-       /* sanity check */
-       if (!bat_priv->vis.hash)
-               return NULL;
-
-       /* see if the packet is already in vis_hash */
-       search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
-       if (!search_elem.skb_packet)
-               return NULL;
-       len = sizeof(*search_packet);
-       tmp_skb = search_elem.skb_packet;
-       search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
-
-       memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-       old_info = batadv_vis_hash_find(bat_priv, &search_elem);
-       kfree_skb(search_elem.skb_packet);
-
-       if (old_info) {
-               tmp_skb = old_info->skb_packet;
-               old_packet = (struct batadv_vis_packet *)tmp_skb->data;
-               if (!batadv_seq_after(ntohl(vis_packet->seqno),
-                                     ntohl(old_packet->seqno))) {
-                       if (old_packet->seqno == vis_packet->seqno) {
-                               batadv_recv_list_add(bat_priv,
-                                                    &old_info->recv_list,
-                                                    vis_packet->sender_orig);
-                               return old_info;
-                       } else {
-                               /* newer packet is already in hash. */
-                               return NULL;
-                       }
-               }
-               /* remove old entry */
-               batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
-                                  batadv_vis_info_choose, old_info);
-               batadv_send_list_del(old_info);
-               kref_put(&old_info->refcount, batadv_free_info);
-       }
-
-       info = kmalloc(sizeof(*info), GFP_ATOMIC);
-       if (!info)
-               return NULL;
-
-       len = sizeof(*packet) + vis_info_len;
-       info->skb_packet = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-       if (!info->skb_packet) {
-               kfree(info);
-               return NULL;
-       }
-       info->skb_packet->priority = TC_PRIO_CONTROL;
-       skb_reserve(info->skb_packet, ETH_HLEN);
-       packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
-
-       kref_init(&info->refcount);
-       INIT_LIST_HEAD(&info->send_list);
-       INIT_LIST_HEAD(&info->recv_list);
-       info->first_seen = jiffies;
-       info->bat_priv = bat_priv;
-       memcpy(packet, vis_packet, len);
-
-       /* initialize and add new packet. */
-       *is_new = 1;
-
-       /* Make it a broadcast packet, if required */
-       if (make_broadcast)
-               memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-
-       /* repair if entries is longer than packet. */
-       max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
-       if (packet->entries > max_entries)
-               packet->entries = max_entries;
-
-       batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
-
-       /* try to add it */
-       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
-                                    batadv_vis_info_choose, info,
-                                    &info->hash_entry);
-       if (hash_added != 0) {
-               /* did not work (for some reason) */
-               kref_put(&info->refcount, batadv_free_info);
-               info = NULL;
-       }
-
-       return info;
-}
-
-/* handle the server sync packet, forward if needed. */
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
-                                      struct batadv_vis_packet *vis_packet,
-                                      int vis_info_len)
-{
-       struct batadv_vis_info *info;
-       int is_new, make_broadcast;
-       int vis_server = atomic_read(&bat_priv->vis_mode);
-
-       make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
-                                &is_new, make_broadcast);
-       if (!info)
-               goto end;
-
-       /* only if we are server ourselves and packet is newer than the one in
-        * hash.
-        */
-       if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
-               batadv_send_list_add(bat_priv, info);
-end:
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* handle an incoming client update packet and schedule forward if needed. */
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
-                                        struct batadv_vis_packet *vis_packet,
-                                        int vis_info_len)
-{
-       struct batadv_vis_info *info;
-       struct batadv_vis_packet *packet;
-       int is_new;
-       int vis_server = atomic_read(&bat_priv->vis_mode);
-       int are_target = 0;
-
-       /* clients shall not broadcast. */
-       if (is_broadcast_ether_addr(vis_packet->target_orig))
-               return;
-
-       /* Are we the target for this VIS packet? */
-       if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC   &&
-           batadv_is_my_mac(bat_priv, vis_packet->target_orig))
-               are_target = 1;
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
-                                &is_new, are_target);
-
-       if (!info)
-               goto end;
-       /* note that outdated packets will be dropped at this point. */
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       /* send only if we're the target server or ... */
-       if (are_target && is_new) {
-               packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
-               batadv_send_list_add(bat_priv, info);
-
-               /* ... we're not the recipient (and thus need to forward). */
-       } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
-               batadv_send_list_add(bat_priv, info);
-       }
-
-end:
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* Walk the originators and find the VIS server with the best tq. Set the packet
- * address to its address and return the best_tq.
- *
- * Must be called with the originator hash locked
- */
-static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
-                                      struct batadv_vis_info *info)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct batadv_neigh_node *router;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       struct batadv_vis_packet *packet;
-       int best_tq = -1;
-       uint32_t i;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       router = batadv_orig_node_get_router(orig_node);
-                       if (!router)
-                               continue;
-
-                       if ((orig_node->flags & BATADV_VIS_SERVER) &&
-                           (router->tq_avg > best_tq)) {
-                               best_tq = router->tq_avg;
-                               memcpy(packet->target_orig, orig_node->orig,
-                                      ETH_ALEN);
-                       }
-                       batadv_neigh_node_free_ref(router);
-               }
-               rcu_read_unlock();
-       }
-
-       return best_tq;
-}
-
-/* Return true if the vis packet is full. */
-static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
-{
-       const struct batadv_vis_packet *packet;
-       size_t num;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-       num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
-
-       if (num < packet->entries + 1)
-               return true;
-       return false;
-}
-
-/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated
- */
-static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       struct batadv_neigh_node *router;
-       struct batadv_vis_info *info = bat_priv->vis.my_info;
-       struct batadv_vis_packet *packet;
-       struct batadv_vis_info_entry *entry;
-       struct batadv_tt_common_entry *tt_common_entry;
-       uint8_t *packet_pos;
-       int best_tq = -1;
-       uint32_t i;
-
-       info->first_seen = jiffies;
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-       packet->vis_type = atomic_read(&bat_priv->vis_mode);
-
-       memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-       packet->header.ttl = BATADV_TTL;
-       packet->seqno = htonl(ntohl(packet->seqno) + 1);
-       packet->entries = 0;
-       packet->reserved = 0;
-       skb_trim(info->skb_packet, sizeof(*packet));
-
-       if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
-               best_tq = batadv_find_best_vis_server(bat_priv, info);
-
-               if (best_tq < 0)
-                       return best_tq;
-       }
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       router = batadv_orig_node_get_router(orig_node);
-                       if (!router)
-                               continue;
-
-                       if (!batadv_compare_eth(router->addr, orig_node->orig))
-                               goto next;
-
-                       if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
-                               goto next;
-
-                       if (router->tq_avg < 1)
-                               goto next;
-
-                       /* fill one entry into buffer. */
-                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
-                       entry = (struct batadv_vis_info_entry *)packet_pos;
-                       memcpy(entry->src,
-                              router->if_incoming->net_dev->dev_addr,
-                              ETH_ALEN);
-                       memcpy(entry->dest, orig_node->orig, ETH_ALEN);
-                       entry->quality = router->tq_avg;
-                       packet->entries++;
-
-next:
-                       batadv_neigh_node_free_ref(router);
-
-                       if (batadv_vis_packet_full(info))
-                               goto unlock;
-               }
-               rcu_read_unlock();
-       }
-
-       hash = bat_priv->tt.local_hash;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common_entry, head,
-                                        hash_entry) {
-                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
-                       entry = (struct batadv_vis_info_entry *)packet_pos;
-                       memset(entry->src, 0, ETH_ALEN);
-                       memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
-                       entry->quality = 0; /* 0 means TT */
-                       packet->entries++;
-
-                       if (batadv_vis_packet_full(info))
-                               goto unlock;
-               }
-               rcu_read_unlock();
-       }
-
-       return 0;
-
-unlock:
-       rcu_read_unlock();
-       return 0;
-}
-
-/* free old vis packets. Must be called with this vis_hash_lock
- * held
- */
-static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
-{
-       uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->vis.hash;
-       struct hlist_node *node_tmp;
-       struct hlist_head *head;
-       struct batadv_vis_info *info;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               hlist_for_each_entry_safe(info, node_tmp,
-                                         head, hash_entry) {
-                       /* never purge own data. */
-                       if (info == bat_priv->vis.my_info)
-                               continue;
-
-                       if (batadv_has_timed_out(info->first_seen,
-                                                BATADV_VIS_TIMEOUT)) {
-                               hlist_del(&info->hash_entry);
-                               batadv_send_list_del(info);
-                               kref_put(&info->refcount, batadv_free_info);
-                       }
-               }
-       }
-}
-
-static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
-                                       struct batadv_vis_info *info)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       struct batadv_vis_packet *packet;
-       struct sk_buff *skb;
-       uint32_t i, res;
-
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       /* send to all routers in range. */
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       /* if it's a vis server and reachable, send it. */
-                       if (!(orig_node->flags & BATADV_VIS_SERVER))
-                               continue;
-
-                       /* don't send it if we already received the packet from
-                        * this node.
-                        */
-                       if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
-                                                  orig_node->orig))
-                               continue;
-
-                       memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-                       skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-                       if (!skb)
-                               continue;
-
-                       res = batadv_send_skb_to_orig(skb, orig_node, NULL);
-                       if (res == NET_XMIT_DROP)
-                               kfree_skb(skb);
-               }
-               rcu_read_unlock();
-       }
-}
-
-static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
-                                     struct batadv_vis_info *info)
-{
-       struct batadv_orig_node *orig_node;
-       struct sk_buff *skb;
-       struct batadv_vis_packet *packet;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
-       if (!orig_node)
-               goto out;
-
-       skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-       if (!skb)
-               goto out;
-
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
-               kfree_skb(skb);
-
-out:
-       if (orig_node)
-               batadv_orig_node_free_ref(orig_node);
-}
-
-/* only send one vis packet. called from batadv_send_vis_packets() */
-static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
-                                  struct batadv_vis_info *info)
-{
-       struct batadv_hard_iface *primary_if;
-       struct batadv_vis_packet *packet;
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-       if (packet->header.ttl < 2) {
-               pr_debug("Error - can't send vis packet: ttl exceeded\n");
-               goto out;
-       }
-
-       memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       packet->header.ttl--;
-
-       if (is_broadcast_ether_addr(packet->target_orig))
-               batadv_broadcast_vis_packet(bat_priv, info);
-       else
-               batadv_unicast_vis_packet(bat_priv, info);
-       packet->header.ttl++; /* restore TTL */
-
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-}
-
-/* called from timer; send (and maybe generate) vis packet. */
-static void batadv_send_vis_packets(struct work_struct *work)
-{
-       struct delayed_work *delayed_work;
-       struct batadv_priv *bat_priv;
-       struct batadv_priv_vis *priv_vis;
-       struct batadv_vis_info *info;
-
-       delayed_work = container_of(work, struct delayed_work, work);
-       priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
-       bat_priv = container_of(priv_vis, struct batadv_priv, vis);
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       batadv_purge_vis_packets(bat_priv);
-
-       if (batadv_generate_vis_packet(bat_priv) == 0) {
-               /* schedule if generation was successful */
-               batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
-       }
-
-       while (!list_empty(&bat_priv->vis.send_list)) {
-               info = list_first_entry(&bat_priv->vis.send_list,
-                                       typeof(*info), send_list);
-
-               kref_get(&info->refcount);
-               spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-               batadv_send_vis_packet(bat_priv, info);
-
-               spin_lock_bh(&bat_priv->vis.hash_lock);
-               batadv_send_list_del(info);
-               kref_put(&info->refcount, batadv_free_info);
-       }
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
-                          msecs_to_jiffies(BATADV_VIS_INTERVAL));
-}
-
-/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added)
- */
-int batadv_vis_init(struct batadv_priv *bat_priv)
-{
-       struct batadv_vis_packet *packet;
-       int hash_added;
-       unsigned int len;
-       unsigned long first_seen;
-       struct sk_buff *tmp_skb;
-
-       if (bat_priv->vis.hash)
-               return 0;
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-
-       bat_priv->vis.hash = batadv_hash_new(256);
-       if (!bat_priv->vis.hash) {
-               pr_err("Can't initialize vis_hash\n");
-               goto err;
-       }
-
-       batadv_hash_set_lock_class(bat_priv->vis.hash,
-                                  &batadv_vis_hash_lock_class_key);
-
-       bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-       if (!bat_priv->vis.my_info)
-               goto err;
-
-       len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-       bat_priv->vis.my_info->skb_packet = netdev_alloc_skb_ip_align(NULL,
-                                                                     len);
-       if (!bat_priv->vis.my_info->skb_packet)
-               goto free_info;
-
-       bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL;
-       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
-       tmp_skb = bat_priv->vis.my_info->skb_packet;
-       packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
-
-       /* prefill the vis info */
-       first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-       bat_priv->vis.my_info->first_seen = first_seen;
-       INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
-       INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
-       kref_init(&bat_priv->vis.my_info->refcount);
-       bat_priv->vis.my_info->bat_priv = bat_priv;
-       packet->header.version = BATADV_COMPAT_VERSION;
-       packet->header.packet_type = BATADV_VIS;
-       packet->header.ttl = BATADV_TTL;
-       packet->seqno = 0;
-       packet->reserved = 0;
-       packet->entries = 0;
-
-       INIT_LIST_HEAD(&bat_priv->vis.send_list);
-
-       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
-                                    batadv_vis_info_choose,
-                                    bat_priv->vis.my_info,
-                                    &bat_priv->vis.my_info->hash_entry);
-       if (hash_added != 0) {
-               pr_err("Can't add own vis packet into hash\n");
-               /* not in hash, need to remove it manually. */
-               kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
-               goto err;
-       }
-
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-       INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
-                          msecs_to_jiffies(BATADV_VIS_INTERVAL));
-
-       return 0;
-
-free_info:
-       kfree(bat_priv->vis.my_info);
-       bat_priv->vis.my_info = NULL;
-err:
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-       batadv_vis_quit(bat_priv);
-       return -ENOMEM;
-}
-
-/* Decrease the reference count on a hash item info */
-static void batadv_free_info_ref(struct hlist_node *node, void *arg)
-{
-       struct batadv_vis_info *info;
-
-       info = container_of(node, struct batadv_vis_info, hash_entry);
-       batadv_send_list_del(info);
-       kref_put(&info->refcount, batadv_free_info);
-}
-
-/* shutdown vis-server */
-void batadv_vis_quit(struct batadv_priv *bat_priv)
-{
-       if (!bat_priv->vis.hash)
-               return;
-
-       cancel_delayed_work_sync(&bat_priv->vis.work);
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       /* properly remove, kill timers ... */
-       batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
-       bat_priv->vis.hash = NULL;
-       bat_priv->vis.my_info = NULL;
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
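
The bucket selection in the removed batadv_vis_info_choose() is the classic one-at-a-time hash (the Wikipedia reference in its comment) applied to the 6-byte originator address and reduced modulo the table size. The same function in isolation, generalised to an arbitrary key; this is a sketch, not the kernel helper itself:

#include <stdint.h>
#include <stddef.h>

static uint32_t one_at_a_time_hash(const unsigned char *key, size_t len,
                                   uint32_t buckets)
{
        uint32_t hash = 0;
        size_t i;

        for (i = 0; i < len; i++) {
                hash += key[i];
                hash += hash << 10;
                hash ^= hash >> 6;
        }

        hash += hash << 3;
        hash ^= hash >> 11;
        hash += hash << 15;

        return hash % buckets;  /* bucket index, as in batadv_vis_info_choose() */
}
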
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
deleted file mode 100644 (file)
index ad92b0e..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_VIS_H_
-#define _NET_BATMAN_ADV_VIS_H_
-
-/* timeout of vis packets in milliseconds */
-#define BATADV_VIS_TIMEOUT             200000
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
-                                      struct batadv_vis_packet *vis_packet,
-                                      int vis_info_len);
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
-                                        struct batadv_vis_packet *vis_packet,
-                                        int vis_info_len);
-int batadv_vis_init(struct batadv_priv *bat_priv);
-void batadv_vis_quit(struct batadv_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_VIS_H_ */
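
The deleted vis code ties struct batadv_vis_info lifetimes to a kref: kref_init() when the info is allocated, kref_get() whenever the send list takes an additional reference, and kref_put() with batadv_free_info as the release callback, so the object is freed only when the last user drops it. A hypothetical object showing the same pattern (all names here are illustrative only):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
        struct kref refcount;
};

static void example_release(struct kref *ref)
{
        struct example_obj *obj = container_of(ref, struct example_obj,
                                               refcount);

        kfree(obj);
}

static struct example_obj *example_alloc(void)
{
        struct example_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);

        if (obj)
                kref_init(&obj->refcount);      /* refcount starts at 1 */
        return obj;
}

static void example_get(struct example_obj *obj)
{
        kref_get(&obj->refcount);               /* e.g. when linking into a list */
}

static void example_put(struct example_obj *obj)
{
        kref_put(&obj->refcount, example_release);
}
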
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 9e6cc355310504ae965035e033bdac7d5008527b..ab5241400cf78a9d7371d7a866d58aeba0d8043a 100644 (file)
@@ -182,7 +182,7 @@ struct hidp_session {
 };
 
 /* HIDP init defines */
-extern int __init hidp_init_sockets(void);
-extern void __exit hidp_cleanup_sockets(void);
+int __init hidp_init_sockets(void);
+void __exit hidp_cleanup_sockets(void);
 
 #endif /* __HIDP_H */
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ffd5874f25920a94c74f5d97ebf4a0e2aa77f48d..33e8f23acddd9ca2c817913753142ce063c1fb9e 100644 (file)
@@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
                vid = nla_get_u16(tb[NDA_VLAN]);
 
-               if (vid >= VLAN_N_VID) {
+               if (!vid || vid >= VLAN_VID_MASK) {
                        pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
                                vid);
                        return -EINVAL;
@@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
                vid = nla_get_u16(tb[NDA_VLAN]);
 
-               if (vid >= VLAN_N_VID) {
+               if (!vid || vid >= VLAN_VID_MASK) {
                        pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
                                vid);
                        return -EINVAL;
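
Both br_fdb.c hunks above tighten the VLAN ID check: the old bound VLAN_N_VID (4096) let the reserved ID 4095 and the "no VLAN" ID 0 through, whereas 802.1Q only assigns real VLANs the IDs 1..4094. A tiny standalone restatement of the accepted range; the VLAN_VID_MASK value matches the kernel's if_vlan.h definition:

#include <stdbool.h>
#include <stdint.h>

#define VLAN_VID_MASK 0x0fff    /* 4095, reserved by 802.1Q */

static bool vlan_vid_is_valid(uint16_t vid)
{
        /* 0 means priority-tagged/no VLAN, 4095 is reserved */
        return vid != 0 && vid < VLAN_VID_MASK;
}
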
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 85a09bb5ca51b864cd671c63746d65dfd531ed79..b7b1914dfa252a3731a26e2bcf2be3afc5171661 100644 (file)
@@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
                err = 0;
 
-               if (!mp->ports && !mp->mglist && mp->timer_armed &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
                break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d1c5786306784a7353b845cad60e2d39abec0f72..0513ef3ce6673dbc62ab4ff8dd8d28af6a0f6478 100644 (file)
@@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                del_timer(&p->timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-               if (!mp->ports && !mp->mglist && mp->timer_armed &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
 
@@ -363,7 +363,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, 6);
+       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
        eth->h_dest[0] = 1;
        eth->h_dest[1] = 0;
        eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, 6);
+       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));
 
@@ -620,7 +620,6 @@ rehash:
 
        mp->br = br;
        mp->addr = *group;
-
        setup_timer(&mp->timer, br_multicast_group_expired,
                    (unsigned long)mp);
 
@@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
+       unsigned long now = jiffies;
        int err;
 
        spin_lock(&br->multicast_lock);
@@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 
        if (!port) {
                mp->mglist = true;
+               mod_timer(&mp->timer, now + br->multicast_membership_interval);
                goto out;
        }
 
@@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br,
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port == port)
-                       goto out;
+                       goto found;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }
@@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br,
        rcu_assign_pointer(*pp, p);
        br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 
+found:
+       mod_timer(&p->timer, now + br->multicast_membership_interval);
 out:
        err = 0;
 
@@ -1191,9 +1194,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
-       mod_timer(&mp->timer, now + br->multicast_membership_interval);
-       mp->timer_armed = true;
-
        max_delay *= br->multicast_last_member_count;
 
        if (mp->mglist &&
@@ -1270,9 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
-       mod_timer(&mp->timer, now + br->multicast_membership_interval);
-       mp->timer_armed = true;
-
        max_delay *= br->multicast_last_member_count;
        if (mp->mglist &&
            (timer_pending(&mp->timer) ?
@@ -1358,7 +1355,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
                        br_mdb_notify(br->dev, port, group, RTM_DELMDB);
 
-                       if (!mp->ports && !mp->mglist && mp->timer_armed &&
+                       if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
                                mod_timer(&mp->timer, jiffies);
                }
@@ -1370,12 +1367,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     br->multicast_last_member_interval;
 
        if (!port) {
-               if (mp->mglist && mp->timer_armed &&
+               if (mp->mglist &&
                    (timer_pending(&mp->timer) ?
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
                        mod_timer(&mp->timer, time);
                }
+
+               goto out;
+       }
+
+       for (p = mlock_dereference(mp->ports, br);
+            p != NULL;
+            p = mlock_dereference(p->next, br)) {
+               if (p->port != port)
+                       continue;
+
+               if (!hlist_unhashed(&p->mglist) &&
+                   (timer_pending(&p->timer) ?
+                    time_after(p->timer.expires, time) :
+                    try_to_del_timer_sync(&p->timer) >= 0)) {
+                       mod_timer(&p->timer, time);
+               }
+
+               break;
        }
 out:
        spin_unlock(&br->multicast_lock);
@@ -1798,7 +1813,6 @@ void br_multicast_stop(struct net_bridge *br)
                hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
                                          hlist[ver]) {
                        del_timer(&mp->timer);
-                       mp->timer_armed = false;
                        call_rcu_bh(&mp->rcu, br_multicast_free_group);
                }
        }
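
The br_multicast.c changes above drop the mp->timer_armed bookkeeping and instead (re)arm the group and per-port-group expiry timers whenever a membership report is handled in br_multicast_add_group(), so a group times out after multicast_membership_interval even if no query is ever seen. The refresh itself is a plain mod_timer() pushing the expiry forward, as in this hypothetical helper:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical helper: re-arm an expiry timer one membership interval from
 * now, the pattern br_multicast_add_group() uses after this change.
 */
static void refresh_membership_timer(struct timer_list *timer,
                                     unsigned long membership_interval)
{
        mod_timer(timer, jiffies + membership_interval);
}
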
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f87736270eaa875ba0060048a38cfabacfd29444..878f008afefac63bd102f948986d2f89e3fd446a 100644 (file)
@@ -619,7 +619,7 @@ bad:
 
 /* Replicate the checks that IPv6 does on packet reception and pass the packet
  * to ip6tables, which doesn't support NAT, so things are fairly simple. */
-static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
+static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
@@ -669,7 +669,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
  * receiving device) to make netfilter happy, the REDIRECT
  * target in particular.  Save the original destination IP
  * address to be able to detect DNAT afterwards. */
-static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
@@ -691,7 +692,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                        return NF_ACCEPT;
 
                nf_bridge_pull_encap_header_rcsum(skb);
-               return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
+               return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
        }
 
        if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -727,7 +728,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
  * took place when the packet entered the bridge), but we
  * register an IPv4 PRE_ROUTING 'sabotage' hook that will
  * prevent this from happening. */
-static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
+                                  struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
@@ -765,7 +767,8 @@ static int br_nf_forward_finish(struct sk_buff *skb)
  * but we are still able to filter on the 'real' indev/outdev
  * because of the physdev module. For ARP, indev and outdev are the
  * bridge ports. */
-static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+                                    struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
@@ -818,7 +821,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
        return NF_STOLEN;
 }
 
-static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
@@ -878,7 +882,8 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 #endif
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
-static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
+                                      struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
                                       int (*okfn)(struct sk_buff *))
@@ -923,7 +928,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 /* IP/SABOTAGE *****************************************************/
 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
  * for the second time. */
-static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
+                                  struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
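
The br_netfilter.c hunks above are a mechanical adaptation to the netfilter core change that hands hook functions the registered struct nf_hook_ops instead of a bare hook number; the number remains reachable as ops->hooknum. A minimal, purely illustrative hook with the updated prototype:

#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static unsigned int example_hook(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
                                 int (*okfn)(struct sk_buff *))
{
        /* ops->hooknum replaces the old "unsigned int hook" argument */
        return NF_ACCEPT;
}
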
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e74ddc1c29a8be20f1a093fcc8e27b11bca6f03e..f75d92e4f96b33cec6fd46fc37151224f86ed018 100644 (file)
@@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br,
 
                vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
 
-               if (vinfo->vid >= VLAN_N_VID)
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
                        return -EINVAL;
 
                switch (cmd) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index efb57d91156975b3b43a2afdc588128a2f6e1f79..d1ca6d9566332055591b96979805748a4c76e6c5 100644 (file)
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
        struct timer_list               timer;
        struct br_ip                    addr;
        bool                            mglist;
-       bool                            timer_armed;
 };
 
 struct net_bridge_mdb_htable
@@ -344,10 +343,9 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
 }
 
 /* br_device.c */
-extern void br_dev_setup(struct net_device *dev);
-extern void br_dev_delete(struct net_device *dev, struct list_head *list);
-extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
-                              struct net_device *dev);
+void br_dev_setup(struct net_device *dev);
+void br_dev_delete(struct net_device *dev, struct list_head *list);
+netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
@@ -358,8 +356,8 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                netpoll_send_skb(np, skb);
 }
 
-extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
-extern void br_netpoll_disable(struct net_bridge_port *p);
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
@@ -377,116 +375,99 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
 #endif
 
 /* br_fdb.c */
-extern int br_fdb_init(void);
-extern void br_fdb_fini(void);
-extern void br_fdb_flush(struct net_bridge *br);
-extern void br_fdb_changeaddr(struct net_bridge_port *p,
-                             const unsigned char *newaddr);
-extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
-extern void br_fdb_cleanup(unsigned long arg);
-extern void br_fdb_delete_by_port(struct net_bridge *br,
-                                 const struct net_bridge_port *p, int do_all);
-extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-                                                const unsigned char *addr,
-                                                __u16 vid);
-extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
-extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
-                         unsigned long count, unsigned long off);
-extern int br_fdb_insert(struct net_bridge *br,
-                        struct net_bridge_port *source,
-                        const unsigned char *addr,
-                        u16 vid);
-extern void br_fdb_update(struct net_bridge *br,
-                         struct net_bridge_port *source,
-                         const unsigned char *addr,
-                         u16 vid);
-extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
-
-extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
-                        struct net_device *dev,
-                        const unsigned char *addr);
-extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
-                     struct net_device *dev,
-                     const unsigned char *addr,
-                     u16 nlh_flags);
-extern int br_fdb_dump(struct sk_buff *skb,
-                      struct netlink_callback *cb,
-                      struct net_device *dev,
-                      int idx);
+int br_fdb_init(void);
+void br_fdb_fini(void);
+void br_fdb_flush(struct net_bridge *br);
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
+void br_fdb_cleanup(unsigned long arg);
+void br_fdb_delete_by_port(struct net_bridge *br,
+                          const struct net_bridge_port *p, int do_all);
+struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+                                         const unsigned char *addr, __u16 vid);
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
+int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
+                  unsigned long off);
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+                 const unsigned char *addr, u16 vid);
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+                  const unsigned char *addr, u16 vid);
+int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+                 struct net_device *dev, const unsigned char *addr);
+int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
+              const unsigned char *addr, u16 nlh_flags);
+int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct net_device *dev, int idx);
 
 /* br_forward.c */
-extern void br_deliver(const struct net_bridge_port *to,
-               struct sk_buff *skb);
-extern int br_dev_queue_push_xmit(struct sk_buff *skb);
-extern void br_forward(const struct net_bridge_port *to,
+void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct sk_buff *skb);
+void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, struct sk_buff *skb0);
-extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb,
-                            bool unicast);
-extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-                            struct sk_buff *skb2, bool unicast);
+int br_forward_finish(struct sk_buff *skb);
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+                     struct sk_buff *skb2, bool unicast);
 
 /* br_if.c */
-extern void br_port_carrier_check(struct net_bridge_port *p);
-extern int br_add_bridge(struct net *net, const char *name);
-extern int br_del_bridge(struct net *net, const char *name);
-extern void br_net_exit(struct net *net);
-extern int br_add_if(struct net_bridge *br,
-             struct net_device *dev);
-extern int br_del_if(struct net_bridge *br,
-             struct net_device *dev);
-extern int br_min_mtu(const struct net_bridge *br);
-extern netdev_features_t br_features_recompute(struct net_bridge *br,
-       netdev_features_t features);
+void br_port_carrier_check(struct net_bridge_port *p);
+int br_add_bridge(struct net *net, const char *name);
+int br_del_bridge(struct net *net, const char *name);
+void br_net_exit(struct net *net);
+int br_add_if(struct net_bridge *br, struct net_device *dev);
+int br_del_if(struct net_bridge *br, struct net_device *dev);
+int br_min_mtu(const struct net_bridge *br);
+netdev_features_t br_features_recompute(struct net_bridge *br,
+                                       netdev_features_t features);
 
 /* br_input.c */
-extern int br_handle_frame_finish(struct sk_buff *skb);
-extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+int br_handle_frame_finish(struct sk_buff *skb);
+rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
 /* br_ioctl.c */
-extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
+int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
+                            void __user *arg);
 
 /* br_multicast.c */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 extern unsigned int br_mdb_rehash_seq;
-extern int br_multicast_rcv(struct net_bridge *br,
-                           struct net_bridge_port *port,
-                           struct sk_buff *skb);
-extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-                                              struct sk_buff *skb, u16 vid);
-extern void br_multicast_add_port(struct net_bridge_port *port);
-extern void br_multicast_del_port(struct net_bridge_port *port);
-extern void br_multicast_enable_port(struct net_bridge_port *port);
-extern void br_multicast_disable_port(struct net_bridge_port *port);
-extern void br_multicast_init(struct net_bridge *br);
-extern void br_multicast_open(struct net_bridge *br);
-extern void br_multicast_stop(struct net_bridge *br);
-extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
-                                struct sk_buff *skb);
-extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
-                                struct sk_buff *skb, struct sk_buff *skb2);
-extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_port_router(struct net_bridge_port *p,
-                                       unsigned long val);
-extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
-extern struct net_bridge_mdb_entry *br_mdb_ip_get(
-                               struct net_bridge_mdb_htable *mdb,
-                               struct br_ip *dst);
-extern struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
-                               struct net_bridge_port *port, struct br_ip *group);
-extern void br_multicast_free_pg(struct rcu_head *head);
-extern struct net_bridge_port_group *br_multicast_new_port_group(
-                               struct net_bridge_port *port,
-                               struct br_ip *group,
-                               struct net_bridge_port_group __rcu *next,
-                               unsigned char state);
-extern void br_mdb_init(void);
-extern void br_mdb_uninit(void);
-extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                         struct br_ip *group, int type);
+int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
+                    struct sk_buff *skb);
+struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+                                       struct sk_buff *skb, u16 vid);
+void br_multicast_add_port(struct net_bridge_port *port);
+void br_multicast_del_port(struct net_bridge_port *port);
+void br_multicast_enable_port(struct net_bridge_port *port);
+void br_multicast_disable_port(struct net_bridge_port *port);
+void br_multicast_init(struct net_bridge *br);
+void br_multicast_open(struct net_bridge *br);
+void br_multicast_stop(struct net_bridge *br);
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+                         struct sk_buff *skb);
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+                         struct sk_buff *skb, struct sk_buff *skb2);
+int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
+int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
+int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+struct net_bridge_mdb_entry *
+br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, struct br_ip *dst);
+struct net_bridge_mdb_entry *
+br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port,
+                      struct br_ip *group);
+void br_multicast_free_pg(struct rcu_head *head);
+struct net_bridge_port_group *
+br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
+                           struct net_bridge_port_group __rcu *next,
+                           unsigned char state);
+void br_mdb_init(void);
+void br_mdb_uninit(void);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+                  struct br_ip *group, int type);
 
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -591,22 +572,21 @@ static inline void br_mdb_uninit(void)
 
 /* br_vlan.c */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-                              struct sk_buff *skb, u16 *vid);
-extern bool br_allowed_egress(struct net_bridge *br,
-                             const struct net_port_vlans *v,
-                             const struct sk_buff *skb);
-extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
-                                     const struct net_port_vlans *v,
-                                     struct sk_buff *skb);
-extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
-extern int br_vlan_delete(struct net_bridge *br, u16 vid);
-extern void br_vlan_flush(struct net_bridge *br);
-extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
-extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
-extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
-extern void nbp_vlan_flush(struct net_bridge_port *port);
-extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
+bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+                       struct sk_buff *skb, u16 *vid);
+bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+                      const struct sk_buff *skb);
+struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                              const struct net_port_vlans *v,
+                              struct sk_buff *skb);
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
+int br_vlan_delete(struct net_bridge *br, u16 vid);
+void br_vlan_flush(struct net_bridge *br);
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
+int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
+void nbp_vlan_flush(struct net_bridge_port *port);
+bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
 
 static inline struct net_port_vlans *br_get_vlan_info(
                                                const struct net_bridge *br)
@@ -643,9 +623,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
         * vid wasn't set
         */
        smp_rmb();
-       return (v->pvid & VLAN_TAG_PRESENT) ?
-                       (v->pvid & ~VLAN_TAG_PRESENT) :
-                       VLAN_N_VID;
+       return v->pvid ?: VLAN_N_VID;
 }
 
 #else
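
The rewritten return statement uses the GNU a ?: b extension, which yields a when a is non-zero and b otherwise, without evaluating a twice. Because VID 0 can no longer be stored, a zero pvid now simply means "no PVID configured", making the VLAN_TAG_PRESENT encoding unnecessary. A hypothetical illustration:

#include <linux/types.h>
#include <linux/if_vlan.h>

/* Illustrative only: same form as the br_get_pvid() return above. */
static u16 pvid_or_none(u16 pvid)
{
	return pvid ?: VLAN_N_VID;	/* same as: pvid ? pvid : VLAN_N_VID */
}
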
@@ -727,9 +705,9 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
 
 /* br_netfilter.c */
 #ifdef CONFIG_BRIDGE_NETFILTER
-extern int br_netfilter_init(void);
-extern void br_netfilter_fini(void);
-extern void br_netfilter_rtable_init(struct net_bridge *);
+int br_netfilter_init(void);
+void br_netfilter_fini(void);
+void br_netfilter_rtable_init(struct net_bridge *);
 #else
 #define br_netfilter_init()    (0)
 #define br_netfilter_fini()    do { } while(0)
@@ -737,43 +715,39 @@ extern void br_netfilter_rtable_init(struct net_bridge *);
 #endif
 
 /* br_stp.c */
-extern void br_log_state(const struct net_bridge_port *p);
-extern struct net_bridge_port *br_get_port(struct net_bridge *br,
-                                          u16 port_no);
-extern void br_init_port(struct net_bridge_port *p);
-extern void br_become_designated_port(struct net_bridge_port *p);
+void br_log_state(const struct net_bridge_port *p);
+struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
+void br_init_port(struct net_bridge_port *p);
+void br_become_designated_port(struct net_bridge_port *p);
 
-extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
-extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
-extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
-extern int br_set_max_age(struct net_bridge *br, unsigned long x);
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
+int br_set_forward_delay(struct net_bridge *br, unsigned long x);
+int br_set_hello_time(struct net_bridge *br, unsigned long x);
+int br_set_max_age(struct net_bridge *br, unsigned long x);
 
 
 /* br_stp_if.c */
-extern void br_stp_enable_bridge(struct net_bridge *br);
-extern void br_stp_disable_bridge(struct net_bridge *br);
-extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
-extern void br_stp_enable_port(struct net_bridge_port *p);
-extern void br_stp_disable_port(struct net_bridge_port *p);
-extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
-extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
-extern void br_stp_set_bridge_priority(struct net_bridge *br,
-                                      u16 newprio);
-extern int br_stp_set_port_priority(struct net_bridge_port *p,
-                                   unsigned long newprio);
-extern int br_stp_set_path_cost(struct net_bridge_port *p,
-                               unsigned long path_cost);
-extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
+void br_stp_enable_bridge(struct net_bridge *br);
+void br_stp_disable_bridge(struct net_bridge *br);
+void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
+void br_stp_enable_port(struct net_bridge_port *p);
+void br_stp_disable_port(struct net_bridge_port *p);
+bool br_stp_recalculate_bridge_id(struct net_bridge *br);
+void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
+void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio);
+int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio);
+int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost);
+ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
 
 /* br_stp_bpdu.c */
 struct stp_proto;
-extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
-                      struct net_device *dev);
+void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+               struct net_device *dev);
 
 /* br_stp_timer.c */
-extern void br_stp_timer_init(struct net_bridge *br);
-extern void br_stp_port_timer_init(struct net_bridge_port *p);
-extern unsigned long br_timer_value(const struct timer_list *timer);
+void br_stp_timer_init(struct net_bridge *br);
+void br_stp_port_timer_init(struct net_bridge_port *p);
+unsigned long br_timer_value(const struct timer_list *timer);
 
 /* br.c */
 #if IS_ENABLED(CONFIG_ATM_LANE)
@@ -782,23 +756,23 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
 
 /* br_netlink.c */
 extern struct rtnl_link_ops br_link_ops;
-extern int br_netlink_init(void);
-extern void br_netlink_fini(void);
-extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
-extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                     struct net_device *dev, u32 filter_mask);
+int br_netlink_init(void);
+void br_netlink_fini(void);
+void br_ifinfo_notify(int event, struct net_bridge_port *port);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
+              u32 filter_mask);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
 extern const struct sysfs_ops brport_sysfs_ops;
-extern int br_sysfs_addif(struct net_bridge_port *p);
-extern int br_sysfs_renameif(struct net_bridge_port *p);
+int br_sysfs_addif(struct net_bridge_port *p);
+int br_sysfs_renameif(struct net_bridge_port *p);
 
 /* br_sysfs_br.c */
-extern int br_sysfs_addbr(struct net_device *dev);
-extern void br_sysfs_delbr(struct net_device *dev);
+int br_sysfs_addbr(struct net_device *dev);
+void br_sysfs_delbr(struct net_device *dev);
 
 #else
 
index 0c0fe36e7aa92eebd67e3e91d07c440a5310090b..2fe910c4e1700776c33275f0ae277d354e348f6c 100644 (file)
@@ -51,19 +51,19 @@ static inline int br_is_designated_port(const struct net_bridge_port *p)
 
 
 /* br_stp.c */
-extern void br_become_root_bridge(struct net_bridge *br);
-extern void br_config_bpdu_generation(struct net_bridge *);
-extern void br_configuration_update(struct net_bridge *);
-extern void br_port_state_selection(struct net_bridge *);
-extern void br_received_config_bpdu(struct net_bridge_port *p,
-                                   const struct br_config_bpdu *bpdu);
-extern void br_received_tcn_bpdu(struct net_bridge_port *p);
-extern void br_transmit_config(struct net_bridge_port *p);
-extern void br_transmit_tcn(struct net_bridge *br);
-extern void br_topology_change_detection(struct net_bridge *br);
+void br_become_root_bridge(struct net_bridge *br);
+void br_config_bpdu_generation(struct net_bridge *);
+void br_configuration_update(struct net_bridge *);
+void br_port_state_selection(struct net_bridge *);
+void br_received_config_bpdu(struct net_bridge_port *p,
+                            const struct br_config_bpdu *bpdu);
+void br_received_tcn_bpdu(struct net_bridge_port *p);
+void br_transmit_config(struct net_bridge_port *p);
+void br_transmit_tcn(struct net_bridge *br);
+void br_topology_change_detection(struct net_bridge *br);
 
 /* br_stp_bpdu.c */
-extern void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
-extern void br_send_tcn_bpdu(struct net_bridge_port *);
+void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
+void br_send_tcn_bpdu(struct net_bridge_port *);
 
 #endif
index 108084a0467160e30eb001cd2dbb326fdc4dadd3..656a6f3e40de1b13b9ea7a89373da5d5615e5bb2 100644 (file)
@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
 
        if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
-       else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
+       else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
 
        if (r == 0) {
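
The one-character fix above turns the second test into an upper-bound check, so the configured delay is effectively clamped into [BR_MIN_FORWARD_DELAY, BR_MAX_FORWARD_DELAY]. A hypothetical helper equivalent to the corrected logic (assuming the definitions in net/bridge/br_private.h):

#include "br_private.h"

/* Illustrative sketch of the corrected min/max handling. */
static unsigned long br_clamped_forward_delay(const struct net_bridge *br)
{
	if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
		return BR_MIN_FORWARD_DELAY;
	if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
		return BR_MAX_FORWARD_DELAY;
	return br->bridge_forward_delay;
}
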
index 9a9ffe7e4019741d75456e3b9afdba21c44785b3..53f0990eab58e08a3da9160a27ee0834bd0f0272 100644 (file)
@@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
                return 0;
        }
 
-       if (vid) {
-               if (v->port_idx) {
-                       p = v->parent.port;
-                       br = p->br;
-                       dev = p->dev;
-               } else {
-                       br = v->parent.br;
-                       dev = br->dev;
-               }
-               ops = dev->netdev_ops;
-
-               if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
-                       /* Add VLAN to the device filter if it is supported.
-                        * Stricly speaking, this is not necessary now, since
-                        * devices are made promiscuous by the bridge, but if
-                        * that ever changes this code will allow tagged
-                        * traffic to enter the bridge.
-                        */
-                       err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
-                                                      vid);
-                       if (err)
-                               return err;
-               }
-
-               err = br_fdb_insert(br, p, dev->dev_addr, vid);
-               if (err) {
-                       br_err(br, "failed insert local address into bridge "
-                              "forwarding table\n");
-                       goto out_filt;
-               }
+       if (v->port_idx) {
+               p = v->parent.port;
+               br = p->br;
+               dev = p->dev;
+       } else {
+               br = v->parent.br;
+               dev = br->dev;
+       }
+       ops = dev->netdev_ops;
+
+       if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+               /* Add VLAN to the device filter if it is supported.
+                * Stricly speaking, this is not necessary now, since
+                * devices are made promiscuous by the bridge, but if
+                * that ever changes this code will allow tagged
+                * traffic to enter the bridge.
+                */
+               err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
+                                              vid);
+               if (err)
+                       return err;
+       }
 
+       err = br_fdb_insert(br, p, dev->dev_addr, vid);
+       if (err) {
+               br_err(br, "failed insert local address into bridge "
+                      "forwarding table\n");
+               goto out_filt;
        }
 
        set_bit(vid, v->vlan_bitmap);
@@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
        __vlan_delete_pvid(v, vid);
        clear_bit(vid, v->untagged_bitmap);
 
-       if (v->port_idx && vid) {
+       if (v->port_idx) {
                struct net_device *dev = v->parent.port->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
 
@@ -192,6 +189,8 @@ out:
 bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
                        struct sk_buff *skb, u16 *vid)
 {
+       int err;
+
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
@@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                return false;
 
-       if (br_vlan_get_tag(skb, vid)) {
+       err = br_vlan_get_tag(skb, vid);
+       if (!*vid) {
                u16 pvid = br_get_pvid(v);
 
-               /* Frame did not have a tag.  See if pvid is set
-                * on this port.  That tells us which vlan untagged
-                * traffic belongs to.
+               /* Frame had a tag with VID 0 or did not have a tag.
+                * See if pvid is set on this port.  That tells us which
+                * vlan untagged or priority-tagged traffic belongs to.
                 */
                if (pvid == VLAN_N_VID)
                        return false;
 
-               /* PVID is set on this port.  Any untagged ingress
-                * frame is considered to belong to this vlan.
+               /* PVID is set on this port.  Any untagged or priority-tagged
+                * ingress frame is considered to belong to this vlan.
                 */
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+               *vid = pvid;
+               if (likely(err))
+                       /* Untagged Frame. */
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+               else
+                       /* Priority-tagged Frame.
+                        * At this point, We know that skb->vlan_tci had
+                        * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+                        * We update only VID field and preserve PCP field.
+                        */
+                       skb->vlan_tci |= pvid;
+
                return true;
        }
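
In the updated ingress path, a frame carrying a tag with VID 0 (priority-tagged) is now classified like an untagged frame: the PVID supplies the VID while the PCP bits already present in skb->vlan_tci are kept. A hypothetical helper showing the TCI update:

#include <linux/types.h>
#include <linux/if_vlan.h>

/* 802.1Q TCI layout: PCP (3 bits) | DEI (1 bit) | VID (12 bits).
 * A priority-tagged frame has VID == 0, so OR-ing in the PVID fills
 * the VID field and leaves the PCP/DEI bits untouched.  Illustrative only. */
static u16 tci_fill_vid(u16 tci, u16 pvid)
{
	return tci | (pvid & VLAN_VID_MASK);
}
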
 
@@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br,
        return false;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 {
        struct net_port_vlans *pv = NULL;
@@ -278,7 +291,9 @@ out:
        return err;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int br_vlan_delete(struct net_bridge *br, u16 vid)
 {
        struct net_port_vlans *pv;
@@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
        if (!pv)
                return -EINVAL;
 
-       if (vid) {
-               /* If the VID !=0 remove fdb for this vid. VID 0 is special
-                * in that it's the default and is always there in the fdb.
-                */
-               spin_lock_bh(&br->hash_lock);
-               fdb_delete_by_addr(br, br->dev->dev_addr, vid);
-               spin_unlock_bh(&br->hash_lock);
-       }
+       spin_lock_bh(&br->hash_lock);
+       fdb_delete_by_addr(br, br->dev->dev_addr, vid);
+       spin_unlock_bh(&br->hash_lock);
 
        __vlan_del(pv, vid);
        return 0;
@@ -329,7 +339,9 @@ unlock:
        return 0;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
        struct net_port_vlans *pv = NULL;
@@ -363,7 +375,9 @@ clean_up:
        return err;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 {
        struct net_port_vlans *pv;
@@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
        if (!pv)
                return -EINVAL;
 
-       if (vid) {
-               /* If the VID !=0 remove fdb for this vid. VID 0 is special
-                * in that it's the default and is always there in the fdb.
-                */
-               spin_lock_bh(&port->br->hash_lock);
-               fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
-               spin_unlock_bh(&port->br->hash_lock);
-       }
+       spin_lock_bh(&port->br->hash_lock);
+       fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
+       spin_unlock_bh(&port->br->hash_lock);
 
        return __vlan_del(pv, vid);
 }
index a9aff9c7d0273b2a41ef9d122ac38e769956a640..68f8128147be7c9eb2ae6ab9593d999308eea755 100644 (file)
@@ -1,6 +1,9 @@
 #
 # Bridge netfilter configuration
 #
+#
+config NF_TABLES_BRIDGE
+       tristate "Ethernet Bridge nf_tables support"
 
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
index 0718699540b023fd6d95be217b79e4bfe5029821..ea7629f58b3d1c44e28524df8a0937de3a18546b 100644 (file)
@@ -2,6 +2,8 @@
 # Makefile for the netfilter modules for Link Layer filtering on a bridge.
 #
 
+obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
+
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
 # tables
index 8b84c581be3082ea4c8a6a21a362a3077ab17751..3fb3c848affef74249a1fd9ed610aea7d1db5764 100644 (file)
@@ -28,7 +28,7 @@ static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
        uint32_t cmp[2] = { 0, 0 };
        int key = ((const unsigned char *)mac)[5];
 
-       memcpy(((char *) cmp) + 2, mac, 6);
+       memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
        start = wh->table[key];
        limit = wh->table[key + 1];
        if (ip) {
index 518093802d1d640f3642e997b1481279fad9b68b..7c470c371e14f82a9734d3da4d0bb0a177169251 100644 (file)
@@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
        ub->qlen++;
 
        pm = nlmsg_data(nlh);
+       memset(pm, 0, sizeof(*pm));
 
        /* Fill in the ulog data */
        pm->version = EBT_ULOG_VERSION;
@@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
        pm->hook = hooknr;
        if (uloginfo->prefix != NULL)
                strcpy(pm->prefix, uloginfo->prefix);
-       else
-               *(pm->prefix) = '\0';
 
        if (in) {
                strcpy(pm->physindev, in->name);
@@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
                        strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
                else
                        strcpy(pm->indev, in->name);
-       } else
-               pm->indev[0] = pm->physindev[0] = '\0';
+       }
 
        if (out) {
                /* If out exists, then out is a bridge port */
                strcpy(pm->physoutdev, out->name);
                /* rcu_read_lock()ed by nf_hook_slow */
                strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
-       } else
-               pm->outdev[0] = pm->physoutdev[0] = '\0';
+       }
 
        if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
                BUG();
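
Zeroing the whole message right after nlmsg_data() means every field that is not explicitly filled in (prefix, the device name strings, padding) is already '\0', so the per-branch clearing removed above is no longer needed and stale kernel memory cannot reach userspace. The general pattern, shown on a made-up payload struct:

#include <linux/types.h>
#include <linux/string.h>

struct example_ulog_msg {		/* hypothetical payload, for illustration */
	char indev[16];
	char outdev[16];
	u32  mark;
};

static void fill_example_msg(struct example_ulog_msg *m,
			     const char *in, u32 mark)
{
	memset(m, 0, sizeof(*m));	/* unset fields stay zeroed */
	if (in)
		strlcpy(m->indev, in, sizeof(m->indev));
	m->mark = mark;
}
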
index 94b2b700cff8d0de28058497f7b863ce685633a2..bb2da7b706e7214f05fb7d56f87142e5ed5a5667 100644 (file)
@@ -60,17 +60,21 @@ static const struct ebt_table frame_filter =
 };
 
 static unsigned int
-ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+           const struct net_device *in, const struct net_device *out,
+           int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(in)->xt.frame_filter);
 }
 
 static unsigned int
-ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+            const struct net_device *in, const struct net_device *out,
+            int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(out)->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
index 322555acdd4009aaa677147f7a33e558d82a67cc..bd238f1f105b94ac5f8db9982b7738e255f42630 100644 (file)
@@ -60,17 +60,21 @@ static struct ebt_table frame_nat =
 };
 
 static unsigned int
-ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in
-   , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+          const struct net_device *in, const struct net_device *out,
+          int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(in)->xt.frame_nat);
 }
 
 static unsigned int
-ebt_nat_out(unsigned int hook, struct sk_buff *skb, const struct net_device *in
-   , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+           const struct net_device *in, const struct net_device *out,
+           int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(out)->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
new file mode 100644 (file)
index 0000000..e8cb016
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter_bridge.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_bridge __read_mostly = {
+       .family         = NFPROTO_BRIDGE,
+       .nhooks         = NF_BR_NUMHOOKS,
+       .owner          = THIS_MODULE,
+};
+
+static int nf_tables_bridge_init_net(struct net *net)
+{
+       net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.bridge == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
+
+       if (nft_register_afinfo(net, net->nft.bridge) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.bridge);
+       return -ENOMEM;
+}
+
+static void nf_tables_bridge_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.bridge);
+       kfree(net->nft.bridge);
+}
+
+static struct pernet_operations nf_tables_bridge_net_ops = {
+       .init   = nf_tables_bridge_init_net,
+       .exit   = nf_tables_bridge_exit_net,
+};
+
+static int __init nf_tables_bridge_init(void)
+{
+       return register_pernet_subsys(&nf_tables_bridge_net_ops);
+}
+
+static void __exit nf_tables_bridge_exit(void)
+{
+       return unregister_pernet_subsys(&nf_tables_bridge_net_ops);
+}
+
+module_init(nf_tables_bridge_init);
+module_exit(nf_tables_bridge_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);
index 1dccb4c33894af7074ee85e320e32c035dbfa368..6de58b40535cc309e59f1a61ed624c930b7c7ffe 100644 (file)
@@ -108,9 +108,9 @@ struct s_pstats {
 extern struct dev_rcv_lists can_rx_alldev_list;
 
 /* function prototypes for the CAN networklayer procfs (proc.c) */
-extern void can_init_proc(void);
-extern void can_remove_proc(void);
-extern void can_stat_update(unsigned long data);
+void can_init_proc(void);
+void can_remove_proc(void);
+void can_stat_update(unsigned long data);
 
 /* structures and variables from af_can.c needed in proc.c for reading */
 extern struct timer_list can_stattimer;    /* timer for statistics update */
index ed7d088b1bc92e14372e2f24c8f5016750e07014..059a3ce4b53f48425336bf4b792d2ac6a5c6e849 100644 (file)
@@ -23,7 +23,7 @@ struct ceph_auth_none_info {
        struct ceph_none_authorizer au;   /* we only need one; it's static */
 };
 
-extern int ceph_auth_none_init(struct ceph_auth_client *ac);
+int ceph_auth_none_init(struct ceph_auth_client *ac);
 
 #endif
 
index c5a058da7ac8e7ae6a51f695b2a9d1b9362cdab3..65ee72082d99866d0f98057f15c4f181b21d971c 100644 (file)
@@ -45,7 +45,7 @@ struct ceph_x_info {
        struct ceph_x_authorizer auth_authorizer;
 };
 
-extern int ceph_x_init(struct ceph_auth_client *ac);
+int ceph_x_init(struct ceph_auth_client *ac);
 
 #endif
 
index 3572dc518bc984aece38fb8bd9f8cbe7b1b5c239..d1498224c49d4c6c18e082ea908cb3e5a8fed30c 100644 (file)
@@ -20,34 +20,32 @@ static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
                kfree(key->key);
 }
 
-extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
-                                const struct ceph_crypto_key *src);
-extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
-                                 void **p, void *end);
-extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
-                                 void **p, void *end);
-extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
+int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+                         const struct ceph_crypto_key *src);
+int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
 
 /* crypto.c */
-extern int ceph_decrypt(struct ceph_crypto_key *secret,
-                       void *dst, size_t *dst_len,
-                       const void *src, size_t src_len);
-extern int ceph_encrypt(struct ceph_crypto_key *secret,
-                       void *dst, size_t *dst_len,
-                       const void *src, size_t src_len);
-extern int ceph_decrypt2(struct ceph_crypto_key *secret,
-                       void *dst1, size_t *dst1_len,
-                       void *dst2, size_t *dst2_len,
-                       const void *src, size_t src_len);
-extern int ceph_encrypt2(struct ceph_crypto_key *secret,
-                        void *dst, size_t *dst_len,
-                        const void *src1, size_t src1_len,
-                        const void *src2, size_t src2_len);
-extern int ceph_crypto_init(void);
-extern void ceph_crypto_shutdown(void);
+int ceph_decrypt(struct ceph_crypto_key *secret,
+                void *dst, size_t *dst_len,
+                const void *src, size_t src_len);
+int ceph_encrypt(struct ceph_crypto_key *secret,
+                void *dst, size_t *dst_len,
+                const void *src, size_t src_len);
+int ceph_decrypt2(struct ceph_crypto_key *secret,
+                 void *dst1, size_t *dst1_len,
+                 void *dst2, size_t *dst2_len,
+                 const void *src, size_t src_len);
+int ceph_encrypt2(struct ceph_crypto_key *secret,
+                 void *dst, size_t *dst_len,
+                 const void *src1, size_t src1_len,
+                 const void *src2, size_t src2_len);
+int ceph_crypto_init(void);
+void ceph_crypto_shutdown(void);
 
 /* armor.c */
-extern int ceph_armor(char *dst, const char *src, const char *end);
-extern int ceph_unarmor(char *dst, const char *src, const char *end);
+int ceph_armor(char *dst, const char *src, const char *end);
+int ceph_unarmor(char *dst, const char *src, const char *end);
 
 #endif
index f0a1ba6c8086acc65a87e519fca48853a3bd091e..89032580bd1d8aa5ef29d1395661bdd0bcfe95f1 100644 (file)
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
            __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
            __get_user(kmsg->msg_flags, &umsg->msg_flags))
                return -EFAULT;
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               return -EINVAL;
        kmsg->msg_name = compat_ptr(tmp1);
        kmsg->msg_iov = compat_ptr(tmp2);
        kmsg->msg_control = compat_ptr(tmp3);
index 5c713f2239cc6245d230d2e35e243bbee7339761..0918aadc20fd2ce528ef284da6d6c87f5254f6a0 100644 (file)
@@ -1307,7 +1307,7 @@ static int __dev_close_many(struct list_head *head)
        ASSERT_RTNL();
        might_sleep();
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry(dev, head, close_list) {
                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
                clear_bit(__LINK_STATE_START, &dev->state);
@@ -1323,7 +1323,7 @@ static int __dev_close_many(struct list_head *head)
 
        dev_deactivate_many(head);
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;
 
                /*
@@ -1351,7 +1351,7 @@ static int __dev_close(struct net_device *dev)
        /* Temporarily disable netpoll until the interface is down */
        netpoll_rx_disable(dev);
 
-       list_add(&dev->unreg_list, &single);
+       list_add(&dev->close_list, &single);
        retval = __dev_close_many(&single);
        list_del(&single);
 
@@ -1362,21 +1362,20 @@ static int __dev_close(struct net_device *dev)
 static int dev_close_many(struct list_head *head)
 {
        struct net_device *dev, *tmp;
-       LIST_HEAD(tmp_list);
 
-       list_for_each_entry_safe(dev, tmp, head, unreg_list)
+       /* Remove the devices that don't need to be closed */
+       list_for_each_entry_safe(dev, tmp, head, close_list)
                if (!(dev->flags & IFF_UP))
-                       list_move(&dev->unreg_list, &tmp_list);
+                       list_del_init(&dev->close_list);
 
        __dev_close_many(head);
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
+               list_del_init(&dev->close_list);
        }
 
-       /* rollback_registered_many needs the complete original list */
-       list_splice(&tmp_list, head);
        return 0;
 }
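
dev_close_many() now walks close_list with the _safe iterator because entries are unlinked with list_del_init() during the walk; with the plain iterator, the next pointer of a just-unlinked entry would no longer lead back into the list. The idiom in isolation (a hypothetical walk, assuming the close_list member introduced by this series):

#include <linux/list.h>
#include <linux/netdevice.h>

static void detach_all(struct list_head *head)
{
	struct net_device *dev, *tmp;

	/* tmp caches the next entry, so the current one can be removed. */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		list_del_init(&dev->close_list);
}
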
 
@@ -1397,7 +1396,7 @@ int dev_close(struct net_device *dev)
                /* Block netpoll rx while the interface is going down */
                netpoll_rx_disable(dev);
 
-               list_add(&dev->unreg_list, &single);
+               list_add(&dev->close_list, &single);
                dev_close_many(&single);
                list_del(&single);
 
@@ -1917,7 +1916,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
        return new_map;
 }
 
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+                       u16 index)
 {
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
        struct xps_map *map, *new_map;
@@ -2377,6 +2377,8 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
        }
 
        SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+       SKB_GSO_CB(skb)->encap_level = 0;
+
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);
 
@@ -4373,42 +4375,40 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
-       /* indicates that this dev is our first-level lower/upper device */
-       bool neighbour;
-
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
+       /* private field for the users */
+       void *private;
+
        struct list_head list;
        struct rcu_head rcu;
 };
 
-static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
-                                                struct net_device *adj_dev,
-                                                bool upper)
+static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
+                                                    struct net_device *adj_dev,
+                                                    struct list_head *adj_list)
 {
        struct netdev_adjacent *adj;
-       struct list_head *dev_list;
 
-       dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
-
-       list_for_each_entry(adj, dev_list, list) {
+       list_for_each_entry_rcu(adj, adj_list, list) {
                if (adj->dev == adj_dev)
                        return adj;
        }
        return NULL;
 }
 
-static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
-                                                         struct net_device *udev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+                                                struct net_device *adj_dev,
+                                                struct list_head *adj_list)
 {
-       return __netdev_find_adj(dev, udev, true);
-}
+       struct netdev_adjacent *adj;
 
-static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
-                                                         struct net_device *ldev)
-{
-       return __netdev_find_adj(dev, ldev, false);
+       list_for_each_entry(adj, adj_list, list) {
+               if (adj->dev == adj_dev)
+                       return adj;
+       }
+       return NULL;
 }
 
 /**
@@ -4425,7 +4425,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return __netdev_find_upper(dev, upper_dev);
+       return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
 
@@ -4440,7 +4440,7 @@ bool netdev_has_any_upper_dev(struct net_device *dev)
 {
        ASSERT_RTNL();
 
-       return !list_empty(&dev->upper_dev_list);
+       return !list_empty(&dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
@@ -4457,10 +4457,10 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 
        ASSERT_RTNL();
 
-       if (list_empty(&dev->upper_dev_list))
+       if (list_empty(&dev->adj_list.upper))
                return NULL;
 
-       upper = list_first_entry(&dev->upper_dev_list,
+       upper = list_first_entry(&dev->adj_list.upper,
                                 struct netdev_adjacent, list);
        if (likely(upper->master))
                return upper->dev;
@@ -4468,15 +4468,26 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
-/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+void *netdev_adjacent_get_private(struct list_head *adj_list)
+{
+       struct netdev_adjacent *adj;
+
+       adj = list_entry(adj_list, struct netdev_adjacent, list);
+
+       return adj->private;
+}
+EXPORT_SYMBOL(netdev_adjacent_get_private);
+
+/**
+ * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
  *
  * Gets the next device from the dev's upper list, starting from iter
  * position. The caller must hold RCU read lock.
  */
-struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-                                                struct list_head **iter)
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter)
 {
        struct netdev_adjacent *upper;
 
@@ -4484,14 +4495,71 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 
        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
-       if (&upper->list == &dev->upper_dev_list)
+       if (&upper->list == &dev->all_adj_list.upper)
                return NULL;
 
        *iter = &upper->list;
 
        return upper->dev;
 }
-EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
+
+/**
+ * netdev_lower_get_next_private - Get the next ->private from the
+ *                                lower neighbour list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold either the
+ * RTNL lock or its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next_private(struct net_device *dev,
+                                   struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry(*iter, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       if (iter)
+               *iter = lower->list.next;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private);
+
+/**
+ * netdev_lower_get_next_private_rcu - Get the next ->private from the
+ *                                    lower neighbour list, RCU
+ *                                    variant
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+                                       struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       if (iter)
+               *iter = &lower->list;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
 /**
  * netdev_master_upper_dev_get_rcu - Get master upper device
@@ -4504,7 +4572,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
 {
        struct netdev_adjacent *upper;
 
-       upper = list_first_or_null_rcu(&dev->upper_dev_list,
+       upper = list_first_or_null_rcu(&dev->adj_list.upper,
                                       struct netdev_adjacent, list);
        if (upper && likely(upper->master))
                return upper->dev;
@@ -4514,15 +4582,16 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
-                                       bool neighbour, bool master,
-                                       bool upper)
+                                       struct list_head *dev_list,
+                                       void *private, bool master)
 {
        struct netdev_adjacent *adj;
+       char linkname[IFNAMSIZ+7];
+       int ret;
 
-       adj = __netdev_find_adj(dev, adj_dev, upper);
+       adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
        if (adj) {
-               BUG_ON(neighbour);
                adj->ref_nr++;
                return 0;
        }
@@ -4533,124 +4602,178 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 
        adj->dev = adj_dev;
        adj->master = master;
-       adj->neighbour = neighbour;
        adj->ref_nr = 1;
-
+       adj->private = private;
        dev_hold(adj_dev);
-       pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
-                adj_dev->name, upper ? "upper" : "lower", dev->name,
-                adj_dev->name);
 
-       if (!upper) {
-               list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
-               return 0;
+       pr_debug("dev_hold for %s, because of link added from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
+
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), linkname);
+               if (ret)
+                       goto free_adj;
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), linkname);
+               if (ret)
+                       goto free_adj;
        }
 
-       /* Ensure that master upper link is always the first item in list. */
-       if (master)
-               list_add_rcu(&adj->list, &dev->upper_dev_list);
-       else
-               list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+       /* Ensure that master link is always the first item in list. */
+       if (master) {
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), "master");
+               if (ret)
+                       goto remove_symlinks;
+
+               list_add_rcu(&adj->list, dev_list);
+       } else {
+               list_add_tail_rcu(&adj->list, dev_list);
+       }
 
        return 0;
-}
 
-static inline int __netdev_upper_dev_insert(struct net_device *dev,
-                                           struct net_device *udev,
-                                           bool master, bool neighbour)
-{
-       return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
-                                           true);
-}
+remove_symlinks:
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       }
 
-static inline int __netdev_lower_dev_insert(struct net_device *dev,
-                                           struct net_device *ldev,
-                                           bool neighbour)
-{
-       return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
-                                           false);
+free_adj:
+       kfree(adj);
+
+       return ret;
 }
 
 void __netdev_adjacent_dev_remove(struct net_device *dev,
-                                 struct net_device *adj_dev, bool upper)
+                                 struct net_device *adj_dev,
+                                 struct list_head *dev_list)
 {
        struct netdev_adjacent *adj;
+       char linkname[IFNAMSIZ+7];
 
-       if (upper)
-               adj = __netdev_find_upper(dev, adj_dev);
-       else
-               adj = __netdev_find_lower(dev, adj_dev);
+       adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
-       if (!adj)
+       if (!adj) {
+               pr_err("tried to remove device %s from %s\n",
+                      dev->name, adj_dev->name);
                BUG();
+       }
 
        if (adj->ref_nr > 1) {
+               pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+                        adj->ref_nr-1);
                adj->ref_nr--;
                return;
        }
 
+       if (adj->master)
+               sysfs_remove_link(&(dev->dev.kobj), "master");
+
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       }
+
        list_del_rcu(&adj->list);
-       pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
-                adj_dev->name, upper ? "upper" : "lower", dev->name,
-                adj_dev->name);
+       pr_debug("dev_put for %s, because link removed from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
        dev_put(adj_dev);
        kfree_rcu(adj, rcu);
 }
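
Illustrative only, not part of the patch: the "lower_<ifname>", "upper_<ifname>" and "master" symlinks created and removed above become visible under the device's sysfs directory. A minimal user-space sketch that lists them for one interface, assuming the usual /sys/class/net layout (the "bond0" default is just an example):

#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "bond0";
	char path[PATH_MAX], target[PATH_MAX];
	struct dirent *de;
	DIR *d;

	snprintf(path, sizeof(path), "/sys/class/net/%s", dev);
	d = opendir(path);
	if (!d) {
		perror(path);
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		ssize_t n;

		/* only the adjacency links added by this patch */
		if (strncmp(de->d_name, "lower_", 6) != 0 &&
		    strncmp(de->d_name, "upper_", 6) != 0 &&
		    strcmp(de->d_name, "master") != 0)
			continue;
		snprintf(path, sizeof(path), "/sys/class/net/%s/%s",
			 dev, de->d_name);
		n = readlink(path, target, sizeof(target) - 1);
		if (n > 0) {
			target[n] = '\0';
			printf("%s -> %s\n", de->d_name, target);
		}
	}
	closedir(d);
	return 0;
}
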
 
-static inline void __netdev_upper_dev_remove(struct net_device *dev,
-                                            struct net_device *udev)
-{
-       return __netdev_adjacent_dev_remove(dev, udev, true);
-}
-
-static inline void __netdev_lower_dev_remove(struct net_device *dev,
-                                            struct net_device *ldev)
-{
-       return __netdev_adjacent_dev_remove(dev, ldev, false);
-}
-
-int __netdev_adjacent_dev_insert_link(struct net_device *dev,
-                                     struct net_device *upper_dev,
-                                     bool master, bool neighbour)
+int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+                                    struct net_device *upper_dev,
+                                    struct list_head *up_list,
+                                    struct list_head *down_list,
+                                    void *private, bool master)
 {
        int ret;
 
-       ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+       ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+                                          master);
        if (ret)
                return ret;
 
-       ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+       ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+                                          false);
        if (ret) {
-               __netdev_upper_dev_remove(dev, upper_dev);
+               __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
                return ret;
        }
 
        return 0;
 }
 
-static inline int __netdev_adjacent_dev_link(struct net_device *dev,
-                                            struct net_device *udev)
+int __netdev_adjacent_dev_link(struct net_device *dev,
+                              struct net_device *upper_dev)
 {
-       return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+       return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+                                               &dev->all_adj_list.upper,
+                                               &upper_dev->all_adj_list.lower,
+                                               NULL, false);
 }
 
-static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
-                                                      struct net_device *udev,
-                                                      bool master)
+void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+                                       struct net_device *upper_dev,
+                                       struct list_head *up_list,
+                                       struct list_head *down_list)
 {
-       return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+       __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+       __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
 }
 
 void __netdev_adjacent_dev_unlink(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       __netdev_upper_dev_remove(dev, upper_dev);
-       __netdev_lower_dev_remove(upper_dev, dev);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+                                          &dev->all_adj_list.upper,
+                                          &upper_dev->all_adj_list.lower);
 }
 
+int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private, bool master)
+{
+       int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+
+       if (ret)
+               return ret;
+
+       ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+                                              &dev->adj_list.upper,
+                                              &upper_dev->adj_list.lower,
+                                              private, master);
+       if (ret) {
+               __netdev_adjacent_dev_unlink(dev, upper_dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+                                           struct net_device *upper_dev)
+{
+       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+                                          &dev->adj_list.upper,
+                                          &upper_dev->adj_list.lower);
+}
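
For orientation (editorial sketch, not part of the patch): the helpers above maintain two pairs of lists per device, adj_list.{upper,lower} holding only direct neighbours (these carry the sysfs links, the master flag and the new private pointer) and all_adj_list.{upper,lower} holding the full transitive set of upper/lower devices. A rough shape of the adjacency entry, inferred from the fields referenced in these hunks; the authoritative definition lives in net/core/dev.c and may differ in detail:

struct netdev_adjacent {
	struct net_device *dev;		/* the linked device */
	bool master;			/* upper device is a master */
	u16 ref_nr;			/* times this adjacency was added */
	void *private;			/* opaque per-link data (new in this patch) */
	struct list_head list;		/* entry in adj_list / all_adj_list */
	struct rcu_head rcu;		/* freed via kfree_rcu() on removal */
};
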
 
 static int __netdev_upper_dev_link(struct net_device *dev,
-                                  struct net_device *upper_dev, bool master)
+                                  struct net_device *upper_dev, bool master,
+                                  void *private)
 {
        struct netdev_adjacent *i, *j, *to_i, *to_j;
        int ret = 0;
@@ -4661,26 +4784,29 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (__netdev_find_upper(upper_dev, dev))
+       if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
                return -EBUSY;
 
-       if (__netdev_find_upper(dev, upper_dev))
+       if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
                return -EEXIST;
 
        if (master && netdev_master_upper_dev_get(dev))
                return -EBUSY;
 
-       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+                                                  master);
        if (ret)
                return ret;
 
        /* Now that we linked these devs, make all the upper_dev's
-        * upper_dev_list visible to every dev's lower_dev_list and vice
+        * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
         * versa, and don't forget the devices itself. All of these
         * links are non-neighbours.
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+                       pr_debug("Interlinking %s with %s, non-neighbour\n",
+                                i->dev->name, j->dev->name);
                        ret = __netdev_adjacent_dev_link(i->dev, j->dev);
                        if (ret)
                                goto rollback_mesh;
@@ -4688,14 +4814,18 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        }
 
        /* add dev to every upper_dev's upper device */
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+               pr_debug("linking %s's upper device %s with %s\n",
+                        upper_dev->name, i->dev->name, dev->name);
                ret = __netdev_adjacent_dev_link(dev, i->dev);
                if (ret)
                        goto rollback_upper_mesh;
        }
 
        /* add upper_dev to every dev's lower device */
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               pr_debug("linking %s's lower device %s with %s\n", dev->name,
+                        i->dev->name, upper_dev->name);
                ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
                if (ret)
                        goto rollback_lower_mesh;
@@ -4706,7 +4836,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 
 rollback_lower_mesh:
        to_i = i;
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
                if (i == to_i)
                        break;
                __netdev_adjacent_dev_unlink(i->dev, upper_dev);
@@ -4716,7 +4846,7 @@ rollback_lower_mesh:
 
 rollback_upper_mesh:
        to_i = i;
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
                if (i == to_i)
                        break;
                __netdev_adjacent_dev_unlink(dev, i->dev);
@@ -4727,8 +4857,8 @@ rollback_upper_mesh:
 rollback_mesh:
        to_i = i;
        to_j = j;
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
                        if (i == to_i && j == to_j)
                                break;
                        __netdev_adjacent_dev_unlink(i->dev, j->dev);
@@ -4737,7 +4867,7 @@ rollback_mesh:
                        break;
        }
 
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        return ret;
 }
@@ -4755,7 +4885,7 @@ rollback_mesh:
 int netdev_upper_dev_link(struct net_device *dev,
                          struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, false);
+       return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_link);
 
@@ -4773,10 +4903,18 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
 int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, true);
+       return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_link);
 
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private)
+{
+       return __netdev_upper_dev_link(dev, upper_dev, true, private);
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
+
 /**
  * netdev_upper_dev_unlink - Removes a link to upper device
  * @dev: device
@@ -4791,29 +4929,59 @@ void netdev_upper_dev_unlink(struct net_device *dev,
        struct netdev_adjacent *i, *j;
        ASSERT_RTNL();
 
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        /* Here is the tricky part. We must remove all dev's lower
         * devices from all upper_dev's upper devices and vice
         * versa, to maintain the graph relationship.
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list)
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+       list_for_each_entry(i, &dev->all_adj_list.lower, list)
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
                        __netdev_adjacent_dev_unlink(i->dev, j->dev);
 
        /* remove also the devices itself from lower/upper device
         * list
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list)
+       list_for_each_entry(i, &dev->all_adj_list.lower, list)
                __netdev_adjacent_dev_unlink(i->dev, upper_dev);
 
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
                __netdev_adjacent_dev_unlink(dev, i->dev);
 
        call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+                                      struct net_device *lower_dev)
+{
+       struct netdev_adjacent *lower;
+
+       if (!lower_dev)
+               return NULL;
+       lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
+       if (!lower)
+               return NULL;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
+
+void *netdev_lower_dev_get_private(struct net_device *dev,
+                                  struct net_device *lower_dev)
+{
+       struct netdev_adjacent *lower;
+
+       if (!lower_dev)
+               return NULL;
+       lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
+       if (!lower)
+               return NULL;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private);
+
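
A hedged usage sketch for the two exports above (editorial addition): a hypothetical "foo" master driver attaches per-slave state when creating the master link and looks it up again later. All "foo" names are invented for illustration; the argument order follows the prototypes above (lower device first when linking, upper device first when querying):

#include <linux/netdevice.h>
#include <linux/slab.h>

struct foo_slave_info {
	u32 link_failures;
};

static int foo_enslave(struct net_device *master, struct net_device *slave)
{
	struct foo_slave_info *info;
	int err;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* store the per-slave state in the adjacency entry */
	err = netdev_master_upper_dev_link_private(slave, master, info);
	if (err) {
		kfree(info);
		return err;
	}
	return 0;
}

static struct foo_slave_info *foo_get_slave_info(struct net_device *master,
						 struct net_device *slave)
{
	/* retrieve it again from the master's lower-device list */
	return netdev_lower_dev_get_private(master, slave);
}
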
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -4822,7 +4990,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
                ops->ndo_change_rx_flags(dev, flags);
 }
 
-static int __dev_set_promiscuity(struct net_device *dev, int inc)
+static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
 {
        unsigned int old_flags = dev->flags;
        kuid_t uid;
@@ -4865,6 +5033,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 
                dev_change_rx_flags(dev, IFF_PROMISC);
        }
+       if (notify)
+               __dev_notify_flags(dev, old_flags, IFF_PROMISC);
        return 0;
 }
 
@@ -4884,7 +5054,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
        unsigned int old_flags = dev->flags;
        int err;
 
-       err = __dev_set_promiscuity(dev, inc);
+       err = __dev_set_promiscuity(dev, inc, true);
        if (err < 0)
                return err;
        if (dev->flags != old_flags)
@@ -4893,22 +5063,9 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
 }
 EXPORT_SYMBOL(dev_set_promiscuity);
 
-/**
- *     dev_set_allmulti        - update allmulti count on a device
- *     @dev: device
- *     @inc: modifier
- *
- *     Add or remove reception of all multicast frames to a device. While the
- *     count in the device remains above zero the interface remains listening
- *     to all interfaces. Once it hits zero the device reverts back to normal
- *     filtering operation. A negative @inc value is used to drop the counter
- *     when releasing a resource needing all multicasts.
- *     Return 0 if successful or a negative errno code on error.
- */
-
-int dev_set_allmulti(struct net_device *dev, int inc)
+static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
 {
-       unsigned int old_flags = dev->flags;
+       unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
 
        ASSERT_RTNL();
 
@@ -4931,9 +5088,30 @@ int dev_set_allmulti(struct net_device *dev, int inc)
        if (dev->flags ^ old_flags) {
                dev_change_rx_flags(dev, IFF_ALLMULTI);
                dev_set_rx_mode(dev);
+               if (notify)
+                       __dev_notify_flags(dev, old_flags,
+                                          dev->gflags ^ old_gflags);
        }
        return 0;
 }
+
+/**
+ *     dev_set_allmulti        - update allmulti count on a device
+ *     @dev: device
+ *     @inc: modifier
+ *
+ *     Add or remove reception of all multicast frames to a device. While the
+ *     count in the device remains above zero the interface remains listening
+ *     to all interfaces. Once it hits zero the device reverts back to normal
+ *     filtering operation. A negative @inc value is used to drop the counter
+ *     when releasing a resource needing all multicasts.
+ *     Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+       return __dev_set_allmulti(dev, inc, true);
+}
 EXPORT_SYMBOL(dev_set_allmulti);
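
A small hedged usage sketch of the exported wrapper (editorial addition; the "snooping" feature and function names are invented): a driver feature that needs every multicast frame takes an allmulti reference while enabled and drops it on disable, under RTNL as the function asserts:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_enable_snooping(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_set_allmulti(dev, 1);		/* take a reference */
	if (err < 0)
		return err;
	/* ... start snooping ... */
	return 0;
}

static void example_disable_snooping(struct net_device *dev)
{
	ASSERT_RTNL();
	/* ... stop snooping ... */
	dev_set_allmulti(dev, -1);		/* drop the reference */
}
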
 
 /*
@@ -4958,10 +5136,10 @@ void __dev_set_rx_mode(struct net_device *dev)
                 * therefore calling __dev_set_promiscuity here is safe.
                 */
                if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
-                       __dev_set_promiscuity(dev, 1);
+                       __dev_set_promiscuity(dev, 1, false);
                        dev->uc_promisc = true;
                } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
-                       __dev_set_promiscuity(dev, -1);
+                       __dev_set_promiscuity(dev, -1, false);
                        dev->uc_promisc = false;
                }
        }
@@ -5050,9 +5228,13 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
 
        if ((flags ^ dev->gflags) & IFF_PROMISC) {
                int inc = (flags & IFF_PROMISC) ? 1 : -1;
+               unsigned int old_flags = dev->flags;
 
                dev->gflags ^= IFF_PROMISC;
-               dev_set_promiscuity(dev, inc);
+
+               if (__dev_set_promiscuity(dev, inc, false) >= 0)
+                       if (dev->flags != old_flags)
+                               dev_set_rx_mode(dev);
        }
 
        /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
@@ -5063,16 +5245,20 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
 
                dev->gflags ^= IFF_ALLMULTI;
-               dev_set_allmulti(dev, inc);
+               __dev_set_allmulti(dev, inc, false);
        }
 
        return ret;
 }
 
-void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
+                       unsigned int gchanges)
 {
        unsigned int changes = dev->flags ^ old_flags;
 
+       if (gchanges)
+               rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges);
+
        if (changes & IFF_UP) {
                if (dev->flags & IFF_UP)
                        call_netdevice_notifiers(NETDEV_UP, dev);
@@ -5101,17 +5287,14 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
 int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
        int ret;
-       unsigned int changes, old_flags = dev->flags;
+       unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
 
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
                return ret;
 
-       changes = old_flags ^ dev->flags;
-       if (changes)
-               rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
-
-       __dev_notify_flags(dev, old_flags);
+       changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
+       __dev_notify_flags(dev, old_flags, changes);
        return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
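
A short worked example (editorial addition, values hypothetical) of why the notification mask now also folds in gflags: if userspace toggles IFF_PROMISC while the driver-visible flags stay unchanged because another promiscuity reference is already held, the old "old_flags ^ dev->flags" term alone would be zero and no RTM_NEWLINK would be sent.

#include <linux/if.h>
#include <linux/printk.h>

static void example_flag_change_mask(void)
{
	/* Hypothetical before/after values for one dev_change_flags() call:
	 * the interface was already promiscuous (another reference held), so
	 * the driver-visible flags do not change, but the user-requested
	 * gflags do.
	 */
	unsigned int old_flags  = IFF_UP | IFF_BROADCAST | IFF_PROMISC;
	unsigned int new_flags  = IFF_UP | IFF_BROADCAST | IFF_PROMISC;
	unsigned int old_gflags = 0;
	unsigned int new_gflags = IFF_PROMISC;
	unsigned int changes;

	changes = (old_flags ^ new_flags) | (old_gflags ^ new_gflags);
	pr_info("changes = %#x (IFF_PROMISC)\n", changes);
}
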
@@ -5247,15 +5430,18 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
+static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
        list_add_tail(&dev->todo_list, &net_todo_list);
+       dev_net(dev)->dev_unreg_count++;
 }
 
 static void rollback_registered_many(struct list_head *head)
 {
        struct net_device *dev, *tmp;
+       LIST_HEAD(close_head);
 
        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();
@@ -5278,7 +5464,9 @@ static void rollback_registered_many(struct list_head *head)
        }
 
        /* If device is running, close it first. */
-       dev_close_many(head);
+       list_for_each_entry(dev, head, unreg_list)
+               list_add_tail(&dev->close_list, &close_head);
+       dev_close_many(&close_head);
 
        list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
@@ -5918,6 +6106,12 @@ void netdev_run_todo(void)
                if (dev->destructor)
                        dev->destructor(dev);
 
+               /* Report a network device has been unregistered */
+               rtnl_lock();
+               dev_net(dev)->dev_unreg_count--;
+               __rtnl_unlock();
+               wake_up(&netdev_unregistering_wq);
+
                /* Free network device */
                kobject_put(&dev->dev.kobj);
        }
@@ -6068,9 +6262,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
+       INIT_LIST_HEAD(&dev->close_list);
        INIT_LIST_HEAD(&dev->link_watch_list);
-       INIT_LIST_HEAD(&dev->upper_dev_list);
-       INIT_LIST_HEAD(&dev->lower_dev_list);
+       INIT_LIST_HEAD(&dev->adj_list.upper);
+       INIT_LIST_HEAD(&dev->adj_list.lower);
+       INIT_LIST_HEAD(&dev->all_adj_list.upper);
+       INIT_LIST_HEAD(&dev->all_adj_list.lower);
        dev->priv_flags = IFF_XMIT_DST_RELEASE;
        setup(dev);
 
@@ -6603,6 +6800,34 @@ static void __net_exit default_device_exit(struct net *net)
        rtnl_unlock();
 }
 
+static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
+{
+       /* Return with the rtnl_lock held when there are no network
+        * devices unregistering in any network namespace in net_list.
+        */
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               list_for_each_entry(net, net_list, exit_list) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
 {
        /* At exit all network devices most be removed from a network
@@ -6614,7 +6839,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
        struct net *net;
        LIST_HEAD(dev_kill_list);
 
-       rtnl_lock();
+       /* To prevent network device cleanup code from dereferencing
+        * loopback devices or network devices that have been freed,
+        * wait here for all pending unregistrations to complete
+        * before unregistering the loopback device and allowing the
+        * network namespace to be freed.
+        *
+        * The netdev todo list containing all network device
+        * unregistrations that happen in default_device_exit_batch
+        * is processed in the rtnl_unlock() at the end of
+        * default_device_exit_batch.
+        */
+       rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
                        if (dev->rtnl_link_ops)
index 78e9d9223e40dfe5bc9956170619e0a8e3335e9d..862989898f611e8f9f2dea28b273b551eeb2b0bb 100644 (file)
@@ -81,6 +81,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
        [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
        [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
+       [NETIF_F_GSO_IPIP_BIT] =         "tx-ipip-segmentation",
+       [NETIF_F_GSO_SIT_BIT] =          "tx-sit-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
        [NETIF_F_GSO_MPLS_BIT] =         "tx-mpls-segmentation",
 
index 6438f29ff26650b240be40d7d80953dc28f13cb0..01b780856db29e5a2a7ebbc36aa7062340b28bb8 100644 (file)
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
        bpf_jit_free(fp);
-       kfree(fp);
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+       fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        struct sk_filter *fp, *old_fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+       unsigned int sk_fsize = sk_filter_size(fprog->len);
        int err;
 
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+       fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-               sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+               sock_kfree_s(sk, fp, sk_fsize);
                return -EFAULT;
        }
 
index 1929af87b2609650d0b484e7d01dc4a405ba8389..f8e25ac41c6c1b7b2eebadc6ec8d8310c458bfee 100644 (file)
@@ -25,9 +25,35 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i
        memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 }
 
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff, where
+ * poff is the protocol port offset returned by proto_ports_offset().
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+       int poff = proto_ports_offset(ip_proto);
+
+       if (poff >= 0) {
+               __be32 *ports, _ports;
+
+               ports = skb_header_pointer(skb, thoff + poff,
+                                          sizeof(_ports), &_ports);
+               if (ports)
+                       return *ports;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
+
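
A hedged caller sketch for the newly exported helper (editorial addition): thoff and ip_proto are assumed to come from earlier header parsing, the union mirrors how the dissector stores both 16-bit ports in one __be32, and net/flow_keys.h is assumed to carry the new prototype:

#include <linux/skbuff.h>
#include <net/flow_keys.h>

static bool example_get_l4_ports(const struct sk_buff *skb, int thoff,
				 u8 ip_proto, __be16 *sport, __be16 *dport)
{
	union {
		__be32 ports;
		__be16 port16[2];
	} p;

	p.ports = skb_flow_get_ports(skb, thoff, ip_proto);
	if (!p.ports)
		return false;

	*sport = p.port16[0];	/* network byte order */
	*dport = p.port16[1];
	return true;
}
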
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 {
-       int poff, nhoff = skb_network_offset(skb);
+       int nhoff = skb_network_offset(skb);
        u8 ip_proto;
        __be16 proto = skb->protocol;
 
@@ -150,16 +176,7 @@ ipv6:
        }
 
        flow->ip_proto = ip_proto;
-       poff = proto_ports_offset(ip_proto);
-       if (poff >= 0) {
-               __be32 *ports, _ports;
-
-               nhoff += poff;
-               ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
-               if (ports)
-                       flow->ports = *ports;
-       }
-
+       flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
        flow->thoff = (u16) nhoff;
 
        return true;
index 6072610a8672d1a54a0e7618214f70610fa2a6fa..ca15f32821fb8d536586354108488050b7cc6a0e 100644 (file)
@@ -867,7 +867,7 @@ static void neigh_invalidate(struct neighbour *neigh)
 static void neigh_probe(struct neighbour *neigh)
        __releases(neigh->lock)
 {
-       struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+       struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
                skb = skb_copy(skb, GFP_ATOMIC);
index d9cd627e6a16a55bb9b6e8ec1619a0291e1621ab..9b7cf6c85f82447d3e03f33cd2c599de13ed3644 100644 (file)
@@ -222,11 +222,10 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
                            struct cgroup_taskset *tset)
 {
        struct task_struct *p;
-       void *v;
+       void *v = (void *)(unsigned long)css->cgroup->id;
 
        cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
-               v = (void *)(unsigned long)task_netprioidx(p);
                iterate_fd(p->files, 0, update_netprio, v);
                task_unlock(p);
        }
index 2a0e21de3060cddbc9cd657acb24ee84b748c4d1..4aedf03da0521433f16b470f3e675adcd4efdf25 100644 (file)
@@ -1647,9 +1647,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
        }
 
        dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-       rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
-       __dev_notify_flags(dev, old_flags);
+       __dev_notify_flags(dev, old_flags, ~0U);
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
index 6a2f13cee86a0c3f34f69832d4b6d93592fd5327..897da56f3affae13161d4610989667a82244c308 100644 (file)
@@ -7,15 +7,20 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 #include <linux/string.h>
+#include <linux/net.h>
 
 #include <net/secure_seq.h>
 
-static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
+#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
-void net_secret_init(void)
+static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
+
+static __always_inline void net_secret_init(void)
 {
-       get_random_bytes(net_secret, sizeof(net_secret));
+       net_get_random_once(net_secret, sizeof(net_secret));
 }
+#endif
 
 #ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
@@ -42,6 +47,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
        u32 hash[MD5_DIGEST_WORDS];
        u32 i;
 
+       net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
                secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +69,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
        u32 hash[MD5_DIGEST_WORDS];
        u32 i;
 
+       net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
                secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +89,7 @@ __u32 secure_ip_id(__be32 daddr)
 {
        u32 hash[MD5_DIGEST_WORDS];
 
+       net_secret_init();
        hash[0] = (__force __u32) daddr;
        hash[1] = net_secret[13];
        hash[2] = net_secret[14];
@@ -96,6 +104,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
 {
        __u32 hash[4];
 
+       net_secret_init();
        memcpy(hash, daddr, 16);
        md5_transform(hash, net_secret);
 
@@ -107,6 +116,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 {
        u32 hash[MD5_DIGEST_WORDS];
 
+       net_secret_init();
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +131,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
        u32 hash[MD5_DIGEST_WORDS];
 
+       net_secret_init();
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +151,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
        u32 hash[MD5_DIGEST_WORDS];
        u64 seq;
 
+       net_secret_init();
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +176,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
        u64 seq;
        u32 i;
 
+       net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
                secret[i] = net_secret[i] + daddr[i];
index d81cff119f734a5c8f405fed01f4047b9169f5d1..0ab32faa520f30bb16dfce78d6c17a27e61c1c59 100644 (file)
@@ -903,6 +903,9 @@ EXPORT_SYMBOL(skb_clone);
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
+       /* Only adjust this if it actually is csum_start rather than csum */
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += off;
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->transport_header += off;
        skb->network_header   += off;
@@ -1109,9 +1112,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
        skb->tail             += off;
        skb_headers_offset_update(skb, nhead);
-       /* Only adjust this if it actually is csum_start rather than csum */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               skb->csum_start += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
@@ -1176,7 +1176,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                        NUMA_NO_NODE);
        int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
-       int off;
 
        if (!n)
                return NULL;
@@ -1200,11 +1199,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
        copy_skb_header(n, skb);
 
-       off                  = newheadroom - oldheadroom;
-       if (n->ip_summed == CHECKSUM_PARTIAL)
-               n->csum_start += off;
-
-       skb_headers_offset_update(n, off);
+       skb_headers_offset_update(n, newheadroom - oldheadroom);
 
        return n;
 }
@@ -2837,14 +2832,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               /* nskb and skb might have different headroom */
-               if (nskb->ip_summed == CHECKSUM_PARTIAL)
-                       nskb->csum_start += skb_headroom(nskb) - headroom;
-
-               skb_reset_mac_header(nskb);
-               skb_set_network_header(nskb, skb->mac_len);
-               nskb->transport_header = (nskb->network_header +
-                                         skb_network_header_len(skb));
+               skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
                skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                                                 nskb->data - tnl_hlen,
@@ -2936,32 +2924,30 @@ EXPORT_SYMBOL_GPL(skb_segment);
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
-       struct sk_buff *p = *head;
-       struct sk_buff *nskb;
-       struct skb_shared_info *skbinfo = skb_shinfo(skb);
-       struct skb_shared_info *pinfo = skb_shinfo(p);
-       unsigned int headroom;
-       unsigned int len = skb_gro_len(skb);
+       struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
+       struct sk_buff *nskb, *lp, *p = *head;
+       unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
+       unsigned int headroom;
 
-       if (p->len + len >= 65536)
+       if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       if (pinfo->frag_list)
-               goto merge;
-       else if (headlen <= offset) {
+       lp = NAPI_GRO_CB(p)->last ?: p;
+       pinfo = skb_shinfo(lp);
+
+       if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;
 
-               offset -= headlen;
-
                if (nr_frags > MAX_SKB_FRAGS)
-                       return -E2BIG;
+                       goto merge;
 
+               offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;
 
@@ -2992,7 +2978,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                unsigned int first_offset;
 
                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
-                       return -E2BIG;
+                       goto merge;
 
                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
@@ -3010,7 +2996,10 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
-       } else if (skb_gro_len(p) != pinfo->gso_size)
+       }
+       if (pinfo->frag_list)
+               goto merge;
+       if (skb_gro_len(p) != pinfo->gso_size)
                return -E2BIG;
 
        headroom = skb_headroom(p);
@@ -3062,16 +3051,24 @@ merge:
 
        __skb_pull(skb, offset);
 
-       NAPI_GRO_CB(p)->last->next = skb;
+       if (!NAPI_GRO_CB(p)->last)
+               skb_shinfo(p)->frag_list = skb;
+       else
+               NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        skb_header_release(skb);
+       lp = p;
 
 done:
        NAPI_GRO_CB(p)->count++;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
-
+       if (lp != p) {
+               lp->data_len += len;
+               lp->truesize += delta_truesize;
+               lp->len += len;
+       }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
 }
index 5b6beba494a350cb28adfc7724487f1a13e6c011..ab20ed9b0f31da64cb118cf645fa88f5787b5571 100644 (file)
@@ -475,12 +475,6 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
-void sk_reset_txq(struct sock *sk)
-{
-       sk_tx_queue_clear(sk);
-}
-EXPORT_SYMBOL(sk_reset_txq);
-
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
        struct dst_entry *dst = __sk_dst_get(sk);
@@ -914,6 +908,13 @@ set_rcvbuf:
                }
                break;
 #endif
+
+       case SO_MAX_PACING_RATE:
+               sk->sk_max_pacing_rate = val;
+               sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+                                        sk->sk_max_pacing_rate);
+               break;
+
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -1177,6 +1178,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 #endif
 
+       case SO_MAX_PACING_RATE:
+               v.val = sk->sk_max_pacing_rate;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
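
A hedged user-space illustration of the new socket option (editorial addition): only the kernel side appears in this hunk, so the numeric fallback value and the bytes-per-second interpretation are taken from the generic uapi headers of this kernel version rather than from the diff itself, and per-arch values may differ:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* asm-generic value; some arches differ */
#endif

/* Cap the kernel's pacing of this socket to @rate bytes per second. */
static int set_pacing_cap(int fd, unsigned int rate)
{
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &rate, sizeof(rate)) < 0) {
		perror("setsockopt(SO_MAX_PACING_RATE)");
		return -1;
	}
	return 0;
}
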
@@ -1836,7 +1841,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER    get_order(32768)
 
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less than or equal to PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 {
        int order;
 
@@ -1845,16 +1860,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
                        pfrag->offset = 0;
                        return true;
                }
-               if (pfrag->offset < pfrag->size)
+               if (pfrag->offset + sz <= pfrag->size)
                        return true;
                put_page(pfrag->page);
        }
 
        /* We restrict high order allocations to users that can afford to wait */
-       order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+       order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
 
        do {
-               gfp_t gfp = sk->sk_allocation;
+               gfp_t gfp = prio;
 
                if (order)
                        gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1866,6 +1881,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
                }
        } while (--order >= 0);
 
+       return false;
+}
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+       if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+               return true;
+
        sk_enter_memory_pressure(sk);
        sk_stream_moderate_sndbuf(sk);
        return false;
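
A hedged sketch (editorial addition) of a non-socket caller using the newly split-out helper: unlike sk_page_frag_refill(), the caller picks the fragment size and allocation priority itself. The function name is invented; with GFP_KERNEL the page is a lowmem page, so page_address() is valid here:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

static void *example_frag_alloc(struct page_frag *pfrag, unsigned int sz)
{
	void *va;

	if (WARN_ON(sz > PAGE_SIZE))
		return NULL;
	if (!skb_page_frag_refill(sz, pfrag, GFP_KERNEL))
		return NULL;

	va = page_address(pfrag->page) + pfrag->offset;
	pfrag->offset += sz;
	return va;
}
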
@@ -2319,6 +2343,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_ll_usec          =       sysctl_net_busy_read;
 #endif
 
+       sk->sk_max_pacing_rate = ~0U;
+       sk->sk_pacing_rate = ~0U;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
index aa88e23fc87aa44f95f8794cf234846d8d06d21d..bf09371e19b146369a6c580e52700588440adf49 100644 (file)
@@ -338,3 +338,51 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                                  csum_unfold(*sum)));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+       struct work_struct work;
+       struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+       struct __net_random_once_work *work =
+               container_of(w, struct __net_random_once_work, work);
+       if (!static_key_enabled(work->key))
+               static_key_slow_inc(work->key);
+       kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+       struct __net_random_once_work *w;
+
+       w = kmalloc(sizeof(*w), GFP_ATOMIC);
+       if (!w)
+               return;
+
+       INIT_WORK(&w->work, __net_random_once_deferred);
+       w->key = key;
+       schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+                          struct static_key *done_key)
+{
+       static DEFINE_SPINLOCK(lock);
+
+       spin_lock_bh(&lock);
+       if (*done) {
+               spin_unlock_bh(&lock);
+               return false;
+       }
+
+       get_random_bytes(buf, nbytes);
+       *done = true;
+       spin_unlock_bh(&lock);
+
+       __net_random_once_disable_jump(done_key);
+
+       return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
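
A hedged sketch (editorial addition) mirroring the net_secret_init() pattern in the secure_seq.c hunks above: a subsystem secret is seeded on first use instead of unconditionally at boot. net_get_random_once() is assumed to be the static-key wrapper around __net_get_random_once(); its definition is not part of these hunks, and the names below are illustrative:

#include <linux/net.h>
#include <linux/types.h>

static u32 example_secret[4];

static inline void example_secret_init(void)
{
	net_get_random_once(example_secret, sizeof(example_secret));
}
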
index a269aa7f7923bf2b7f7b9d1dd3b26030456fc47c..3284bfa988c0ea45c0c46c4b5c7e9a80528bd0b0 100644 (file)
@@ -101,16 +101,16 @@ struct dccp_ackvec_record {
        u8               avr_ack_nonce:1;
 };
 
-extern int dccp_ackvec_init(void);
-extern void dccp_ackvec_exit(void);
+int dccp_ackvec_init(void);
+void dccp_ackvec_exit(void);
 
-extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
-extern void dccp_ackvec_free(struct dccp_ackvec *av);
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
+void dccp_ackvec_free(struct dccp_ackvec *av);
 
-extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
-extern int  dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
-extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
-extern u16  dccp_ackvec_buflen(const struct dccp_ackvec *av);
+void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
 
 static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
 {
@@ -133,7 +133,6 @@ struct dccp_ackvec_parsed {
        struct list_head node;
 };
 
-extern int dccp_ackvec_parsed_add(struct list_head *head,
-                                 u8 *vec, u8 len, u8 nonce);
-extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
+int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce);
+void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
 #endif /* _ACKVEC_H */
index fb85d371a8dec875a01205b22ef6ddef5e99d511..6eb837a47b5c42f8c73c498525fe4d269fc5b97d 100644 (file)
@@ -93,8 +93,8 @@ extern struct ccid_operations ccid2_ops;
 extern struct ccid_operations ccid3_ops;
 #endif
 
-extern int  ccid_initialize_builtins(void);
-extern void ccid_cleanup_builtins(void);
+int ccid_initialize_builtins(void);
+void ccid_cleanup_builtins(void);
 
 struct ccid {
        struct ccid_operations *ccid_ops;
@@ -106,12 +106,12 @@ static inline void *ccid_priv(const struct ccid *ccid)
        return (void *)ccid->ccid_priv;
 }
 
-extern bool ccid_support_check(u8 const *ccid_array, u8 array_len);
-extern int  ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
-extern int  ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
-                                         char __user *, int __user *);
+bool ccid_support_check(u8 const *ccid_array, u8 array_len);
+int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
+int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
+                                 char __user *, int __user *);
 
-extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
+struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
 
 static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp)
 {
@@ -131,8 +131,8 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp)
        return ccid->ccid_ops->ccid_id;
 }
 
-extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
-extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
 
 /*
  * Congestion control of queued data packets via CCID decision.
index d1d2f5383b7d3d7cec533be0d7b8d939ebe492dd..57f631a86ccd35c9a7a949003091909330971cb3 100644 (file)
@@ -65,9 +65,9 @@ static inline u8 tfrc_lh_length(struct tfrc_loss_hist *lh)
 
 struct tfrc_rx_hist;
 
-extern int  tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
-                                u32 (*first_li)(struct sock *), struct sock *);
-extern u8   tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
-extern void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
+int tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
+                        u32 (*first_li)(struct sock *), struct sock *);
+u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
+void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
 
 #endif /* _DCCP_LI_HIST_ */
index 7ee4a9d9d3352337bb5e899d214a754b740ac8c0..ee362b0b630ddde5a46242dc4b86c2213efe581c 100644 (file)
@@ -60,8 +60,8 @@ static inline struct tfrc_tx_hist_entry *
        return head;
 }
 
-extern int  tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
-extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
+int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
+void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
 
 /* Subtraction a-b modulo-16, respects circular wrap-around */
 #define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
@@ -139,20 +139,17 @@ static inline bool tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h)
        return h->loss_count > 0;
 }
 
-extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
-                                   const struct sk_buff *skb, const u64 ndp);
+void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, const struct sk_buff *skb,
+                            const u64 ndp);
 
-extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
+int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
 
 struct tfrc_loss_hist;
-extern int  tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
-                               struct tfrc_loss_hist *lh,
-                               struct sk_buff *skb, const u64 ndp,
-                               u32 (*first_li)(struct sock *sk),
-                               struct sock *sk);
-extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h,
-                                  const struct sk_buff *skb);
-extern int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
-extern void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
+int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, struct tfrc_loss_hist *lh,
+                       struct sk_buff *skb, const u64 ndp,
+                       u32 (*first_li)(struct sock *sk), struct sock *sk);
+u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb);
+int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
+void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
 
 #endif /* _DCCP_PKT_HIST_ */
index ed698c42a5fbef17b34a9a4daf6fc34464d914f5..40ee7d62b6520d7daf2a1a8f3b54bda9cfbedfc2 100644 (file)
@@ -55,21 +55,21 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
        return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
 }
 
-extern u32  tfrc_calc_x(u16 s, u32 R, u32 p);
-extern u32  tfrc_calc_x_reverse_lookup(u32 fvalue);
-extern u32  tfrc_invert_loss_event_rate(u32 loss_event_rate);
+u32 tfrc_calc_x(u16 s, u32 R, u32 p);
+u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
 
-extern int  tfrc_tx_packet_history_init(void);
-extern void tfrc_tx_packet_history_exit(void);
-extern int  tfrc_rx_packet_history_init(void);
-extern void tfrc_rx_packet_history_exit(void);
+int tfrc_tx_packet_history_init(void);
+void tfrc_tx_packet_history_exit(void);
+int tfrc_rx_packet_history_init(void);
+void tfrc_rx_packet_history_exit(void);
 
-extern int  tfrc_li_init(void);
-extern void tfrc_li_exit(void);
+int tfrc_li_init(void);
+void tfrc_li_exit(void);
 
 #ifdef CONFIG_IP_DCCP_TFRC_LIB
-extern int  tfrc_lib_init(void);
-extern void tfrc_lib_exit(void);
+int tfrc_lib_init(void);
+void tfrc_lib_exit(void);
 #else
 #define tfrc_lib_init() (0)
 #define tfrc_lib_exit()
index 708e75bf623dea1f78dcd844a1b1af5cd6928d09..30948784dd58ff0a35df38b075c3bebd53f90cbe 100644 (file)
@@ -53,7 +53,7 @@ extern struct inet_hashinfo dccp_hashinfo;
 
 extern struct percpu_counter dccp_orphan_count;
 
-extern void dccp_time_wait(struct sock *sk, int state, int timeo);
+void dccp_time_wait(struct sock *sk, int state, int timeo);
 
 /*
  *  Set safe upper bounds for header and option length. Since Data Offset is 8
@@ -224,114 +224,108 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
        skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
 }
 
-extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 
-extern int  dccp_retransmit_skb(struct sock *sk);
+int dccp_retransmit_skb(struct sock *sk);
 
-extern void dccp_send_ack(struct sock *sk);
-extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
-                               struct request_sock *rsk);
+void dccp_send_ack(struct sock *sk);
+void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+                        struct request_sock *rsk);
 
-extern void dccp_send_sync(struct sock *sk, const u64 seq,
-                          const enum dccp_pkt_type pkt_type);
+void dccp_send_sync(struct sock *sk, const u64 seq,
+                   const enum dccp_pkt_type pkt_type);
 
 /*
  * TX Packet Dequeueing Interface
  */
-extern void            dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
-extern bool            dccp_qpolicy_full(struct sock *sk);
-extern void            dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
-extern struct sk_buff  *dccp_qpolicy_top(struct sock *sk);
-extern struct sk_buff  *dccp_qpolicy_pop(struct sock *sk);
-extern bool            dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
+void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
+bool dccp_qpolicy_full(struct sock *sk);
+void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *dccp_qpolicy_top(struct sock *sk);
+struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
+bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
 
 /*
  * TX Packet Output and TX Timers
  */
-extern void   dccp_write_xmit(struct sock *sk);
-extern void   dccp_write_space(struct sock *sk);
-extern void   dccp_flush_write_queue(struct sock *sk, long *time_budget);
+void dccp_write_xmit(struct sock *sk);
+void dccp_write_space(struct sock *sk);
+void dccp_flush_write_queue(struct sock *sk, long *time_budget);
 
-extern void dccp_init_xmit_timers(struct sock *sk);
+void dccp_init_xmit_timers(struct sock *sk);
 static inline void dccp_clear_xmit_timers(struct sock *sk)
 {
        inet_csk_clear_xmit_timers(sk);
 }
 
-extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
 
-extern const char *dccp_packet_name(const int type);
+const char *dccp_packet_name(const int type);
 
-extern void dccp_set_state(struct sock *sk, const int state);
-extern void dccp_done(struct sock *sk);
+void dccp_set_state(struct sock *sk, const int state);
+void dccp_done(struct sock *sk);
 
-extern int  dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
-                           struct sk_buff const *skb);
+int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
+                   struct sk_buff const *skb);
 
-extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
-extern struct sock *dccp_create_openreq_child(struct sock *sk,
-                                             const struct request_sock *req,
-                                             const struct sk_buff *skb);
+struct sock *dccp_create_openreq_child(struct sock *sk,
+                                      const struct request_sock *req,
+                                      const struct sk_buff *skb);
 
-extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
-extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
-                                             struct sk_buff *skb,
-                                             struct request_sock *req,
-                                             struct dst_entry *dst);
-extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
-                                  struct request_sock *req,
-                                  struct request_sock **prev);
+struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+                                      struct request_sock *req,
+                                      struct dst_entry *dst);
+struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+                           struct request_sock *req,
+                           struct request_sock **prev);
 
-extern int dccp_child_process(struct sock *parent, struct sock *child,
-                             struct sk_buff *skb);
-extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-                                 struct dccp_hdr *dh, unsigned int len);
-extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-                               const struct dccp_hdr *dh, const unsigned int len);
+int dccp_child_process(struct sock *parent, struct sock *child,
+                      struct sk_buff *skb);
+int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+                          struct dccp_hdr *dh, unsigned int len);
+int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
+                        const struct dccp_hdr *dh, const unsigned int len);
 
-extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
-extern void dccp_destroy_sock(struct sock *sk);
+int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
+void dccp_destroy_sock(struct sock *sk);
 
-extern void            dccp_close(struct sock *sk, long timeout);
-extern struct sk_buff  *dccp_make_response(struct sock *sk,
-                                           struct dst_entry *dst,
-                                           struct request_sock *req);
+void dccp_close(struct sock *sk, long timeout);
+struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+                                  struct request_sock *req);
 
-extern int        dccp_connect(struct sock *sk);
-extern int        dccp_disconnect(struct sock *sk, int flags);
-extern int        dccp_getsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, int __user *optlen);
-extern int        dccp_setsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, unsigned int optlen);
+int dccp_connect(struct sock *sk);
+int dccp_disconnect(struct sock *sk, int flags);
+int dccp_getsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, int __user *optlen);
+int dccp_setsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
-extern int        compat_dccp_getsockopt(struct sock *sk,
-                               int level, int optname,
-                               char __user *optval, int __user *optlen);
-extern int        compat_dccp_setsockopt(struct sock *sk,
-                               int level, int optname,
-                               char __user *optval, unsigned int optlen);
+int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, int __user *optlen);
+int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, unsigned int optlen);
 #endif
-extern int        dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int        dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                               struct msghdr *msg, size_t size);
-extern int        dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
-                               struct msghdr *msg, size_t len, int nonblock,
-                               int flags, int *addr_len);
-extern void       dccp_shutdown(struct sock *sk, int how);
-extern int        inet_dccp_listen(struct socket *sock, int backlog);
-extern unsigned int dccp_poll(struct file *file, struct socket *sock,
-                            poll_table *wait);
-extern int        dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
-                                  int addr_len);
-
-extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int        dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
-extern void       dccp_send_close(struct sock *sk, const int active);
-extern int        dccp_invalid_packet(struct sk_buff *skb);
-extern u32        dccp_sample_rtt(struct sock *sk, long delta);
+int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                size_t size);
+int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
+                struct msghdr *msg, size_t len, int nonblock, int flags,
+                int *addr_len);
+void dccp_shutdown(struct sock *sk, int how);
+int inet_dccp_listen(struct socket *sock, int backlog);
+unsigned int dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait);
+int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
+int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
+void dccp_send_close(struct sock *sk, const int active);
+int dccp_invalid_packet(struct sk_buff *skb);
+u32 dccp_sample_rtt(struct sock *sk, long delta);
 
 static inline int dccp_bad_service_code(const struct sock *sk,
                                        const __be32 service)
@@ -475,25 +469,25 @@ static inline int dccp_ack_pending(const struct sock *sk)
        return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
-extern int  dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
-extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
-extern int  dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
-extern int  dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
-                                 struct sk_buff *skb);
-extern int  dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
-extern void dccp_feat_list_purge(struct list_head *fn_list);
-
-extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
-extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
-extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
-extern u32 dccp_timestamp(void);
-extern void dccp_timestamping_init(void);
-extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
-                             const void *value, unsigned char len);
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
+int dccp_feat_finalise_settings(struct dccp_sock *dp);
+int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
+int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
+                         struct sk_buff *skb);
+int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
+void dccp_feat_list_purge(struct list_head *fn_list);
+
+int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
+int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
+int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
+u32 dccp_timestamp(void);
+void dccp_timestamping_init(void);
+int dccp_insert_option(struct sk_buff *skb, unsigned char option,
+                      const void *value, unsigned char len);
 
 #ifdef CONFIG_SYSCTL
-extern int dccp_sysctl_init(void);
-extern void dccp_sysctl_exit(void);
+int dccp_sysctl_init(void);
+void dccp_sysctl_exit(void);
 #else
 static inline int dccp_sysctl_init(void)
 {
index 90b957d34d2602987d49175ef483af3463f9c523..0e75cebb2187ef41c5e06c0e18e9c89b73a27cd2 100644 (file)
@@ -107,13 +107,13 @@ extern unsigned long sysctl_dccp_sequence_window;
 extern int          sysctl_dccp_rx_ccid;
 extern int          sysctl_dccp_tx_ccid;
 
-extern int  dccp_feat_init(struct sock *sk);
-extern void dccp_feat_initialise_sysctls(void);
-extern int  dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
-                                 u8 const *list, u8 len);
-extern int  dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
-                                   u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
-extern int  dccp_feat_clone_list(struct list_head const *, struct list_head *);
+int dccp_feat_init(struct sock *sk);
+void dccp_feat_initialise_sysctls(void);
+int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
+                         u8 const *list, u8 len);
+int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
+                           u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
+int dccp_feat_clone_list(struct list_head const *, struct list_head *);
 
 /*
  * Encoding variable-length options and their maximum length.
@@ -127,11 +127,11 @@ extern int  dccp_feat_clone_list(struct list_head const *, struct list_head *);
  */
 #define DCCP_OPTVAL_MAXLEN     6
 
-extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
-extern u64  dccp_decode_value_var(const u8 *bf, const u8 len);
-extern u64  dccp_feat_nn_get(struct sock *sk, u8 feat);
+void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
+u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
 
-extern int  dccp_insert_option_mandatory(struct sk_buff *skb);
-extern int  dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
-                              u8 *val, u8 len, bool repeat_first);
+int dccp_insert_option_mandatory(struct sk_buff *skb);
+int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, u8 *val, u8 len,
+                      bool repeat_first);
 #endif /* _DCCP_FEAT_H */
index ebc54fef85a5ed15110fe39002defd42d6a0b8f7..720c36225ed9b3219a5b5929e509fb9f660e0d08 100644 (file)
@@ -409,9 +409,9 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        newinet            = inet_sk(newsk);
        ireq               = inet_rsk(req);
-       newinet->inet_daddr     = ireq->rmt_addr;
-       newinet->inet_rcv_saddr = ireq->loc_addr;
-       newinet->inet_saddr     = ireq->loc_addr;
+       newinet->inet_daddr     = ireq->ir_rmt_addr;
+       newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+       newinet->inet_saddr     = ireq->ir_loc_addr;
        newinet->inet_opt       = ireq->opt;
        ireq->opt          = NULL;
        newinet->mc_index  = inet_iif(skb);
@@ -516,10 +516,10 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct dccp_hdr *dh = dccp_hdr(skb);
 
-               dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
-                                                             ireq->rmt_addr);
-               err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-                                           ireq->rmt_addr,
+               dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
+                                                             ireq->ir_rmt_addr);
+               err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+                                           ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }
@@ -641,8 +641,8 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq = inet_rsk(req);
-       ireq->loc_addr = ip_hdr(skb)->daddr;
-       ireq->rmt_addr = ip_hdr(skb)->saddr;
+       ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+       ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
 
        /*
         * Step 3: Process LISTEN state
index 6cf9f7782ad4238208173f390369b5fc4cc75e2b..4ac71ff7c2e47c4a3bcec19206845c8afd7ce9e8 100644 (file)
@@ -67,7 +67,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
        struct dccp_hdr *dh = dccp_hdr(skb);
 
        dccp_csum_outgoing(skb);
-       dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
+       dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
 }
 
 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
@@ -216,7 +216,7 @@ out:
 
 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 {
-       struct inet6_request_sock *ireq6 = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct in6_addr *final_p, final;
@@ -226,12 +226,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_DCCP;
-       fl6.daddr = ireq6->rmt_addr;
-       fl6.saddr = ireq6->loc_addr;
+       fl6.daddr = ireq->ir_v6_rmt_addr;
+       fl6.saddr = ireq->ir_v6_loc_addr;
        fl6.flowlabel = 0;
-       fl6.flowi6_oif = ireq6->iif;
-       fl6.fl6_dport = inet_rsk(req)->rmt_port;
-       fl6.fl6_sport = inet_rsk(req)->loc_port;
+       fl6.flowi6_oif = ireq->ir_iif;
+       fl6.fl6_dport = ireq->ir_rmt_port;
+       fl6.fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 
@@ -249,9 +249,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
                struct dccp_hdr *dh = dccp_hdr(skb);
 
                dh->dccph_checksum = dccp_v6_csum_finish(skb,
-                                                        &ireq6->loc_addr,
-                                                        &ireq6->rmt_addr);
-               fl6.daddr = ireq6->rmt_addr;
+                                                        &ireq->ir_v6_loc_addr,
+                                                        &ireq->ir_v6_rmt_addr);
+               fl6.daddr = ireq->ir_v6_rmt_addr;
                err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -264,8 +264,7 @@ done:
 static void dccp_v6_reqsk_destructor(struct request_sock *req)
 {
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-       if (inet6_rsk(req)->pktopts != NULL)
-               kfree_skb(inet6_rsk(req)->pktopts);
+       kfree_skb(inet_rsk(req)->pktopts);
 }
 
 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
@@ -359,7 +358,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct request_sock *req;
        struct dccp_request_sock *dreq;
-       struct inet6_request_sock *ireq6;
+       struct inet_request_sock *ireq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
@@ -398,22 +397,22 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
 
-       ireq6 = inet6_rsk(req);
-       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
-       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+       ireq = inet_rsk(req);
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
-               ireq6->pktopts = skb;
+               ireq->pktopts = skb;
        }
-       ireq6->iif = sk->sk_bound_dev_if;
+       ireq->ir_iif = sk->sk_bound_dev_if;
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               ireq6->iif = inet6_iif(skb);
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
 
        /*
         * Step 3: Process LISTEN state
@@ -446,7 +445,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
 {
-       struct inet6_request_sock *ireq6 = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp6_sock *newdp6;
@@ -467,11 +466,11 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               newnp->rcv_saddr = newnp->saddr;
+               newsk->sk_v6_rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -505,12 +504,12 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_DCCP;
-               fl6.daddr = ireq6->rmt_addr;
+               fl6.daddr = ireq->ir_v6_rmt_addr;
                final_p = fl6_update_dst(&fl6, np->opt, &final);
-               fl6.saddr = ireq6->loc_addr;
+               fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
-               fl6.fl6_dport = inet_rsk(req)->rmt_port;
-               fl6.fl6_sport = inet_rsk(req)->loc_port;
+               fl6.fl6_dport = ireq->ir_rmt_port;
+               fl6.fl6_sport = htons(ireq->ir_num);
                security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
                dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
@@ -538,10 +537,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       newnp->daddr = ireq6->rmt_addr;
-       newnp->saddr = ireq6->loc_addr;
-       newnp->rcv_saddr = ireq6->loc_addr;
-       newsk->sk_bound_dev_if = ireq6->iif;
+       newsk->sk_v6_daddr      = ireq->ir_v6_rmt_addr;
+       newnp->saddr            = ireq->ir_v6_loc_addr;
+       newsk->sk_v6_rcv_saddr  = ireq->ir_v6_loc_addr;
+       newsk->sk_bound_dev_if  = ireq->ir_iif;
 
        /* Now IPv6 options...
 
@@ -554,10 +553,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
-       if (ireq6->pktopts != NULL) {
-               newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
-               consume_skb(ireq6->pktopts);
-               ireq6->pktopts = NULL;
+       if (ireq->pktopts != NULL) {
+               newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+               consume_skb(ireq->pktopts);
+               ireq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
@@ -885,7 +884,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        return -EINVAL;
        }
 
-       np->daddr = usin->sin6_addr;
+       sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -915,16 +914,16 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        goto failure;
                }
                ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-               ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
+               ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);
 
                return err;
        }
 
-       if (!ipv6_addr_any(&np->rcv_saddr))
-               saddr = &np->rcv_saddr;
+       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+               saddr = &sk->sk_v6_rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_DCCP;
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.fl6_dport = usin->sin6_port;
@@ -941,7 +940,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               np->rcv_saddr = *saddr;
+               sk->sk_v6_rcv_saddr = *saddr;
        }
 
        /* set the source address */
@@ -963,7 +962,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                goto late_failure;
 
        dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
-                                                     np->daddr.s6_addr32,
+                                                     sk->sk_v6_daddr.s6_addr32,
                                                      inet->inet_sport,
                                                      inet->inet_dport);
        err = dccp_connect(sk);
index 6eef81fdbe566f5623fc0816370ac15b4c01dc5a..af259e15e7f0b8c4cb09381bba5423a59c7e5540 100644 (file)
@@ -25,12 +25,10 @@ struct dccp6_sock {
 
 struct dccp6_request_sock {
        struct dccp_request_sock  dccp;
-       struct inet6_request_sock inet6;
 };
 
 struct dccp6_timewait_sock {
        struct inet_timewait_sock   inet;
-       struct inet6_timewait_sock  tw6;
 };
 
 #endif /* _DCCP_IPV6_H */
index 662071b249cc627130cf90eeaa8e5b7bcd8b829d..9e2f78bc1553e38ed45c66e3c3aba4db302fa0b0 100644 (file)
@@ -56,12 +56,9 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
-                       struct inet6_timewait_sock *tw6;
 
-                       tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
-                       tw6 = inet6_twsk((struct sock *)tw);
-                       tw6->tw_v6_daddr = np->daddr;
-                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+                       tw->tw_v6_daddr = sk->sk_v6_daddr;
+                       tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_ipv6only = np->ipv6only;
                }
 #endif
@@ -269,10 +266,10 @@ int dccp_reqsk_init(struct request_sock *req,
 {
        struct dccp_request_sock *dreq = dccp_rsk(req);
 
-       inet_rsk(req)->rmt_port   = dccp_hdr(skb)->dccph_sport;
-       inet_rsk(req)->loc_port   = dccp_hdr(skb)->dccph_dport;
-       inet_rsk(req)->acked      = 0;
-       dreq->dreq_timestamp_echo = 0;
+       inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
+       inet_rsk(req)->ir_num      = ntohs(dccp_hdr(skb)->dccph_dport);
+       inet_rsk(req)->acked       = 0;
+       dreq->dreq_timestamp_echo  = 0;
 
        /* inherit feature negotiation options from listening socket */
        return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
index d17fc90a74b63822bb6de1f647c34903a855b197..8876078859dac20ef543043ae5b98ec42f8731cd 100644 (file)
@@ -424,8 +424,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
-       dh->dccph_sport = inet_rsk(req)->loc_port;
-       dh->dccph_dport = inet_rsk(req)->rmt_port;
+       dh->dccph_sport = htons(inet_rsk(req)->ir_num);
+       dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
index ba64750f038726a990caa71d90e45f77bad93230..eb892b4f48144966e47f386108942f51b8b85e50 100644 (file)
@@ -1158,10 +1158,8 @@ static int __init dccp_init(void)
                goto out_free_bind_bucket_cachep;
        }
 
-       for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
+       for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
                INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
-               INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
-       }
 
        if (inet_ehash_locks_alloc(&dccp_hashinfo))
                        goto out_free_dccp_ehash;
index 2a7efe388344fdd37b10487c50ba3d712eedbf8a..e83015cecfa7507d551bd19e4b4121ad0f25eeaf 100644 (file)
@@ -87,7 +87,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
 }
 
 
-static unsigned int dnrmg_hook(unsigned int hook,
+static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
                        struct sk_buff *skb,
                        const struct net_device *in,
                        const struct net_device *out,
index be1f64d35358fbfbf61d84683c52e4d6fb423f34..8f032bae60ad8fdc8e284c5cd5ae7501660fcf50 100644 (file)
@@ -58,7 +58,7 @@
 #include <net/ipv6.h>
 #include <net/ip.h>
 #include <net/dsa.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 __setup("ether=", netdev_boot_setup);
 
@@ -133,7 +133,7 @@ int eth_rebuild_header(struct sk_buff *skb)
                return arp_find(eth->h_dest, skb);
 #endif
        default:
-               printk(KERN_DEBUG
+               netdev_dbg(dev,
                       "%s: unable to resolve type %X addresses.\n",
                       dev->name, ntohs(eth->h_proto));
 
@@ -169,20 +169,9 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
                else
                        skb->pkt_type = PACKET_MULTICAST;
        }
-
-       /*
-        *      This ALLMULTI check should be redundant by 1.4
-        *      so don't forget to remove it.
-        *
-        *      Seems, you forgot to remove it. All silly devices
-        *      seems to set IFF_PROMISC.
-        */
-
-       else if (1 /*dev->flags&IFF_PROMISC */ ) {
-               if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-                                                     dev->dev_addr)))
-                       skb->pkt_type = PACKET_OTHERHOST;
-       }
+       else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+                                                  dev->dev_addr)))
+               skb->pkt_type = PACKET_OTHERHOST;
 
        /*
         * Some variants of DSA tagging don't have an ethertype field
@@ -190,12 +179,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
         * variants has been configured on the receiving interface,
         * and if so, set skb->protocol without looking at the packet.
         */
-       if (netdev_uses_dsa_tags(dev))
+       if (unlikely(netdev_uses_dsa_tags(dev)))
                return htons(ETH_P_DSA);
-       if (netdev_uses_trailer_tags(dev))
+
+       if (unlikely(netdev_uses_trailer_tags(dev)))
                return htons(ETH_P_TRAILER);
 
-       if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+       if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
                return eth->h_proto;
 
        /*
@@ -204,7 +194,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
         *      won't work for fault tolerant netware but does for the rest.
         */
-       if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
+       if (unlikely(skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF))
                return htons(ETH_P_802_3);
 
        /*
index c85e71e0c7ffc640bd9592ce52b03dbde6cad926..ff41b4d60d302e18a1ebcf1bdfc950970fdd3aae 100644 (file)
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
        real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
+       if (real_dev->type != ARPHRD_IEEE802154)
+               return -EINVAL;
 
        lowpan_dev_info(dev)->real_dev = real_dev;
        lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 
        entry->ldev = dev;
 
+       /* Set the lowpan hardware address to the wpan hardware address. */
+       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        INIT_LIST_HEAD(&entry->list);
        list_add_tail(&entry->list, &lowpan_devices);
index 7a1874b7b8fd4431b6cc7d383ae5e96923dbdda3..f4a159e705c0c57af71d6efb99a3608f2a304f5a 100644 (file)
@@ -245,31 +245,6 @@ out:
 }
 EXPORT_SYMBOL(inet_listen);
 
-u32 inet_ehash_secret __read_mostly;
-EXPORT_SYMBOL(inet_ehash_secret);
-
-u32 ipv6_hash_secret __read_mostly;
-EXPORT_SYMBOL(ipv6_hash_secret);
-
-/*
- * inet_ehash_secret must be set exactly once, and to a non nul value
- * ipv6_hash_secret must be set exactly once.
- */
-void build_ehash_secret(void)
-{
-       u32 rnd;
-
-       do {
-               get_random_bytes(&rnd, sizeof(rnd));
-       } while (rnd == 0);
-
-       if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
-               get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-               net_secret_init();
-       }
-}
-EXPORT_SYMBOL(build_ehash_secret);
-
 /*
  *     Create an inet socket.
  */
@@ -286,10 +261,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
        int try_loading_module = 0;
        int err;
 
-       if (unlikely(!inet_ehash_secret))
-               if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
-                       build_ehash_secret();
-
        sock->state = SS_UNCONNECTED;
 
        /* Look for the requested type/protocol pair. */
@@ -1256,36 +1227,36 @@ static int inet_gso_send_check(struct sk_buff *skb)
        if (ihl < sizeof(*iph))
                goto out;
 
+       proto = iph->protocol;
+
+       /* Warning: after this point, iph might no longer be valid */
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
-
        __skb_pull(skb, ihl);
+
        skb_reset_transport_header(skb);
-       iph = ip_hdr(skb);
-       proto = iph->protocol;
        err = -EPROTONOSUPPORT;
 
-       rcu_read_lock();
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_send_check))
                err = ops->callbacks.gso_send_check(skb);
-       rcu_read_unlock();
 
 out:
        return err;
 }
 
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
-       netdev_features_t features)
+                                       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
+       unsigned int offset = 0;
        struct iphdr *iph;
+       bool tunnel;
        int proto;
+       int nhoff;
        int ihl;
        int id;
-       unsigned int offset = 0;
-       bool tunnel;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
                     ~(SKB_GSO_TCPV4 |
@@ -1293,12 +1264,16 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
+                      SKB_GSO_IPIP |
+                      SKB_GSO_SIT |
                       SKB_GSO_TCPV6 |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_MPLS |
                       0)))
                goto out;
 
+       skb_reset_network_header(skb);
+       nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                goto out;
 
@@ -1307,42 +1282,49 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        if (ihl < sizeof(*iph))
                goto out;
 
+       id = ntohs(iph->id);
+       proto = iph->protocol;
+
+       /* Warning: after this point, iph might no longer be valid */
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
+       __skb_pull(skb, ihl);
 
-       tunnel = !!skb->encapsulation;
+       tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+       if (tunnel)
+               features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       SKB_GSO_CB(skb)->encap_level += ihl;
 
-       __skb_pull(skb, ihl);
        skb_reset_transport_header(skb);
-       iph = ip_hdr(skb);
-       id = ntohs(iph->id);
-       proto = iph->protocol;
+
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
-       rcu_read_lock();
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);
-       rcu_read_unlock();
 
        if (IS_ERR_OR_NULL(segs))
                goto out;
 
        skb = segs;
        do {
-               iph = ip_hdr(skb);
+               iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
                if (!tunnel && proto == IPPROTO_UDP) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
                        if (skb->next != NULL)
                                iph->frag_off |= htons(IP_MF);
-                       offset += (skb->len - skb->mac_len - iph->ihl * 4);
+                       offset += skb->len - nhoff - ihl;
                } else  {
                        iph->id = htons(id++);
                }
-               iph->tot_len = htons(skb->len - skb->mac_len);
-               iph->check = 0;
-               iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
+               iph->tot_len = htons(skb->len - nhoff);
+               ip_send_check(iph);
+               if (tunnel) {
+                       skb_reset_inner_headers(skb);
+                       skb->encapsulation = 1;
+               }
+               skb->network_header = (u8 *)iph - skb->head;
        } while ((skb = skb->next));
 
 out:
@@ -1548,6 +1530,7 @@ static const struct net_protocol tcp_protocol = {
 };
 
 static const struct net_protocol udp_protocol = {
+       .early_demux =  udp_v4_early_demux,
        .handler =      udp_rcv,
        .err_handler =  udp_err,
        .no_policy =    1,
@@ -1648,6 +1631,13 @@ static struct packet_offload ip_packet_offload __read_mostly = {
        },
 };
 
+static const struct net_offload ipip_offload = {
+       .callbacks = {
+               .gso_send_check = inet_gso_send_check,
+               .gso_segment    = inet_gso_segment,
+       },
+};
+
 static int __init ipv4_offload_init(void)
 {
        /*
@@ -1659,6 +1649,7 @@ static int __init ipv4_offload_init(void)
                pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
 
        dev_add_offload(&ip_packet_offload);
+       inet_add_offload(&ipip_offload, IPPROTO_IPIP);
        return 0;
 }
 
@@ -1707,8 +1698,6 @@ static int __init inet_init(void)
        ip_static_sysctl_init();
 #endif
 
-       tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
        /*
         *      Add all the base protocols.
         */
index b3f627ac4ed8ae5031d559a5648612b46f015e25..d846304b7b89d619d53609c5e435b3ddcc3e2d84 100644 (file)
@@ -933,7 +933,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                local_bh_disable();
 
                frn->tb_id = tb->tb_id;
-               rcu_read_lock();
                frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
 
                if (!frn->err) {
@@ -942,7 +941,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                        frn->type = res.type;
                        frn->scope = res.scope;
                }
-               rcu_read_unlock();
                local_bh_enable();
        }
 }
index af0f14aba169a2fa11e03eb0fb1bde4b50261568..388d113fd289b9134cf34e930a26c6c99b7ea721 100644 (file)
@@ -24,21 +24,17 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
 }
 
 /* Exported by fib_semantics.c */
-extern void fib_release_info(struct fib_info *);
-extern struct fib_info *fib_create_info(struct fib_config *cfg);
-extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
-extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-                        u32 tb_id, u8 type, __be32 dst,
-                        int dst_len, u8 tos, struct fib_info *fi,
-                        unsigned int);
-extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
-                     int dst_len, u32 tb_id, struct nl_info *info,
-                     unsigned int nlm_flags);
-extern struct fib_alias *fib_find_alias(struct list_head *fah,
-                                       u8 tos, u32 prio);
-extern int fib_detect_death(struct fib_info *fi, int order,
-                           struct fib_info **last_resort,
-                           int *last_idx, int dflt);
+void fib_release_info(struct fib_info *);
+struct fib_info *fib_create_info(struct fib_config *cfg);
+int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
+int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
+                 u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi,
+                 unsigned int);
+void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
+              u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
+struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
+int fib_detect_death(struct fib_info *fi, int order,
+                    struct fib_info **last_resort, int *last_idx, int dflt);
 
 static inline void fib_result_assign(struct fib_result *res,
                                     struct fib_info *fi)
index d5dbca5ecf628c4dd5a52daf306047fb971de33e..e63f47a4e651f2c5587723f7796e2fd9ec43b4f8 100644 (file)
@@ -380,7 +380,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
 }
 
 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
-              int dst_len, u32 tb_id, struct nl_info *info,
+              int dst_len, u32 tb_id, const struct nl_info *info,
               unsigned int nlm_flags)
 {
        struct sk_buff *skb;
index 3df6d3edb2a15a98cb0e90a4e5ed935f42f15f1c..ec9a9ef4ce50851639cf4cbe8e497390bea371f6 100644 (file)
@@ -762,12 +762,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 
                if (IS_LEAF(node) || ((struct tnode *) node)->pos >
                   tn->pos + tn->bits - 1) {
-                       if (tkey_extract_bits(node->key,
-                                             oldtnode->pos + oldtnode->bits,
-                                             1) == 0)
-                               put_child(tn, 2*i, node);
-                       else
-                               put_child(tn, 2*i+1, node);
+                       put_child(tn,
+                               tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
+                               node);
                        continue;
                }
 
@@ -1120,12 +1117,8 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
                 *  first tnode need some special handling
                 */
 
-               if (tp)
-                       pos = tp->pos+tp->bits;
-               else
-                       pos = 0;
-
                if (n) {
+                       pos = tp ? tp->pos+tp->bits : 0;
                        newpos = tkey_mismatch(key, pos, n->key);
                        tn = tnode_new(n->key, newpos, 1);
                } else {
index 736c9fc3ef93c0f2de02658997782f7af011c122..5893e99e82990ae75f4221249e3e5fcbceb819c4 100644 (file)
@@ -93,35 +93,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
-{
-       int err;
-
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
-
-       if (skb_is_gso(skb)) {
-               err = skb_unclone(skb, GFP_ATOMIC);
-               if (unlikely(err))
-                       goto error;
-               skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
-               return skb;
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
-               err = skb_checksum_help(skb);
-               if (unlikely(err))
-                       goto error;
-       } else if (skb->ip_summed != CHECKSUM_PARTIAL)
-               skb->ip_summed = CHECKSUM_NONE;
-
-       return skb;
-error:
-       kfree_skb(skb);
-       return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(gre_handle_offloads);
-
 static __sum16 check_checksum(struct sk_buff *skb)
 {
        __sum16 csum = 0;
index 55e6bfb3a28921c26b0be3170f10c1245a9a74ae..e5d436188464eba55eca773f4fb88fd0f0634e97 100644 (file)
@@ -39,7 +39,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
                                  SKB_GSO_TCP_ECN |
-                                 SKB_GSO_GRE)))
+                                 SKB_GSO_GRE |
+                                 SKB_GSO_IPIP)))
                goto out;
 
        if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
index 5f7d11a458713f9c755dd1a1a40289b180a3e041..5c0e8bc6e5ba275d2469336533b5848de94ae6c1 100644 (file)
@@ -353,6 +353,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        saddr = fib_compute_spec_dst(skb);
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
+
        if (icmp_param->replyopts.opt.opt.optlen) {
                ipc.opt = &icmp_param->replyopts.opt;
                if (ipc.opt->opt.srr)
@@ -608,6 +611,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param->replyopts.opt;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
                               type, code, icmp_param);
index dace87f06e5f9bf22ed99d078d058ea8f1339f4a..7defdc9ba16744fd263c539c37f125fc31422c87 100644 (file)
@@ -736,7 +736,7 @@ static void igmp_gq_timer_expire(unsigned long data)
 
        in_dev->mr_gq_running = 0;
        igmpv3_send_report(in_dev, NULL);
-       __in_dev_put(in_dev);
+       in_dev_put(in_dev);
 }
 
 static void igmp_ifc_timer_expire(unsigned long data)
@@ -749,7 +749,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
                igmp_ifc_start_timer(in_dev,
                                     unsolicited_report_interval(in_dev));
        }
-       __in_dev_put(in_dev);
+       in_dev_put(in_dev);
 }
 
 static void igmp_ifc_event(struct in_device *in_dev)
index 6acb541c90910204f02449e7500138362da6998a..fc0e649cc002beb0631c390e6aed5f1ea3fe7390 100644 (file)
@@ -29,27 +29,19 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-/*
- * This struct holds the first and last local port number.
- */
-struct local_ports sysctl_local_ports __read_mostly = {
-       .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
-       .range = { 32768, 61000 },
-};
-
 unsigned long *sysctl_local_reserved_ports;
 EXPORT_SYMBOL(sysctl_local_reserved_ports);
 
-void inet_get_local_port_range(int *low, int *high)
+void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
-               *low = sysctl_local_ports.range[0];
-               *high = sysctl_local_ports.range[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+               *low = net->ipv4.sysctl_local_ports.range[0];
+               *high = net->ipv4.sysctl_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
@@ -79,17 +71,16 @@ int inet_csk_bind_conflict(const struct sock *sk,
                            (!reuseport || !sk2->sk_reuseport ||
                            (sk2->sk_state != TCP_TIME_WAIT &&
                             !uid_eq(uid, sock_i_uid(sk2))))) {
-                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+
+                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {
-                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                }
@@ -116,7 +107,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
                int remaining, rover, low, high;
 
 again:
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
                smallest_rover = rover = net_random() % remaining + low;
 
@@ -421,8 +412,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol,
                           flags,
-                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -457,8 +448,8 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
-                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -504,9 +495,9 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
             prev = &req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
-               if (ireq->rmt_port == rport &&
-                   ireq->rmt_addr == raddr &&
-                   ireq->loc_addr == laddr &&
+               if (ireq->ir_rmt_port == rport &&
+                   ireq->ir_rmt_addr == raddr &&
+                   ireq->ir_loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
                        WARN_ON(req->sk);
                        *prevp = prev;
@@ -523,7 +514,8 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-       const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+       const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
+                                    inet_rsk(req)->ir_rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);
 
        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -683,9 +675,9 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                newsk->sk_state = TCP_SYN_RECV;
                newicsk->icsk_bind_hash = NULL;
 
-               inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
-               inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
-               inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
+               inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
+               inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
+               inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
                newsk->sk_write_space = sk_stream_write_space;
 
                newicsk->icsk_retransmits = 0;
index 5f648751fce2d03418f4a6425c7ba0dbfb3ddfc0..56a964a553d2c739a03b880acd7158a4c9714b66 100644 (file)
@@ -121,13 +121,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
-               const struct ipv6_pinfo *np = inet6_sk(sk);
 
-               *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
-               *(struct in6_addr *)r->id.idiag_dst = np->daddr;
+               *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
 
                if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-                       if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+                       if (nla_put_u8(skb, INET_DIAG_TCLASS,
+                                      inet6_sk(sk)->tclass) < 0)
                                goto errout;
        }
 #endif
@@ -222,7 +222,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
-       long tmo;
+       s32 tmo;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
 
@@ -234,7 +234,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r = nlmsg_data(nlh);
        BUG_ON(tw->tw_state != TCP_TIME_WAIT);
 
-       tmo = tw->tw_ttd - jiffies;
+       tmo = tw->tw_ttd - inet_tw_time_stamp();
        if (tmo < 0)
                tmo = 0;
 
@@ -248,18 +248,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->id.idiag_dst[0]    = tw->tw_daddr;
        r->idiag_state        = tw->tw_substate;
        r->idiag_timer        = 3;
-       r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
+       r->idiag_expires      = jiffies_to_msecs(tmo);
        r->idiag_rqueue       = 0;
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
        r->idiag_inode        = 0;
 #if IS_ENABLED(CONFIG_IPV6)
        if (tw->tw_family == AF_INET6) {
-               const struct inet6_timewait_sock *tw6 =
-                                               inet6_twsk((struct sock *)tw);
-
-               *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
-               *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
+               *(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
        }
 #endif
 
@@ -273,10 +270,11 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
-               return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, r, portid, seq, nlmsg_flags,
-                                          unlh);
-       return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
+               return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+                                          nlmsg_flags, unlh);
+
+       return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
+                                 nlmsg_flags, unlh);
 }
 
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -338,12 +336,9 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
                err = 0;
 
 out:
-       if (sk) {
-               if (sk->sk_state == TCP_TIME_WAIT)
-                       inet_twsk_put((struct inet_timewait_sock *)sk);
-               else
-                       sock_put(sk);
-       }
+       if (sk)
+               sock_gen_put(sk);
+
 out_nosk:
        return err;
 }
@@ -489,10 +484,9 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
        entry.family = sk->sk_family;
 #if IS_ENABLED(CONFIG_IPV6)
        if (entry.family == AF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
 
-               entry.saddr = np->rcv_saddr.s6_addr32;
-               entry.daddr = np->daddr.s6_addr32;
+               entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
+               entry.daddr = sk->sk_v6_daddr.s6_addr32;
        } else
 #endif
        {
@@ -635,22 +629,22 @@ static int inet_csk_diag_dump(struct sock *sk,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
-static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+static int inet_twsk_diag_dump(struct sock *sk,
                               struct sk_buff *skb,
                               struct netlink_callback *cb,
                               struct inet_diag_req_v2 *r,
                               const struct nlattr *bc)
 {
+       struct inet_timewait_sock *tw = inet_twsk(sk);
+
        if (bc != NULL) {
                struct inet_diag_entry entry;
 
                entry.family = tw->tw_family;
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
-                       struct inet6_timewait_sock *tw6 =
-                                               inet6_twsk((struct sock *)tw);
-                       entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
-                       entry.daddr = tw6->tw_v6_daddr.s6_addr32;
+                       entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
+                       entry.daddr = tw->tw_v6_daddr.s6_addr32;
                } else
 #endif
                {
@@ -682,12 +676,12 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
 #if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                if (req->rsk_ops->family == AF_INET6) {
-                       entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
-                       entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+                       entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
+                       entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
                } else if (req->rsk_ops->family == AF_INET) {
-                       ipv6_addr_set_v4mapped(ireq->loc_addr,
+                       ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
                                               &entry->saddr_storage);
-                       ipv6_addr_set_v4mapped(ireq->rmt_addr,
+                       ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
                                               &entry->daddr_storage);
                        entry->saddr = entry->saddr_storage.s6_addr32;
                        entry->daddr = entry->daddr_storage.s6_addr32;
@@ -695,8 +689,8 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
        } else
 #endif
        {
-               entry->saddr = &ireq->loc_addr;
-               entry->daddr = &ireq->rmt_addr;
+               entry->saddr = &ireq->ir_loc_addr;
+               entry->daddr = &ireq->ir_rmt_addr;
        }
 }
 
@@ -731,9 +725,9 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
                tmo = 0;
 
        r->id.idiag_sport = inet->inet_sport;
-       r->id.idiag_dport = ireq->rmt_port;
-       r->id.idiag_src[0] = ireq->loc_addr;
-       r->id.idiag_dst[0] = ireq->rmt_addr;
+       r->id.idiag_dport = ireq->ir_rmt_port;
+       r->id.idiag_src[0] = ireq->ir_loc_addr;
+       r->id.idiag_dst[0] = ireq->ir_rmt_addr;
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
@@ -792,13 +786,13 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                        if (reqnum < s_reqnum)
                                continue;
-                       if (r->id.idiag_dport != ireq->rmt_port &&
+                       if (r->id.idiag_dport != ireq->ir_rmt_port &&
                            r->id.idiag_dport)
                                continue;
 
                        if (bc) {
                                inet_diag_req_addrs(sk, req, &entry);
-                               entry.dport = ntohs(ireq->rmt_port);
+                               entry.dport = ntohs(ireq->ir_rmt_port);
 
                                if (!inet_diag_bc_run(bc, &entry))
                                        continue;
@@ -911,8 +905,7 @@ skip_listen_ht:
 
                num = 0;
 
-               if (hlist_nulls_empty(&head->chain) &&
-                       hlist_nulls_empty(&head->twchain))
+               if (hlist_nulls_empty(&head->chain))
                        continue;
 
                if (i > s_i)
@@ -920,7 +913,7 @@ skip_listen_ht:
 
                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &head->chain) {
-                       struct inet_sock *inet = inet_sk(sk);
+                       int res;
 
                        if (!net_eq(sock_net(sk), net))
                                continue;
@@ -929,15 +922,19 @@ skip_listen_ht:
                        if (!(r->idiag_states & (1 << sk->sk_state)))
                                goto next_normal;
                        if (r->sdiag_family != AF_UNSPEC &&
-                                       sk->sk_family != r->sdiag_family)
+                           sk->sk_family != r->sdiag_family)
                                goto next_normal;
-                       if (r->id.idiag_sport != inet->inet_sport &&
+                       if (r->id.idiag_sport != htons(sk->sk_num) &&
                            r->id.idiag_sport)
                                goto next_normal;
-                       if (r->id.idiag_dport != inet->inet_dport &&
+                       if (r->id.idiag_dport != sk->sk_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
-                       if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                       if (sk->sk_state == TCP_TIME_WAIT)
+                               res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
+                       else
+                               res = inet_csk_diag_dump(sk, skb, cb, r, bc);
+                       if (res < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
@@ -945,33 +942,6 @@ next_normal:
                        ++num;
                }
 
-               if (r->idiag_states & TCPF_TIME_WAIT) {
-                       struct inet_timewait_sock *tw;
-
-                       inet_twsk_for_each(tw, node,
-                                   &head->twchain) {
-                               if (!net_eq(twsk_net(tw), net))
-                                       continue;
-
-                               if (num < s_num)
-                                       goto next_dying;
-                               if (r->sdiag_family != AF_UNSPEC &&
-                                               tw->tw_family != r->sdiag_family)
-                                       goto next_dying;
-                               if (r->id.idiag_sport != tw->tw_sport &&
-                                   r->id.idiag_sport)
-                                       goto next_dying;
-                               if (r->id.idiag_dport != tw->tw_dport &&
-                                   r->id.idiag_dport)
-                                       goto next_dying;
-                               if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
-                                       spin_unlock_bh(lock);
-                                       goto done;
-                               }
-next_dying:
-                               ++num;
-                       }
-               }
                spin_unlock_bh(lock);
        }
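
A minimal sketch of the dispatch introduced in the hunk above: with the dedicated timewait chain gone, the ehash walk meets both full sockets and timewait sockets, and sk_state (which sits in the sock_common shared by both layouts) picks the dump helper. The wrapper name below is illustrative; the two *_diag_dump() calls are the ones used in the hunk.

static int dump_ehash_entry(struct sock *sk, struct sk_buff *skb,
                            struct netlink_callback *cb,
                            struct inet_diag_req_v2 *r,
                            const struct nlattr *bc)
{
        /* timewait sockets are not full inet sockets, so they need
         * the dedicated dump helper
         */
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_dump(sk, skb, cb, r, bc);

        return inet_csk_diag_dump(sk, skb, cb, r, bc);
}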
 
index c5313a9c019b9b94aca569aba011b63fc452e1c5..bb075fc9a14f25169c175c7fcdcb86d56c709627 100644 (file)
@@ -93,9 +93,6 @@ void inet_frags_init(struct inet_frags *f)
        }
        rwlock_init(&f->lock);
 
-       f->rnd = (u32) ((totalram_pages ^ (totalram_pages >> 7)) ^
-                                  (jiffies ^ (jiffies >> 6)));
-
        setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
                        (unsigned long)f);
        f->secret_timer.expires = jiffies + f->secret_interval;
index 7bd8983dbfcf308e61dc55bb9491b3dd6866fa35..8b9cf279450d6cf0c24e64a20fb0d05b9fb89a82 100644 (file)
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
+static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
+                                const __u16 lport, const __be32 faddr,
+                                const __be16 fport)
+{
+       static u32 inet_ehash_secret __read_mostly;
+
+       net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
+
+       return __inet_ehashfn(laddr, lport, faddr, fport,
+                             inet_ehash_secret + net_hash_mix(net));
+}
+
+
+static unsigned int inet_sk_ehashfn(const struct sock *sk)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+       const __be32 laddr = inet->inet_rcv_saddr;
+       const __u16 lport = inet->inet_num;
+       const __be32 faddr = inet->inet_daddr;
+       const __be16 fport = inet->inet_dport;
+       struct net *net = sock_net(sk);
+
+       return inet_ehashfn(net, laddr, lport, faddr, fport);
+}
+
 /*
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
@@ -230,6 +255,19 @@ begin:
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 
+/* All sockets share common refcount, but have different destructors */
+void sock_gen_put(struct sock *sk)
+{
+       if (!atomic_dec_and_test(&sk->sk_refcnt))
+               return;
+
+       if (sk->sk_state == TCP_TIME_WAIT)
+               inet_twsk_free(inet_twsk(sk));
+       else
+               sk_free(sk);
+}
+EXPORT_SYMBOL_GPL(sock_gen_put);
+
 struct sock *__inet_lookup_established(struct net *net,
                                  struct inet_hashinfo *hashinfo,
                                  const __be32 saddr, const __be16 sport,
@@ -255,13 +293,13 @@ begin:
                if (likely(INET_MATCH(sk, net, acookie,
                                      saddr, daddr, ports, dif))) {
                        if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
-                               goto begintw;
+                               goto out;
                        if (unlikely(!INET_MATCH(sk, net, acookie,
                                                 saddr, daddr, ports, dif))) {
-                               sock_put(sk);
+                               sock_gen_put(sk);
                                goto begin;
                        }
-                       goto out;
+                       goto found;
                }
        }
        /*
@@ -271,37 +309,9 @@ begin:
         */
        if (get_nulls_value(node) != slot)
                goto begin;
-
-begintw:
-       /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_nulls_for_each_rcu(sk, node, &head->twchain) {
-               if (sk->sk_hash != hash)
-                       continue;
-               if (likely(INET_TW_MATCH(sk, net, acookie,
-                                        saddr, daddr, ports,
-                                        dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
-                               sk = NULL;
-                               goto out;
-                       }
-                       if (unlikely(!INET_TW_MATCH(sk, net, acookie,
-                                                   saddr, daddr, ports,
-                                                   dif))) {
-                               sock_put(sk);
-                               goto begintw;
-                       }
-                       goto out;
-               }
-       }
-       /*
-        * if the nulls value we got at the end of this lookup is
-        * not the expected one, we must restart lookup.
-        * We probably met an item that was moved to another chain.
-        */
-       if (get_nulls_value(node) != slot)
-               goto begintw;
-       sk = NULL;
 out:
+       sk = NULL;
+found:
        rcu_read_unlock();
        return sk;
 }
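
A hedged caller-side sketch for sock_gen_put() added above: a lookup on the established hash can now hand back either a full socket or a timewait socket, both with a reference taken, and that reference must be dropped through sock_gen_put() so the matching destructor runs. The wrapper name is illustrative, and the __inet_lookup_established() argument list is assumed to match this tree.

static void lookup_then_release(struct net *net, struct inet_hashinfo *hashinfo,
                                __be32 saddr, __be16 sport,
                                __be32 daddr, __be16 dport, int dif)
{
        struct sock *sk;

        sk = __inet_lookup_established(net, hashinfo, saddr, sport,
                                       daddr, ntohs(dport), dif);
        if (!sk)
                return;

        if (sk->sk_state == TCP_TIME_WAIT)
                pr_debug("ehash lookup returned a timewait socket\n");

        /* not sock_put(): timewait sockets must go through inet_twsk_free() */
        sock_gen_put(sk);
}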
@@ -326,39 +336,29 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
-       struct inet_timewait_sock *tw;
+       struct inet_timewait_sock *tw = NULL;
        int twrefcnt = 0;
 
        spin_lock(lock);
 
-       /* Check TIME-WAIT sockets first. */
-       sk_nulls_for_each(sk2, node, &head->twchain) {
-               if (sk2->sk_hash != hash)
-                       continue;
-
-               if (likely(INET_TW_MATCH(sk2, net, acookie,
-                                        saddr, daddr, ports, dif))) {
-                       tw = inet_twsk(sk2);
-                       if (twsk_unique(sk, sk2, twp))
-                               goto unique;
-                       else
-                               goto not_unique;
-               }
-       }
-       tw = NULL;
-
-       /* And established part... */
        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;
+
                if (likely(INET_MATCH(sk2, net, acookie,
-                                     saddr, daddr, ports, dif)))
+                                        saddr, daddr, ports, dif))) {
+                       if (sk2->sk_state == TCP_TIME_WAIT) {
+                               tw = inet_twsk(sk2);
+                               if (twsk_unique(sk, sk2, twp))
+                                       break;
+                       }
                        goto not_unique;
+               }
        }
 
-unique:
        /* Must record num and sport now. Otherwise we will see
-        * in hash table socket with a funny identity. */
+        * in hash table socket with a funny identity.
+        */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
@@ -494,7 +494,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                u32 offset = hint + port_offset;
                struct inet_timewait_sock *tw = NULL;
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
                local_bh_disable();
index 1f27c9f4afd07fbf55589ed4e6e730375dcdc0a3..6d592f8555fb8bf15506d828b53c6582386f7bec 100644 (file)
@@ -87,19 +87,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);
 
-#ifdef SOCK_REFCNT_DEBUG
-       if (atomic_read(&tw->tw_refcnt) != 1) {
-               pr_debug("%s timewait_sock %p refcnt=%d\n",
-                        tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
-       }
-#endif
-       while (refcnt) {
-               inet_twsk_put(tw);
-               refcnt--;
-       }
+       BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
+       atomic_sub(refcnt, &tw->tw_refcnt);
 }
 
-static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
+void inet_twsk_free(struct inet_timewait_sock *tw)
 {
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
@@ -118,6 +110,18 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
+static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+                                  struct hlist_nulls_head *list)
+{
+       hlist_nulls_add_head_rcu(&tw->tw_node, list);
+}
+
+static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+                                   struct hlist_head *list)
+{
+       hlist_add_head(&tw->tw_bind_node, list);
+}
+
 /*
  * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
@@ -146,26 +150,21 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        spin_lock(lock);
 
        /*
-        * Step 2: Hash TW into TIMEWAIT chain.
-        * Should be done before removing sk from established chain
-        * because readers are lockless and search established first.
+        * Step 2: Hash TW into tcp ehash chain.
+        * Notes :
+        * - tw_refcnt is set to 3 because :
+        * - We have one reference from bhash chain.
+        * - We have one reference from ehash chain.
+        * We can use atomic_set() because prior spin_lock()/spin_unlock()
+        * committed into memory all tw fields.
         */
-       inet_twsk_add_node_rcu(tw, &ehead->twchain);
+       atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+       inet_twsk_add_node_rcu(tw, &ehead->chain);
 
-       /* Step 3: Remove SK from established hash. */
+       /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-       /*
-        * Notes :
-        * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
-        * - We add one reference for the bhash link
-        * - We add one reference for the ehash link
-        * - We want this refcnt update done before allowing other
-        *   threads to find this tw in ehash chain.
-        */
-       atomic_add(1 + 1 + 1, &tw->tw_refcnt);
-
        spin_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
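
A hedged note on the refcount arithmetic in the hashdance hunk above and the __inet_twsk_kill() hunk earlier. The new comment enumerates only two of the three references behind 1 + 1 + 1; my reading is that the third is held by the flow that created the timewait socket and is dropped later via inet_twsk_put() — treat that as an assumption, it is not stated in the patch. The sketch below only restates the invariant the BUG_ON() in __inet_twsk_kill() relies on.

static void tw_refcnt_sketch(struct inet_timewait_sock *tw, int links_unhashed)
{
        /* publish side: bhash link + ehash link + creator; the comment
         * above argues a plain atomic_set() is safe because the preceding
         * spin_lock()/spin_unlock() committed all tw fields
         */
        atomic_set(&tw->tw_refcnt, 1 + 1 + 1);

        /* teardown side: at most the two link references are dropped here,
         * so the count cannot reach zero and the final free still happens
         * through inet_twsk_put() -> inet_twsk_free()
         */
        BUG_ON(links_unhashed >= atomic_read(&tw->tw_refcnt));
        atomic_sub(links_unhashed, &tw->tw_refcnt);
}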
@@ -387,11 +386,11 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
-               tw->tw_ttd = jiffies + timeo;
+               tw->tw_ttd = inet_tw_time_stamp() + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
-               tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
+               tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);
 
                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
@@ -490,7 +489,9 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 restart_rcu:
                rcu_read_lock();
 restart:
-               sk_nulls_for_each_rcu(sk, node, &head->twchain) {
+               sk_nulls_for_each_rcu(sk, node, &head->chain) {
+                       if (sk->sk_state != TCP_TIME_WAIT)
+                               continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                                atomic_read(&twsk_net(tw)->count))
index b66910aaef4d633977d2e983f2c63b419da38f77..2481993a49708337af890fad97c70f8d6c85f43d 100644 (file)
@@ -106,6 +106,7 @@ struct ip4_create_arg {
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 {
+       net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
                            ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
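
A hedged sketch of the lazy-seeding pattern that inet_ehashfn() earlier and ipqhashfn() above now share: the hash secret is drawn once, on first use, through net_get_random_once(), so it is taken after boot-time entropy has accumulated instead of at init time (which is also why the f->rnd setup was dropped from inet_frags_init() above). my_hashfn and my_secret are illustrative names.

static u32 my_hashfn(u32 key)
{
        static u32 my_secret __read_mostly;

        /* filled with random bytes exactly once, on the first call */
        net_get_random_once(&my_secret, sizeof(my_secret));

        return jhash_1word(key, my_secret);
}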
index a04d872c54f919c7133e7830773301cdf070f3ed..8fbac7de1e1b731e14aa485e75c1212618cace55 100644 (file)
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;
 
-               skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
 
-               /* specify the length of each IP datagram fragment */
-               skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
                __skb_queue_tail(queue, skb);
+       } else if (skb_is_gso(skb)) {
+               goto append;
        }
 
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       /* specify the length of each IP datagram fragment */
+       skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
@@ -1060,6 +1065,9 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
                         rt->dst.dev->mtu : dst_mtu(&rt->dst);
        cork->dst = &rt->dst;
        cork->length = 0;
+       cork->ttl = ipc->ttl;
+       cork->tos = ipc->tos;
+       cork->priority = ipc->priority;
        cork->tx_flags = ipc->tx_flags;
 
        return 0;
@@ -1311,7 +1319,9 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        if (cork->flags & IPCORK_OPT)
                opt = cork->opt;
 
-       if (rt->rt_type == RTN_MULTICAST)
+       if (cork->ttl != 0)
+               ttl = cork->ttl;
+       else if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);
@@ -1319,7 +1329,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        iph = ip_hdr(skb);
        iph->version = 4;
        iph->ihl = 5;
-       iph->tos = inet->tos;
+       iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
        iph->frag_off = df;
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
@@ -1331,7 +1341,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
                ip_options_build(skb, opt, cork->addr, rt, 0);
        }
 
-       skb->priority = sk->sk_priority;
+       skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
@@ -1481,6 +1491,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
        ipc.addr = daddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        if (replyopts.opt.opt.optlen) {
                ipc.opt = &replyopts.opt;
index d9c4f113d7093bba7eba2beefc31cd0af4b9bb95..0626f2cb192e69ecb3fdbcf6d4d12884c7bc5c6d 100644 (file)
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(ip_cmsg_recv);
 
 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
 {
-       int err;
+       int err, val;
        struct cmsghdr *cmsg;
 
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
@@ -215,6 +215,24 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
                        ipc->addr = info->ipi_spec_dst.s_addr;
                        break;
                }
+               case IP_TTL:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 1 || val > 255)
+                               return -EINVAL;
+                       ipc->ttl = val;
+                       break;
+               case IP_TOS:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 0 || val > 255)
+                               return -EINVAL;
+                       ipc->tos = val;
+                       ipc->priority = rt_tos2priority(ipc->tos);
+                       break;
+
                default:
                        return -EINVAL;
                }
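
A hedged user-space sketch of what the IP_TTL/IP_TOS handling added above accepts for sockets whose send path runs through ip_cmsg_send() (e.g. UDP): both values travel as int-sized SOL_IP ancillary data, and values outside 1..255 (TTL) or 0..255 (TOS) come back as -EINVAL per the checks above. The helper name is illustrative.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

static ssize_t send_with_ttl_tos(int fd, const void *buf, size_t len,
                                 const struct sockaddr_in *dst,
                                 int ttl, int tos)
{
        char cbuf[CMSG_SPACE(sizeof(int)) * 2];
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_name       = (void *)dst,
                .msg_namelen    = sizeof(*dst),
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = cbuf,
                .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;

        memset(cbuf, 0, sizeof(cbuf));

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_IP;
        cmsg->cmsg_type  = IP_TTL;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &ttl, sizeof(int));

        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_IP;
        cmsg->cmsg_type  = IP_TOS;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &tos, sizeof(int));

        return sendmsg(fd, &msg, 0);
}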
@@ -1034,11 +1052,12 @@ e_inval:
  * destination in skb->cb[] before dst drop.
  * This way, receiver doesnt make cache line misses to read rtable.
  */
-void ipv4_pktinfo_prepare(struct sk_buff *skb)
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
        struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
 
-       if (skb_rtable(skb)) {
+       if ((inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) &&
+           skb_rtable(skb)) {
                pktinfo->ipi_ifindex = inet_iif(skb);
                pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
        } else {
index ac9fabe0300f613e87bbe4d70e9c861fbc95889e..63a6d6d6b87581d3ac3bda52cab5833f3cb169ab 100644 (file)
@@ -623,6 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        tunnel->err_count = 0;
        }
 
+       tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
@@ -641,18 +642,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len;
-       if (max_headroom > dev->needed_headroom) {
+       if (max_headroom > dev->needed_headroom)
                dev->needed_headroom = max_headroom;
-               if (skb_cow_head(skb, dev->needed_headroom)) {
-                       dev->stats.tx_dropped++;
-                       dev_kfree_skb(skb);
-                       return;
-               }
+
+       if (skb_cow_head(skb, dev->needed_headroom)) {
+               dev->stats.tx_dropped++;
+               dev_kfree_skb(skb);
+               return;
        }
 
        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
-                           ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df,
-                           !net_eq(tunnel->net, dev_net(dev)));
+                           tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 
        return;
@@ -853,8 +853,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
-       if (!IS_ERR(itn->fb_tunnel_dev))
+       if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
+       }
        rtnl_unlock();
 
        return PTR_RET(itn->fb_tunnel_dev);
@@ -884,8 +886,6 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
-       if (itn->fb_tunnel_dev)
-               unregister_netdevice_queue(itn->fb_tunnel_dev, head);
 }
 
 void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
index d6c856b17fd4ff22c0034af676a648730dffce82..42ffbc8d65c65fda2b288008b4aed7670b43ee3f 100644 (file)
@@ -61,7 +61,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
        /* Push down and install the IP header. */
-       __skb_push(skb, sizeof(struct iphdr));
+       skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
 
        iph = ip_hdr(skb);
@@ -116,3 +116,36 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
        return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
+                                        bool csum_help,
+                                        int gso_type_mask)
+{
+       int err;
+
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
+       if (skb_is_gso(skb)) {
+               err = skb_unclone(skb, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto error;
+               skb_shinfo(skb)->gso_type |= gso_type_mask;
+               return skb;
+       }
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+               err = skb_checksum_help(skb);
+               if (unlikely(err))
+                       goto error;
+       } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+               skb->ip_summed = CHECKSUM_NONE;
+
+       return skb;
+error:
+       kfree_skb(skb);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
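
A hedged caller-side sketch for iptunnel_handle_offloads() above: on failure the helper has already freed the skb and returns an ERR_PTR, so the caller only bumps its error counter and must not free the skb again — the same contract the ipip conversion later in this commit follows. example_xmit and its body are illustrative, not part of the patch.

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
        if (IS_ERR(skb)) {
                /* skb already freed by the helper: only account the error */
                dev->stats.tx_errors++;
                return NETDEV_TX_OK;
        }

        /* ... build the outer header and transmit, e.g. via ip_tunnel_xmit() ... */
        return NETDEV_TX_OK;
}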
index e805e7b3030e3dad2f8fd83d140f0bb7f100d69c..5d9c845d288a3d8cce3eb96aeb65384e4a56edbc 100644 (file)
@@ -49,70 +49,6 @@ static struct rtnl_link_ops vti_link_ops __read_mostly;
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
 
-static int vti_err(struct sk_buff *skb, u32 info)
-{
-
-       /* All the routers (except for Linux) return only
-        * 8 bytes of packet payload. It means, that precise relaying of
-        * ICMP in the real Internet is absolutely infeasible.
-        */
-       struct net *net = dev_net(skb->dev);
-       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-       struct iphdr *iph = (struct iphdr *)skb->data;
-       const int type = icmp_hdr(skb)->type;
-       const int code = icmp_hdr(skb)->code;
-       struct ip_tunnel *t;
-       int err;
-
-       switch (type) {
-       default:
-       case ICMP_PARAMETERPROB:
-               return 0;
-
-       case ICMP_DEST_UNREACH:
-               switch (code) {
-               case ICMP_SR_FAILED:
-               case ICMP_PORT_UNREACH:
-                       /* Impossible event. */
-                       return 0;
-               default:
-                       /* All others are translated to HOST_UNREACH. */
-                       break;
-               }
-               break;
-       case ICMP_TIME_EXCEEDED:
-               if (code != ICMP_EXC_TTL)
-                       return 0;
-               break;
-       }
-
-       err = -ENOENT;
-
-       t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
-                            iph->daddr, iph->saddr, 0);
-       if (t == NULL)
-               goto out;
-
-       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, IPPROTO_IPIP, 0);
-               err = 0;
-               goto out;
-       }
-
-       err = 0;
-       if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               goto out;
-
-       if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
-               t->err_count++;
-       else
-               t->err_count = 1;
-       t->err_time = jiffies;
-out:
-       return err;
-}
-
 /* We dont digest the packet therefore let the packet pass */
 static int vti_rcv(struct sk_buff *skb)
 {
@@ -125,8 +61,17 @@ static int vti_rcv(struct sk_buff *skb)
                                  iph->saddr, iph->daddr, 0);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
+               u32 oldmark = skb->mark;
+               int ret;
+
 
-               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+               /* temporarily mark the skb with the tunnel o_key, to
+                * only match policies with this mark.
+                */
+               skb->mark = be32_to_cpu(tunnel->parms.o_key);
+               ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
+               skb->mark = oldmark;
+               if (!ret)
                        return -1;
 
                tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +80,6 @@ static int vti_rcv(struct sk_buff *skb)
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               skb->mark = 0;
                secpath_reset(skb);
                skb->dev = tunnel->dev;
                return 1;
@@ -167,7 +111,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&fl4, 0, sizeof(fl4));
        flowi4_init_output(&fl4, tunnel->parms.link,
-                          be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+                          be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
                           RT_SCOPE_UNIVERSE,
                           IPPROTO_IPIP, 0,
                           dst, tiph->saddr, 0, 0);
@@ -296,9 +240,8 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
        iph->ihl                = 5;
 }
 
-static struct xfrm_tunnel vti_handler __read_mostly = {
+static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
        .handler        =       vti_rcv,
-       .err_handler    =       vti_err,
        .priority       =       1,
 };
 
index 7f80fb4b82d3716ad79044a43ed43545f6183957..fe3e9f7f1f0beaf0ba6a8b7b4c788c8a021ba54d 100644 (file)
@@ -220,17 +220,17 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb->protocol != htons(ETH_P_IP)))
                goto tx_error;
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+       if (IS_ERR(skb))
+               goto out;
 
        ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
        return NETDEV_TX_OK;
 
 tx_error:
-       dev->stats.tx_errors++;
        dev_kfree_skb(skb);
+out:
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
@@ -275,6 +275,7 @@ static const struct net_device_ops ipip_netdev_ops = {
 #define IPIP_FEATURES (NETIF_F_SG |            \
                       NETIF_F_FRAGLIST |       \
                       NETIF_F_HIGHDMA |        \
+                      NETIF_F_GSO_SOFTWARE |   \
                       NETIF_F_HW_CSUM)
 
 static void ipip_tunnel_setup(struct net_device *dev)
index 1657e39b291f2ae8747e21e944627def23ebfcbb..40d56073cd19d3b3c27372b7dca255469d7e0068 100644 (file)
@@ -36,6 +36,27 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
+config NF_TABLES_IPV4
+       depends on NF_TABLES
+       tristate "IPv4 nf_tables support"
+
+config NFT_REJECT_IPV4
+       depends on NF_TABLES_IPV4
+       tristate "nf_tables IPv4 reject support"
+
+config NFT_CHAIN_ROUTE_IPV4
+       depends on NF_TABLES_IPV4
+       tristate "IPv4 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV4
+       depends on NF_TABLES_IPV4
+       depends on NF_NAT_IPV4 && NFT_NAT
+       tristate "IPv4 nf_tables nat chain support"
+
+config NF_TABLES_ARP
+       depends on NF_TABLES
+       tristate "ARP nf_tables support"
+
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
        default m if NETFILTER_ADVANCED=n
index 3622b248b6dd7ad78aa4411243b4b507731d7e2b..19df72b7ba8810e698bf6d50e8eee5c23033b34a 100644 (file)
@@ -27,6 +27,12 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
 # NAT protocols (nf_nat)
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
 
+obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
index 85a4f21aac1ad117f740d40da7123a663fdfefa8..59da7cde072447c2331422659adb7dadad4f3fae 100644 (file)
@@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        local_bh_disable();
        addend = xt_write_recseq_begin();
        private = table->private;
+       /*
+        * Ensure we load private-> members after we've fetched the base
+        * pointer.
+        */
+       smp_read_barrier_depends();
        table_base = private->entries[smp_processor_id()];
 
        e = get_entry(table_base, private->hook_entry[hook]);
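
A hedged sketch of the publish/consume ordering that the smp_read_barrier_depends() above (and its twin in ipt_do_table() below) relies on. This is the generic pattern from Documentation/memory-barriers.txt, not the actual xt_replace_table() code: the writer commits the fields with a write barrier before publishing the pointer, and the reader orders its dependent loads after the pointer load, which only matters on Alpha.

struct cfg {
        int a;
        int b;
};

static struct cfg *published;

static void publish(struct cfg *c)
{
        c->a = 1;
        c->b = 2;
        smp_wmb();                      /* commit the fields before the pointer */
        ACCESS_ONCE(published) = c;
}

static int consume(void)
{
        struct cfg *c = ACCESS_ONCE(published);

        smp_read_barrier_depends();     /* order c->a, c->b after the load of c */
        return c ? c->a + c->b : 0;
}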
index a865f6f9401318663a2e973ac34cf707be4e1452..802ddecb30b8110474da0e0a34c134aceaece43b 100644 (file)
@@ -27,13 +27,14 @@ static const struct xt_table packet_filter = {
 
 /* The work comes in here from netfilter.c */
 static unsigned int
-arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
+       return arpt_do_table(skb, ops->hooknum, in, out,
+                            net->ipv4.arptable_filter);
 }
 
 static struct nf_hook_ops *arpfilter_ops __read_mostly;
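
A hedged sketch of the hook prototype this commit rolls out across the netfilter entry points above and below: the hook function now receives the nf_hook_ops that fired and reads the hook number from ops->hooknum instead of taking it as a separate argument. example_hook and example_ops are illustrative; registration would go through nf_register_hook(&example_ops).

static unsigned int example_hook(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
                                 int (*okfn)(struct sk_buff *))
{
        pr_debug("packet seen on hook %u\n", ops->hooknum);
        return NF_ACCEPT;
}

static struct nf_hook_ops example_ops __read_mostly = {
        .hook     = example_hook,
        .owner    = THIS_MODULE,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_PRE_ROUTING,
        .priority = NF_IP_PRI_FIRST,
};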
index d23118d95ff9291401396953d094391db0e2e44e..718dfbd30cbe09560c1d545525b446fb5695f6af 100644 (file)
@@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb,
        addend = xt_write_recseq_begin();
        private = table->private;
        cpu        = smp_processor_id();
+       /*
+        * Ensure we load private-> members after we've fetched the base
+        * pointer.
+        */
+       smp_read_barrier_depends();
        table_base = private->entries[cpu];
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
        stackptr   = per_cpu_ptr(private->stackptr, cpu);
index 0b732efd32e20aa8e22e6c9bd5aa4edfb7e5516d..a2e2b61cd7da230475f8ce7b9818eba67090bf05 100644 (file)
@@ -483,7 +483,7 @@ static void arp_print(struct arp_payload *payload)
 #endif
 
 static unsigned int
-arp_mangle(unsigned int hook,
+arp_mangle(const struct nf_hook_ops *ops,
           struct sk_buff *skb,
           const struct net_device *in,
           const struct net_device *out,
index 67e17dcda65e64f27b9ca5b244561ab2d7fc594f..01cffeaa0085ede5bb802999fc1f29c9e6ec4b7f 100644 (file)
@@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        if (th == NULL)
                return NF_DROP;
 
-       synproxy_parse_options(skb, par->thoff, th, &opts);
+       if (!synproxy_parse_options(skb, par->thoff, th, &opts))
+               return NF_DROP;
 
        if (th->syn && !(th->ack || th->fin || th->rst)) {
                /* Initial SYN from client */
@@ -296,7 +297,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
@@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
 
                /* fall through */
        case TCP_CONNTRACK_SYN_SENT:
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
 
                if (!th->syn && th->ack &&
                    CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
                if (!th->syn || !th->ack)
                        break;
 
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
+
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
                        synproxy->tsoff = opts.tsval - synproxy->its;
 
index cbc22158af490589833a4918f3610050a60d5be4..9cb993cd224bf702ca48382a0656163837433c54 100644 (file)
@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
        ub->qlen++;
 
        pm = nlmsg_data(nlh);
+       memset(pm, 0, sizeof(*pm));
 
        /* We might not have a timestamp, get one */
        if (skb->tstamp.tv64 == 0)
@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
        }
        else if (loginfo->prefix[0] != '\0')
                strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
-       else
-               *(pm->prefix) = '\0';
 
        if (in && in->hard_header_len > 0 &&
            skb->mac_header != skb->network_header &&
@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
 
        if (in)
                strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
-       else
-               pm->indev_name[0] = '\0';
 
        if (out)
                strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
-       else
-               pm->outdev_name[0] = '\0';
 
        /* copy_len <= skb->len, so can't fail. */
        if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
index 50af5b45c050c22773db181353e2aee9e02137e3..e08a74a243a85d125ccbd043314f06c6b528b368 100644 (file)
@@ -33,20 +33,21 @@ static const struct xt_table packet_filter = {
 };
 
 static unsigned int
-iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                    const struct net_device *in, const struct net_device *out,
                    int (*okfn)(struct sk_buff *))
 {
        const struct net *net;
 
-       if (hook == NF_INET_LOCAL_OUT &&
+       if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
        net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
+       return ipt_do_table(skb, ops->hooknum, in, out,
+                           net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index 0d8cd82e0fadf676bebd1a3cb3b5b559159b3491..6a5079c34bb363c34135e9bed5700a645f15b249 100644 (file)
@@ -79,19 +79,19 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_mangle_hook(unsigned int hook,
+iptable_mangle_hook(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       if (hook == NF_INET_LOCAL_OUT)
+       if (ops->hooknum == NF_INET_LOCAL_OUT)
                return ipt_mangle_out(skb, out);
-       if (hook == NF_INET_POST_ROUTING)
-               return ipt_do_table(skb, hook, in, out,
+       if (ops->hooknum == NF_INET_POST_ROUTING)
+               return ipt_do_table(skb, ops->hooknum, in, out,
                                    dev_net(out)->ipv4.iptable_mangle);
        /* PREROUTING/INPUT/FORWARD: */
-       return ipt_do_table(skb, hook, in, out,
+       return ipt_do_table(skb, ops->hooknum, in, out,
                            dev_net(in)->ipv4.iptable_mangle);
 }
 
index 683bfaffed65da561cb3a1c2eed598fb8cbe32a1..ee2886126e3dfad44e7c801d82ad2ec4bad621d3 100644 (file)
@@ -61,7 +61,7 @@ static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv4_fn(unsigned int hooknum,
+nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -71,7 +71,7 @@ nf_nat_ipv4_fn(unsigned int hooknum,
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
        /* maniptype == SRC for postrouting. */
-       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
 
        /* We never see fragments: conntrack defrags on pre-routing
         * and local-out, and nf_nat_out protects post-routing.
@@ -108,7 +108,7 @@ nf_nat_ipv4_fn(unsigned int hooknum,
        case IP_CT_RELATED_REPLY:
                if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
-                                                          hooknum))
+                                                          ops->hooknum))
                                return NF_DROP;
                        else
                                return NF_ACCEPT;
@@ -121,14 +121,14 @@ nf_nat_ipv4_fn(unsigned int hooknum,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                                goto oif_changed;
                }
                break;
@@ -137,11 +137,11 @@ nf_nat_ipv4_fn(unsigned int hooknum,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                        goto oif_changed;
        }
 
-       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
 
 oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
@@ -149,7 +149,7 @@ oif_changed:
 }
 
 static unsigned int
-nf_nat_ipv4_in(unsigned int hooknum,
+nf_nat_ipv4_in(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -158,7 +158,7 @@ nf_nat_ipv4_in(unsigned int hooknum,
        unsigned int ret;
        __be32 daddr = ip_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            daddr != ip_hdr(skb)->daddr)
                skb_dst_drop(skb);
@@ -167,7 +167,7 @@ nf_nat_ipv4_in(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv4_out(unsigned int hooknum,
+nf_nat_ipv4_out(const struct nf_hook_ops *ops,
                struct sk_buff *skb,
                const struct net_device *in,
                const struct net_device *out,
@@ -185,7 +185,7 @@ nf_nat_ipv4_out(unsigned int hooknum,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -207,7 +207,7 @@ nf_nat_ipv4_out(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv4_local_fn(unsigned int hooknum,
+nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
@@ -223,7 +223,7 @@ nf_nat_ipv4_local_fn(unsigned int hooknum,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index 1f82aea11df63e0683ba6e2c0e85d270534a6d74..b2f7e8f98316d2733e3936ead8dec20da2978f73 100644 (file)
@@ -20,20 +20,20 @@ static const struct xt_table packet_raw = {
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
+iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                 const struct net_device *in, const struct net_device *out,
                 int (*okfn)(struct sk_buff *))
 {
        const struct net *net;
 
-       if (hook == NF_INET_LOCAL_OUT && 
+       if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
        net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
+       return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index f867a8d38bf7513d38d3da645fb21c2096f5e2a5..c86647ed2078f660cf0e9f8b69957a6f4c79b1f6 100644 (file)
@@ -37,21 +37,22 @@ static const struct xt_table security_table = {
 };
 
 static unsigned int
-iptable_security_hook(unsigned int hook, struct sk_buff *skb,
+iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in,
                      const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
        const struct net *net;
 
-       if (hook == NF_INET_LOCAL_OUT &&
+       if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* Somebody is playing with raw sockets. */
                return NF_ACCEPT;
 
        net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
+       return ipt_do_table(skb, ops->hooknum, in, out,
+                           net->ipv4.iptable_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
index 86f5b34a4ed1865cd1438817bfb46f6eb589f61e..ecd8bec411c9719c5ef083d9781e90d0377b88e9 100644 (file)
@@ -92,7 +92,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
        return NF_ACCEPT;
 }
 
-static unsigned int ipv4_helper(unsigned int hooknum,
+static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct net_device *in,
                                const struct net_device *out,
@@ -121,7 +121,7 @@ static unsigned int ipv4_helper(unsigned int hooknum,
                            ct, ctinfo);
 }
 
-static unsigned int ipv4_confirm(unsigned int hooknum,
+static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
@@ -147,16 +147,16 @@ out:
        return nf_conntrack_confirm(skb);
 }
 
-static unsigned int ipv4_conntrack_in(unsigned int hooknum,
+static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
 {
-       return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
+       return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
 }
 
-static unsigned int ipv4_conntrack_local(unsigned int hooknum,
+static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -166,7 +166,7 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
-       return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
+       return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
index 742815518b0fd5ea78ec4205bfee76a8a804c88d..12e13bd82b5bba4fdd183d5ba2cda098a1c0c683 100644 (file)
@@ -60,7 +60,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
                return IP_DEFRAG_CONNTRACK_OUT + zone;
 }
 
-static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
                                          const struct net_device *in,
                                          const struct net_device *out,
@@ -83,7 +83,9 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #endif
        /* Gather fragments. */
        if (ip_is_fragment(ip_hdr(skb))) {
-               enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+               enum ip_defrag_users user =
+                       nf_ct_defrag_user(ops->hooknum, skb);
+
                if (nf_ct_ipv4_gather_frags(skb, user))
                        return NF_STOLEN;
        }
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
new file mode 100644 (file)
index 0000000..3e67ef1
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2008-2010 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter_arp.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_arp __read_mostly = {
+       .family         = NFPROTO_ARP,
+       .nhooks         = NF_ARP_NUMHOOKS,
+       .owner          = THIS_MODULE,
+};
+
+static int nf_tables_arp_init_net(struct net *net)
+{
+       net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.arp== NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
+
+       if (nft_register_afinfo(net, net->nft.arp) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.arp);
+       return -ENOMEM;
+}
+
+static void nf_tables_arp_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.arp);
+       kfree(net->nft.arp);
+}
+
+static struct pernet_operations nf_tables_arp_net_ops = {
+       .init   = nf_tables_arp_init_net,
+       .exit   = nf_tables_arp_exit_net,
+};
+
+static unsigned int
+nft_do_chain_arp(const struct nf_hook_ops *ops,
+                 struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_arp = {
+       .family         = NFPROTO_ARP,
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .hook_mask      = (1 << NF_ARP_IN) |
+                         (1 << NF_ARP_OUT) |
+                         (1 << NF_ARP_FORWARD),
+       .fn             = {
+               [NF_ARP_IN]             = nft_do_chain_arp,
+               [NF_ARP_OUT]            = nft_do_chain_arp,
+               [NF_ARP_FORWARD]        = nft_do_chain_arp,
+       },
+};
+
+static int __init nf_tables_arp_init(void)
+{
+       int ret;
+
+       nft_register_chain_type(&filter_arp);
+       ret = register_pernet_subsys(&nf_tables_arp_net_ops);
+       if (ret < 0)
+               nft_unregister_chain_type(&filter_arp);
+
+       return ret;
+}
+
+static void __exit nf_tables_arp_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_arp_net_ops);
+       nft_unregister_chain_type(&filter_arp);
+}
+
+module_init(nf_tables_arp_init);
+module_exit(nf_tables_arp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
new file mode 100644 (file)
index 0000000..8f7536b
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/net_namespace.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+
+static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
+                                   struct sk_buff *skb,
+                                   const struct net_device *in,
+                                   const struct net_device *out,
+                                   int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       if (unlikely(skb->len < sizeof(struct iphdr) ||
+                    ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
+               if (net_ratelimit())
+                       pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
+                               "packet\n");
+               return NF_ACCEPT;
+       }
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv4 __read_mostly = {
+       .family         = NFPROTO_IPV4,
+       .nhooks         = NF_INET_NUMHOOKS,
+       .owner          = THIS_MODULE,
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nft_ipv4_output,
+       },
+};
+
+static int nf_tables_ipv4_init_net(struct net *net)
+{
+       net->nft.ipv4 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.ipv4 == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.ipv4, &nft_af_ipv4, sizeof(nft_af_ipv4));
+
+       if (nft_register_afinfo(net, net->nft.ipv4) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.ipv4);
+       return -ENOMEM;
+}
+
+static void nf_tables_ipv4_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.ipv4);
+       kfree(net->nft.ipv4);
+}
+
+static struct pernet_operations nf_tables_ipv4_net_ops = {
+       .init   = nf_tables_ipv4_init_net,
+       .exit   = nf_tables_ipv4_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+                 struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv4 = {
+       .family         = NFPROTO_IPV4,
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .hook_mask      = (1 << NF_INET_LOCAL_IN) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_FORWARD) |
+                         (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING),
+       .fn             = {
+               [NF_INET_LOCAL_IN]      = nft_do_chain_ipv4,
+               [NF_INET_LOCAL_OUT]     = nft_ipv4_output,
+               [NF_INET_FORWARD]       = nft_do_chain_ipv4,
+               [NF_INET_PRE_ROUTING]   = nft_do_chain_ipv4,
+               [NF_INET_POST_ROUTING]  = nft_do_chain_ipv4,
+       },
+};
+
+static int __init nf_tables_ipv4_init(void)
+{
+       nft_register_chain_type(&filter_ipv4);
+       return register_pernet_subsys(&nf_tables_ipv4_net_ops);
+}
+
+static void __exit nf_tables_ipv4_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_ipv4_net_ops);
+       nft_unregister_chain_type(&filter_ipv4);
+}
+
+module_init(nf_tables_ipv4_init);
+module_exit(nf_tables_ipv4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET);
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
new file mode 100644 (file)
index 0000000..cf2c792
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+/*
+ * NAT chains
+ */
+
+static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
+                             struct sk_buff *skb,
+                             const struct net_device *in,
+                             const struct net_device *out,
+                             int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       struct nf_conn_nat *nat;
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+       struct nft_pktinfo pkt;
+       unsigned int ret;
+
+       if (ct == NULL || nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
+
+       nat = nfct_nat(ct);
+       if (nat == NULL) {
+               /* Conntrack module was loaded late, can't add extension. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL)
+                       return NF_ACCEPT;
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED + IP_CT_IS_REPLY:
+               if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+                       if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+                                                          ops->hooknum))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall through */
+       case IP_CT_NEW:
+               if (nf_nat_initialized(ct, maniptype))
+                       break;
+
+               nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+               ret = nft_do_chain_pktinfo(&pkt, ops);
+               if (ret != NF_ACCEPT)
+                       return ret;
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               }
+       default:
+               break;
+       }
+
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
+                                     const struct net_device *in,
+                                     const struct net_device *out,
+                                     int (*okfn)(struct sk_buff *))
+{
+       __be32 daddr = ip_hdr(skb)->daddr;
+       unsigned int ret;
+
+       ret = nf_nat_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           ip_hdr(skb)->daddr != daddr) {
+               skb_dst_drop(skb);
+       }
+       return ret;
+}
+
+static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops,
+                                      struct sk_buff *skb,
+                                      const struct net_device *in,
+                                      const struct net_device *out,
+                                      int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo __maybe_unused;
+       const struct nf_conn *ct __maybe_unused;
+       unsigned int ret;
+
+       ret = nf_nat_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (ct->tuplehash[dir].tuple.src.u3.ip !=
+                   ct->tuplehash[!dir].tuple.dst.u3.ip ||
+                   ct->tuplehash[dir].tuple.src.u.all !=
+                   ct->tuplehash[!dir].tuple.dst.u.all)
+                       return nf_xfrm_me_harder(skb, AF_INET) == 0 ?
+                                                               ret : NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
+                                 struct sk_buff *skb,
+                                 const struct net_device *in,
+                                 const struct net_device *out,
+                                 int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       const struct nf_conn *ct;
+       unsigned int ret;
+
+       ret = nf_nat_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+                   ct->tuplehash[!dir].tuple.src.u3.ip) {
+                       if (ip_route_me_harder(skb, RTN_UNSPEC))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET))
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv4 = {
+       .family         = NFPROTO_IPV4,
+       .name           = "nat",
+       .type           = NFT_CHAIN_T_NAT,
+       .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .fn             = {
+               [NF_INET_PRE_ROUTING]   = nf_nat_prerouting,
+               [NF_INET_POST_ROUTING]  = nf_nat_postrouting,
+               [NF_INET_LOCAL_OUT]     = nf_nat_output,
+               [NF_INET_LOCAL_IN]      = nf_nat_fn,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_nat_init(void)
+{
+       int err;
+
+       err = nft_register_chain_type(&nft_chain_nat_ipv4);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit nft_chain_nat_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_nat_ipv4);
+}
+
+module_init(nft_chain_nat_init);
+module_exit(nft_chain_nat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
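
nf_nat_fn() above keys its behaviour off HOOK2MANIP(ops->hooknum): pre-routing and local-out rewrite the destination (DNAT), while post-routing and local-in rewrite the source (SNAT). A standalone sketch of that mapping; the helper name and enum values are illustrative, not copied from the kernel headers.

#include <stdio.h>

enum nat_hook  { NAT_PRE_ROUTING, NAT_LOCAL_IN, NAT_LOCAL_OUT, NAT_POST_ROUTING };
enum nat_manip { MANIP_SRC, MANIP_DST };

/* DNAT is applied where packets enter the NAT path (pre-routing, local-out);
 * SNAT where they leave it (post-routing, local-in). */
static enum nat_manip hook_to_manip(enum nat_hook h)
{
        return (h == NAT_POST_ROUTING || h == NAT_LOCAL_IN) ? MANIP_SRC : MANIP_DST;
}

int main(void)
{
        static const char *names[] = {
                "PRE_ROUTING", "LOCAL_IN", "LOCAL_OUT", "POST_ROUTING"
        };
        for (int h = NAT_PRE_ROUTING; h <= NAT_POST_ROUTING; h++)
                printf("%-12s -> %s\n", names[h],
                       hook_to_manip(h) == MANIP_SRC ? "SNAT (source)"
                                                     : "DNAT (destination)");
        return 0;
}
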
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
new file mode 100644 (file)
index 0000000..4e6bf9a
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+                                       struct sk_buff *skb,
+                                       const struct net_device *in,
+                                       const struct net_device *out,
+                                       int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       struct nft_pktinfo pkt;
+       u32 mark;
+       __be32 saddr, daddr;
+       u_int8_t tos;
+       const struct iphdr *iph;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct iphdr) ||
+           ip_hdrlen(skb) < sizeof(struct iphdr))
+               return NF_ACCEPT;
+
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+       mark = skb->mark;
+       iph = ip_hdr(skb);
+       saddr = iph->saddr;
+       daddr = iph->daddr;
+       tos = iph->tos;
+
+       ret = nft_do_chain_pktinfo(&pkt, ops);
+       if (ret != NF_DROP && ret != NF_QUEUE) {
+               iph = ip_hdr(skb);
+
+               if (iph->saddr != saddr ||
+                   iph->daddr != daddr ||
+                   skb->mark != mark ||
+                   iph->tos != tos)
+                       if (ip_route_me_harder(skb, RTN_UNSPEC))
+                               ret = NF_DROP;
+       }
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv4 = {
+       .family         = NFPROTO_IPV4,
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .fn             = {
+               [NF_INET_LOCAL_OUT]     = nf_route_table_hook,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+       return nft_register_chain_type(&nft_chain_route_ipv4);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_route_ipv4);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "route");
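
The route chain above snapshots saddr, daddr, tos and skb->mark before running the rules and forces a fresh route lookup if any of them changed, since the dst cached on the skb was chosen for the old values. A tiny sketch of that snapshot-and-compare pattern; the pkt struct, run_chain() and reroute() are invented for illustration and only echo the shape of the hook.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct pkt {
        uint32_t saddr, daddr, mark;
        uint8_t  tos;
};

/* Stand-in for the rule chain, which may rewrite header fields. */
static void run_chain(struct pkt *p)
{
        p->tos = 0x10;          /* e.g. a rule set a new TOS */
}

/* Stand-in for ip_route_me_harder(): redo the route lookup for the new values. */
static bool reroute(const struct pkt *p)
{
        printf("re-routing: daddr=%u tos=%u\n", (unsigned)p->daddr, (unsigned)p->tos);
        return true;
}

int main(void)
{
        struct pkt p = { .saddr = 1, .daddr = 2, .mark = 0, .tos = 0 };
        struct pkt before = p;  /* snapshot, as the hook does */

        run_chain(&p);

        if (p.saddr != before.saddr || p.daddr != before.daddr ||
            p.mark != before.mark || p.tos != before.tos)
                reroute(&p);
        return 0;
}
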
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644 (file)
index 0000000..fff5ba1
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+
+struct nft_reject {
+       enum nft_reject_types   type:8;
+       u8                      icmp_code;
+};
+
+static void nft_reject_eval(const struct nft_expr *expr,
+                             struct nft_data data[NFT_REG_MAX + 1],
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_reject *priv = nft_expr_priv(expr);
+
+       switch (priv->type) {
+       case NFT_REJECT_ICMP_UNREACH:
+               icmp_send(pkt->skb, ICMP_DEST_UNREACH, priv->icmp_code, 0);
+               break;
+       case NFT_REJECT_TCP_RST:
+               break;
+       }
+
+       data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+
+static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+       [NFTA_REJECT_TYPE]              = { .type = NLA_U32 },
+       [NFTA_REJECT_ICMP_CODE]         = { .type = NLA_U8 },
+};
+
+static int nft_reject_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_reject *priv = nft_expr_priv(expr);
+
+       if (tb[NFTA_REJECT_TYPE] == NULL)
+               return -EINVAL;
+
+       priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+       switch (priv->type) {
+       case NFT_REJECT_ICMP_UNREACH:
+               if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+                       return -EINVAL;
+               priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+       case NFT_REJECT_TCP_RST:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_reject *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+               goto nla_put_failure;
+
+       switch (priv->type) {
+       case NFT_REJECT_ICMP_UNREACH:
+               if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+                       goto nla_put_failure;
+               break;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_reject_type;
+static const struct nft_expr_ops nft_reject_ops = {
+       .type           = &nft_reject_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+       .eval           = nft_reject_eval,
+       .init           = nft_reject_init,
+       .dump           = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_type __read_mostly = {
+       .name           = "reject",
+       .ops            = &nft_reject_ops,
+       .policy         = nft_reject_policy,
+       .maxattr        = NFTA_REJECT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_reject_module_init(void)
+{
+       return nft_register_expr(&nft_reject_type);
+}
+
+static void __exit nft_reject_module_exit(void)
+{
+       nft_unregister_expr(&nft_reject_type);
+}
+
+module_init(nft_reject_module_init);
+module_exit(nft_reject_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("reject");
index d7d9882d4caea169964a58e294ffe6c73a99d36c..9afbdb19f4a2f5dc1bccb6cf5d73db795fbf5f03 100644 (file)
@@ -202,15 +202,14 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
 #if IS_ENABLED(CONFIG_IPV6)
                } else if (skb->protocol == htons(ETH_P_IPV6) &&
                           sk->sk_family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
 
                        pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
                                 (int) isk->inet_num,
-                                &inet6_sk(sk)->rcv_saddr,
+                                &sk->sk_v6_rcv_saddr,
                                 sk->sk_bound_dev_if);
 
-                       if (!ipv6_addr_any(&np->rcv_saddr) &&
-                           !ipv6_addr_equal(&np->rcv_saddr,
+                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+                           !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
                                             &ipv6_hdr(skb)->daddr))
                                continue;
 #endif
@@ -237,11 +236,11 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 
@@ -362,7 +361,7 @@ static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
        } else if (saddr->sa_family == AF_INET6) {
                struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
                struct ipv6_pinfo *np = inet6_sk(sk);
-               np->rcv_saddr = np->saddr = addr->sin6_addr;
+               sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr;
 #endif
        }
 }
@@ -376,7 +375,7 @@ static void ping_clear_saddr(struct sock *sk, int dif)
 #if IS_ENABLED(CONFIG_IPV6)
        } else if (sk->sk_family == AF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
-               memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+               memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
                memset(&np->saddr, 0, sizeof(np->saddr));
 #endif
        }
@@ -416,10 +415,12 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 (int)sk->sk_bound_dev_if);
 
        err = 0;
-       if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
-           (sk->sk_family == AF_INET6 &&
-            !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
+       if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+               sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#endif
 
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
@@ -429,7 +430,7 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
-               memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+               memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
 #endif
 
        sk_dst_reset(sk);
@@ -713,6 +714,8 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.opt = NULL;
        ipc.oif = sk->sk_bound_dev_if;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        sock_tx_timestamp(sk, &ipc.tx_flags);
 
@@ -744,7 +747,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        return -EINVAL;
                faddr = ipc.opt->opt.faddr;
        }
-       tos = RT_TOS(inet->tos);
+       tos = get_rttos(&ipc, inet);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
index bfec521c717fd2320242c24e7a7f74a64c1c1a44..41e1d2845c8f6690b5588888134bd80e49fc5dcf 100644 (file)
@@ -218,8 +218,10 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
                ipv4_sk_update_pmtu(skb, sk, info);
-       else if (type == ICMP_REDIRECT)
+       else if (type == ICMP_REDIRECT) {
                ipv4_sk_redirect(skb, sk);
+               return;
+       }
 
        /* Report error on raw socket, if:
           1. User requested ip_recverr.
@@ -297,7 +299,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        /* Charge it to the socket. */
 
-       ipv4_pktinfo_prepare(skb);
+       ipv4_pktinfo_prepare(sk, skb);
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
@@ -517,6 +519,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.addr = inet->inet_saddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
        ipc.oif = sk->sk_bound_dev_if;
 
        if (msg->msg_controllen) {
@@ -556,7 +560,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        daddr = ipc.opt->opt.faddr;
                }
        }
-       tos = RT_CONN_FLAGS(sk);
+       tos = get_rtconn_flags(&ipc, sk);
        if (msg->msg_flags & MSG_DONTROUTE)
                tos |= RTO_ONLINK;
 
index 727f4365bcdff3acdb415fe75fd878a3bc5050af..d2d325382b13f4343d70974c327af3527ef1fcae 100644 (file)
@@ -295,7 +295,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
                   dst_entries_get_slow(&ipv4_dst_ops),
-                  st->in_hit,
+                  0, /* st->in_hit */
                   st->in_slow_tot,
                   st->in_slow_mc,
                   st->in_no_route,
@@ -303,16 +303,16 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
                   st->in_martian_dst,
                   st->in_martian_src,
 
-                  st->out_hit,
+                  0, /* st->out_hit */
                   st->out_slow_tot,
                   st->out_slow_mc,
 
-                  st->gc_total,
-                  st->gc_ignored,
-                  st->gc_goal_miss,
-                  st->gc_dst_overflow,
-                  st->in_hlist_search,
-                  st->out_hlist_search
+                  0, /* st->gc_total */
+                  0, /* st->gc_ignored */
+                  0, /* st->gc_goal_miss */
+                  0, /* st->gc_dst_overflow */
+                  0, /* st->in_hlist_search */
+                  0  /* st->out_hlist_search */
                );
        return 0;
 }
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                                                              RT_SCOPE_LINK);
                        goto make_route;
                }
-               if (fl4->saddr) {
+               if (!fl4->saddr) {
                        if (ipv4_is_multicast(fl4->daddr))
                                fl4->saddr = inet_select_addr(dev_out, 0,
                                                              fl4->flowi4_scope);
index 14a15c49129df8984076c05c4dd757012fed03b3..b95331e6c077cea0ff215702087fa14acd00abaf 100644 (file)
 
 extern int sysctl_tcp_syncookies;
 
-__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-EXPORT_SYMBOL(syncookie_secret);
-
-static __init int init_syncookies(void)
-{
-       get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
-       return 0;
-}
-__initcall(init_syncookies);
+static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 
 #define COOKIEBITS 24  /* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -44,8 +36,11 @@ static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
                       u32 count, int c)
 {
-       __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
+       __u32 *tmp;
+
+       net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
 
+       tmp  = __get_cpu_var(ipv4_cookie_scratch);
        memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
        tmp[0] = (__force u32)saddr;
        tmp[1] = (__force u32)daddr;
@@ -89,8 +84,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
 
 
 static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
-                                  __be16 dport, __u32 sseq, __u32 count,
-                                  __u32 data)
+                                  __be16 dport, __u32 sseq, __u32 data)
 {
        /*
         * Compute the secure sequence number.
@@ -102,7 +96,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
         * As an extra hack, we add a small "data" value that encodes the
         * MSS into the second hash value.
         */
-
+       u32 count = tcp_cookie_time();
        return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
                sseq + (count << COOKIEBITS) +
                ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -114,22 +108,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
  * If the syncookie is bad, the data returned will be out of
  * range.  This must be checked by the caller.
  *
- * The count value used to generate the cookie must be within
- * "maxdiff" if the current (passed-in) "count".  The return value
- * is (__u32)-1 if this test fails.
+ * The count value used to generate the cookie must be less than
+ * MAX_SYNCOOKIE_AGE minutes in the past.
+ * The return value is (__u32)-1 if this test fails.
  */
 static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
-                                 __be16 sport, __be16 dport, __u32 sseq,
-                                 __u32 count, __u32 maxdiff)
+                                 __be16 sport, __be16 dport, __u32 sseq)
 {
-       __u32 diff;
+       u32 diff, count = tcp_cookie_time();
 
        /* Strip away the layers from the cookie */
        cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
 
        /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
        diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
-       if (diff >= maxdiff)
+       if (diff >= MAX_SYNCOOKIE_AGE)
                return (__u32)-1;
 
        return (cookie -
@@ -138,22 +131,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
 }
 
 /*
- * MSS Values are taken from the 2009 paper
- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
- *  - values 1440 to 1460 accounted for 80% of observed mss values
- *  - values outside the 536-1460 range are rare (<0.2%).
+ * MSS Values are chosen based on the 2011 paper
+ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
+ * Values ..
+ *  .. lower than 536 are rare (< 0.2%)
+ *  .. between 537 and 1299 account for less than 1.5% of observed values
+ *  .. in the 1300-1349 range account for about 15 to 20% of observed mss values
+ *  .. exceeding 1460 are very rare (< 0.04%)
  *
- * Table must be sorted.
+ *  1460 is the single most frequently announced mss value (30 to 46% depending
+ *  on monitor location).  Table must be sorted.
  */
 static __u16 const msstab[] = {
-       64,
-       512,
        536,
-       1024,
-       1440,
+       1300,
+       1440,   /* 1440, 1452: PPPoE */
        1460,
-       4312,
-       8960,
 };
 
 /*
@@ -173,7 +166,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 
        return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
                                     th->source, th->dest, ntohl(th->seq),
-                                    jiffies / (HZ * 60), mssind);
+                                    mssind);
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
@@ -188,13 +181,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
        return __cookie_v4_init_sequence(iph, th, mssp);
 }
 
-/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
 /*
  * Check if a ack sequence number is a valid syncookie.
  * Return the decoded mss if it is, or 0 if not.
@@ -204,9 +190,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 {
        __u32 seq = ntohl(th->seq) - 1;
        __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
-                                           th->source, th->dest, seq,
-                                           jiffies / (HZ * 60),
-                                           COUNTER_TRIES);
+                                           th->source, th->dest, seq);
 
        return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
@@ -315,10 +299,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        treq->rcv_isn           = ntohl(th->seq) - 1;
        treq->snt_isn           = cookie;
        req->mss                = mss;
-       ireq->loc_port          = th->dest;
-       ireq->rmt_port          = th->source;
-       ireq->loc_addr          = ip_hdr(skb)->daddr;
-       ireq->rmt_addr          = ip_hdr(skb)->saddr;
+       ireq->ir_num            = ntohs(th->dest);
+       ireq->ir_rmt_port       = th->source;
+       ireq->ir_loc_addr       = ip_hdr(skb)->daddr;
+       ireq->ir_rmt_addr       = ip_hdr(skb)->saddr;
        ireq->ecn_ok            = ecn_ok;
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
@@ -358,8 +342,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
-                          (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-                          ireq->loc_addr, th->source, th->dest);
+                          (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, th->source, th->dest);
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
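
The reworked syncookie code above folds the minute counter into the cookie itself (count << COOKIEBITS) and bounds its age with MAX_SYNCOOKIE_AGE instead of threading a count/maxdiff pair through every caller. A self-contained sketch of the same encode/decode arithmetic; a toy mixing function stands in for the SHA-1 based cookie_hash(), and an age limit of 2 is assumed for the demo.

#include <stdio.h>
#include <stdint.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)
#define MAX_AGE    2    /* illustrative stand-in for MAX_SYNCOOKIE_AGE */

/* Toy keyed mix; the kernel hashes over a per-boot secret with SHA-1 here. */
static uint32_t toy_hash(uint32_t saddr, uint32_t daddr, uint32_t count, int c)
{
        return (saddr * 2654435761u) ^ (daddr * 40503u) ^ (count << 7) ^ (uint32_t)c;
}

/* cookie = H0 + sseq + (count << 24) + ((H1 + data) & MASK), all mod 2^32 */
static uint32_t encode(uint32_t saddr, uint32_t daddr, uint32_t sseq,
                       uint32_t count, uint32_t data)
{
        return toy_hash(saddr, daddr, 0, 0) + sseq + (count << COOKIEBITS) +
               ((toy_hash(saddr, daddr, count, 1) + data) & COOKIEMASK);
}

/* Returns the hidden data (the MSS table index), or (uint32_t)-1 if too old. */
static uint32_t decode(uint32_t cookie, uint32_t saddr, uint32_t daddr,
                       uint32_t sseq, uint32_t now)
{
        uint32_t diff;

        cookie -= toy_hash(saddr, daddr, 0, 0) + sseq;
        diff = (now - (cookie >> COOKIEBITS)) & ((uint32_t)-1 >> COOKIEBITS);
        if (diff >= MAX_AGE)
                return (uint32_t)-1;
        return (cookie - toy_hash(saddr, daddr, now - diff, 1)) & COOKIEMASK;
}

int main(void)
{
        /* Encode MSS index 3 at minute 41, then decode one and four minutes later. */
        uint32_t c = encode(0x0a000001, 0x0a000002, 1000, 41, 3);

        printf("fresh cookie:   index %u\n", (unsigned)decode(c, 0x0a000001, 0x0a000002, 1000, 42));
        printf("expired cookie: %u\n",       (unsigned)decode(c, 0x0a000001, 0x0a000002, 1000, 45));
        return 0;
}
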
index 540279f4c531be079e45c027aa11d37920c14a88..d5b1390eebbee19715fcbbdbea742bbf0eddcc71 100644 (file)
@@ -43,12 +43,12 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 
 /* Update system visible IP port range */
-static void set_local_port_range(int range[2])
+static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&sysctl_local_ports.lock);
-       sysctl_local_ports.range[0] = range[0];
-       sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+       net->ipv4.sysctl_local_ports.range[0] = range[0];
+       net->ipv4.sysctl_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -56,6 +56,8 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -66,14 +68,15 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                .extra2 = &ip_local_port_range_max,
        };
 
-       inet_get_local_port_range(range, range + 1);
+       inet_get_local_port_range(net, &range[0], &range[1]);
+
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
        if (write && ret == 0) {
                if (range[1] < range[0])
                        ret = -EINVAL;
                else
-                       set_local_port_range(range);
+                       set_local_port_range(net, range);
        }
 
        return ret;
@@ -83,23 +86,27 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
        kgid_t *data = table->data;
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
 static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 {
        kgid_t *data = table->data;
-       write_seqlock(&sysctl_local_ports.lock);
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -193,49 +200,6 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
        return ret;
 }
 
-static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
-                          void __user *buffer, size_t *lenp,
-                          loff_t *ppos)
-{
-       int ret;
-       unsigned long vec[3];
-       struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_MEMCG_KMEM
-       struct mem_cgroup *memcg;
-#endif
-
-       struct ctl_table tmp = {
-               .data = &vec,
-               .maxlen = sizeof(vec),
-               .mode = ctl->mode,
-       };
-
-       if (!write) {
-               ctl->data = &net->ipv4.sysctl_tcp_mem;
-               return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
-       }
-
-       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
-       if (ret)
-               return ret;
-
-#ifdef CONFIG_MEMCG_KMEM
-       rcu_read_lock();
-       memcg = mem_cgroup_from_task(current);
-
-       tcp_prot_mem(memcg, vec[0], 0);
-       tcp_prot_mem(memcg, vec[1], 1);
-       tcp_prot_mem(memcg, vec[2], 2);
-       rcu_read_unlock();
-#endif
-
-       net->ipv4.sysctl_tcp_mem[0] = vec[0];
-       net->ipv4.sysctl_tcp_mem[1] = vec[1];
-       net->ipv4.sysctl_tcp_mem[2] = vec[2];
-
-       return 0;
-}
-
 static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
@@ -267,6 +231,11 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
+               /* Generate a dummy secret but don't publish it. This
+                * is needed so we don't regenerate a new key on the
+                * first invocation of tcp_fastopen_cookie_gen
+                */
+               tcp_fastopen_init_key_once(false);
                tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
        }
 
@@ -474,13 +443,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "ip_local_port_range",
-               .data           = &sysctl_local_ports.range,
-               .maxlen         = sizeof(sysctl_local_ports.range),
-               .mode           = 0644,
-               .proc_handler   = ipv4_local_port_range,
-       },
        {
                .procname       = "ip_local_reserved_ports",
                .data           = NULL, /* initialized in sysctl_ipv4_init */
@@ -551,6 +513,13 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_mem",
+               .maxlen         = sizeof(sysctl_tcp_mem),
+               .data           = &sysctl_tcp_mem,
+               .mode           = 0644,
+               .proc_handler   = proc_doulongvec_minmax,
+       },
        {
                .procname       = "tcp_wmem",
                .data           = &sysctl_tcp_wmem,
@@ -854,10 +823,11 @@ static struct ctl_table ipv4_net_table[] = {
                .proc_handler   = proc_dointvec
        },
        {
-               .procname       = "tcp_mem",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_tcp_mem),
+               .procname       = "ip_local_port_range",
+               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
+               .data           = &init_net.ipv4.sysctl_local_ports.range,
                .mode           = 0644,
-               .proc_handler   = ipv4_tcp_mem,
+               .proc_handler   = ipv4_local_port_range,
        },
        { }
 };
@@ -868,30 +838,15 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
        table = ipv4_net_table;
        if (!net_eq(net, &init_net)) {
+               int i;
+
                table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
 
-               table[0].data =
-                       &net->ipv4.sysctl_icmp_echo_ignore_all;
-               table[1].data =
-                       &net->ipv4.sysctl_icmp_echo_ignore_broadcasts;
-               table[2].data =
-                       &net->ipv4.sysctl_icmp_ignore_bogus_error_responses;
-               table[3].data =
-                       &net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr;
-               table[4].data =
-                       &net->ipv4.sysctl_icmp_ratelimit;
-               table[5].data =
-                       &net->ipv4.sysctl_icmp_ratemask;
-               table[6].data =
-                       &net->ipv4.sysctl_ping_group_range;
-               table[7].data =
-                       &net->ipv4.sysctl_tcp_ecn;
-
-               /* Don't export sysctls to unprivileged users */
-               if (net->user_ns != &init_user_ns)
-                       table[0].procname = NULL;
+               /* Update the variables to point into the current struct net */
+               for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
+                       table[i].data += (void *)net - (void *)&init_net;
        }
 
        /*
@@ -901,7 +856,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
        net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
 
-       tcp_init_mem(net);
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
+       net->ipv4.sysctl_local_ports.range[0] =  32768;
+       net->ipv4.sysctl_local_ports.range[1] =  61000;
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
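
The per-net sysctl setup above no longer patches each .data pointer by hand; it rebases every entry by the byte offset between the new struct net and init_net, which works because all ipv4_net_table entries point at fields inside init_net. A small sketch of that offset-rebasing trick on a made-up struct; the names here are illustrative.

#include <stdio.h>

struct netns_demo {
        int echo_ignore_all;
        int ratelimit;
};

/* Template table: .data points into the "init" instance, as in ipv4_net_table. */
struct ctl_entry {
        const char *name;
        void *data;
};

static struct netns_demo init_ns  = { .echo_ignore_all = 0, .ratelimit = 1000 };
static struct netns_demo other_ns = { .echo_ignore_all = 1, .ratelimit = 250 };

int main(void)
{
        struct ctl_entry table[] = {
                { "echo_ignore_all", &init_ns.echo_ignore_all },
                { "ratelimit",       &init_ns.ratelimit },
        };

        /* Rebase every pointer from init_ns to other_ns by a constant offset,
         * mirroring: table[i].data += (void *)net - (void *)&init_net; */
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                table[i].data = (char *)table[i].data +
                                ((char *)&other_ns - (char *)&init_ns);

        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                printf("%s = %d\n", table[i].name, *(int *)table[i].data);
        return 0;
}
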
index 6e5617b9f9db6f3c2aeacc7923ea763660761d99..4f328544c07517a6544d5893039aec10829f6959 100644 (file)
@@ -288,9 +288,11 @@ int sysctl_tcp_min_tso_segs __read_mostly = 2;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
+long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
+EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -1429,7 +1431,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
        do {
                if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
                                              last_issued, &done,
-                                             &used) == DMA_SUCCESS) {
+                                             &used) == DMA_COMPLETE) {
                        /* Safe to free early-copied skbs now */
                        __skb_queue_purge(&sk->sk_async_wait_queue);
                        break;
@@ -1437,7 +1439,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
                        struct sk_buff *skb;
                        while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
                               (dma_async_is_complete(skb->dma_cookie, done,
-                                                     used) == DMA_SUCCESS)) {
+                                                     used) == DMA_COMPLETE)) {
                                __skb_dequeue(&sk->sk_async_wait_queue);
                                kfree_skb(skb);
                        }
@@ -3097,13 +3099,13 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
-void tcp_init_mem(struct net *net)
+static void tcp_init_mem(void)
 {
        unsigned long limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
-       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-       net->ipv4.sysctl_tcp_mem[1] = limit;
-       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+       sysctl_tcp_mem[0] = limit / 4 * 3;
+       sysctl_tcp_mem[1] = limit;
+       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
 }
 
 void __init tcp_init(void)
@@ -3137,10 +3139,9 @@ void __init tcp_init(void)
                                        &tcp_hashinfo.ehash_mask,
                                        0,
                                        thash_entries ? 0 : 512 * 1024);
-       for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
+       for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
                INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
-               INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
-       }
+
        if (inet_ehash_locks_alloc(&tcp_hashinfo))
                panic("TCP: failed to alloc ehash_locks");
        tcp_hashinfo.bhash =
@@ -3166,7 +3167,7 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       tcp_init_mem(&init_net);
+       tcp_init_mem();
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
        limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
        max_wshare = min(4UL*1024*1024, limit);
index ab7bd35bb312c6e9e07aa2950d75ec4bbc982eac..766032b4a6c39b9894c95b5eeef1689e355605ff 100644 (file)
@@ -14,6 +14,20 @@ struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 
 static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
 
+void tcp_fastopen_init_key_once(bool publish)
+{
+       static u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+       /* tcp_fastopen_reset_cipher publishes the new context
+        * atomically, so we allow this race to happen here.
+        *
+        * All call sites of tcp_fastopen_cookie_gen also check
+        * for a valid cookie, so this is an acceptable risk.
+        */
+       if (net_get_random_once(key, sizeof(key)) && publish)
+               tcp_fastopen_reset_cipher(key, sizeof(key));
+}
+
 static void tcp_fastopen_ctx_free(struct rcu_head *head)
 {
        struct tcp_fastopen_context *ctx =
@@ -70,6 +84,8 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
        __be32 path[4] = { src, dst, 0, 0 };
        struct tcp_fastopen_context *ctx;
 
+       tcp_fastopen_init_key_once(true);
+
        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
@@ -78,14 +94,3 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
        }
        rcu_read_unlock();
 }
-
-static int __init tcp_fastopen_init(void)
-{
-       __u8 key[TCP_FASTOPEN_KEY_LENGTH];
-
-       get_random_bytes(key, sizeof(key));
-       tcp_fastopen_reset_cipher(key, sizeof(key));
-       return 0;
-}
-
-late_initcall(tcp_fastopen_init);
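
The removed late_initcall, which unconditionally generated and published a Fast Open key at boot, is replaced above by lazy initialisation: net_get_random_once() fills the key exactly once, and only callers passing publish=true install it as the cipher context. A rough user-space sketch of that call pattern; get_random_once() and publish_key() here are stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#define KEY_LEN 16

static unsigned char the_key[KEY_LEN];
static bool key_generated;

/* Stand-in for net_get_random_once(): fill the buffer only on the first
 * call and report whether this call did the filling. */
static bool get_random_once(unsigned char *buf, size_t len)
{
        if (key_generated)
                return false;
        memset(buf, 0xab, len);         /* pretend these bytes are random */
        key_generated = true;
        return true;
}

/* Stand-in for tcp_fastopen_reset_cipher(): make a key the live context. */
static void publish_key(const unsigned char *key, const char *who)
{
        printf("%s installed key starting with %02x\n", who, key[0]);
}

static void init_key_once(bool publish)
{
        if (get_random_once(the_key, sizeof(the_key)) && publish)
                publish_key(the_key, "init_key_once");
}

int main(void)
{
        unsigned char user_key[KEY_LEN] = { 0x42 };

        init_key_once(false);              /* sysctl write: mark key as generated */
        publish_key(user_key, "sysctl");   /* ...then install the admin's key     */
        init_key_once(true);               /* later cookie_gen: no overwrite now  */
        return 0;
}
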
index 25a89eaa669de9240036abc04f1385eb168f995e..b935397c703c569bf01e6ebfd85e6bfd1b474227 100644 (file)
@@ -267,11 +267,31 @@ static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
  */
 
-static void tcp_fixup_sndbuf(struct sock *sk)
+static void tcp_sndbuf_expand(struct sock *sk)
 {
-       int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
+       const struct tcp_sock *tp = tcp_sk(sk);
+       int sndmem, per_mss;
+       u32 nr_segs;
+
+       /* Worst case is non-GSO/TSO: each frame consumes one skb
+        * and skb->head is kmalloc'ed from a power-of-two sized area of memory
+        */
+       per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+                 MAX_TCP_HEADER +
+                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       per_mss = roundup_pow_of_two(per_mss) +
+                 SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+       nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+       nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
+
+       /* Fast Recovery (RFC 5681 3.2) :
+        * Cubic needs 1.7 factor, rounded to 2 to include
+        * extra cushion (application might react slowly to POLLOUT)
+        */
+       sndmem = 2 * nr_segs * per_mss;
 
-       sndmem *= TCP_INIT_CWND;
        if (sk->sk_sndbuf < sndmem)
                sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
 }
@@ -355,6 +375,12 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
        rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
                 tcp_default_init_rwnd(mss);
 
+       /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
+        * Allow enough cushion so that sender is not limited by our window
+        */
+       if (sysctl_tcp_moderate_rcvbuf)
+               rcvmem <<= 2;
+
        if (sk->sk_rcvbuf < rcvmem)
                sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
 }
@@ -370,9 +396,11 @@ void tcp_init_buffer_space(struct sock *sk)
        if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                tcp_fixup_rcvbuf(sk);
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
-               tcp_fixup_sndbuf(sk);
+               tcp_sndbuf_expand(sk);
 
        tp->rcvq_space.space = tp->rcv_wnd;
+       tp->rcvq_space.time = tcp_time_stamp;
+       tp->rcvq_space.seq = tp->copied_seq;
 
        maxwin = tcp_full_space(sk);
 
@@ -512,48 +540,62 @@ void tcp_rcv_space_adjust(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int time;
-       int space;
-
-       if (tp->rcvq_space.time == 0)
-               goto new_measure;
+       int copied;
 
        time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
                return;
 
-       space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
+       /* Number of bytes copied to user in last RTT */
+       copied = tp->copied_seq - tp->rcvq_space.seq;
+       if (copied <= tp->rcvq_space.space)
+               goto new_measure;
 
-       space = max(tp->rcvq_space.space, space);
+       /* A bit of theory :
+        * copied = bytes received in previous RTT, our base window
+        * To cope with packet losses, we need a 2x factor
+        * To cope with slow start, and the sender growing its cwnd by 100%
+        * every RTT, we need a 4x factor, because the ACK we are sending
+        * now is for the next RTT, not the current one:
+        * <prev RTT . ><current RTT .. ><next RTT .... >
+        */
 
-       if (tp->rcvq_space.space != space) {
-               int rcvmem;
+       if (sysctl_tcp_moderate_rcvbuf &&
+           !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+               int rcvwin, rcvmem, rcvbuf;
 
-               tp->rcvq_space.space = space;
+               /* minimal window to cope with packet losses, assuming
+                * steady state. Add some cushion because of small variations.
+                */
+               rcvwin = (copied << 1) + 16 * tp->advmss;
 
-               if (sysctl_tcp_moderate_rcvbuf &&
-                   !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-                       int new_clamp = space;
+               /* If rate increased by 25%,
+                *      assume slow start, rcvwin = 3 * copied
+                * If rate increased by 50%,
+                *      assume sender can use 2x growth, rcvwin = 4 * copied
+                */
+               if (copied >=
+                   tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
+                       if (copied >=
+                           tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
+                               rcvwin <<= 1;
+                       else
+                               rcvwin += (rcvwin >> 1);
+               }
 
-                       /* Receive space grows, normalize in order to
-                        * take into account packet headers and sk_buff
-                        * structure overhead.
-                        */
-                       space /= tp->advmss;
-                       if (!space)
-                               space = 1;
-                       rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
-                       while (tcp_win_from_space(rcvmem) < tp->advmss)
-                               rcvmem += 128;
-                       space *= rcvmem;
-                       space = min(space, sysctl_tcp_rmem[2]);
-                       if (space > sk->sk_rcvbuf) {
-                               sk->sk_rcvbuf = space;
-
-                               /* Make the window clamp follow along.  */
-                               tp->window_clamp = new_clamp;
-                       }
+               rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
+               while (tcp_win_from_space(rcvmem) < tp->advmss)
+                       rcvmem += 128;
+
+               rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+               if (rcvbuf > sk->sk_rcvbuf) {
+                       sk->sk_rcvbuf = rcvbuf;
+
+                       /* Make the window clamp follow along.  */
+                       tp->window_clamp = rcvwin;
                }
        }
+       tp->rcvq_space.space = copied;
 
 new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
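
The rewritten tcp_rcv_space_adjust() above sizes the receive window from the bytes actually copied to the application in the last RTT: a 2x base plus a 16*advmss cushion, grown to roughly 3x when the measured rate jumped by 25% and to 4x at 50%, which signals slow start at the sender. A condensed sketch of just that sizing decision; the advmss value and sample numbers are made up.

#include <stdio.h>

/* Mirror of the window-sizing decision, stripped of socket state. */
static unsigned int drs_rcvwin(unsigned int copied, unsigned int prev_space,
                               unsigned int advmss)
{
        unsigned int rcvwin = (copied << 1) + 16 * advmss;      /* 2x + cushion */

        if (copied >= prev_space + (prev_space >> 2)) {         /* rate up >= 25% */
                if (copied >= prev_space + (prev_space >> 1))   /* rate up >= 50% */
                        rcvwin <<= 1;                           /* -> ~4x copied  */
                else
                        rcvwin += rcvwin >> 1;                  /* -> ~3x copied  */
        }
        return rcvwin;
}

int main(void)
{
        unsigned int advmss = 1460;

        printf("steady rate: rcvwin = %u\n", drs_rcvwin(100000,  90000, advmss));
        printf("+30%% rate:   rcvwin = %u\n", drs_rcvwin(130000, 100000, advmss));
        printf("+60%% rate:   rcvwin = %u\n", drs_rcvwin(160000, 100000, advmss));
        return 0;
}
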
@@ -713,7 +755,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
        if (tp->srtt > 8 + 2)
                do_div(rate, tp->srtt);
 
-       sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+       /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+        * without any lock. We want to make sure the compiler won't store
+        * intermediate values in this location.
+        */
+       ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+                                               sk->sk_max_pacing_rate);
 }
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
@@ -1284,7 +1331,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                tp->lost_cnt_hint -= tcp_skb_pcount(prev);
        }
 
-       TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+       TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               TCP_SKB_CB(prev)->end_seq++;
+
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
@@ -2970,7 +3020,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb;
        u32 now = tcp_time_stamp;
-       int fully_acked = true;
+       bool fully_acked = true;
        int flag = 0;
        u32 pkts_acked = 0;
        u32 reord = tp->packets_out;
@@ -3288,7 +3338,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                        tcp_init_cwnd_reduction(sk, true);
                        tcp_set_ca_state(sk, TCP_CA_CWR);
                        tcp_end_cwnd_reduction(sk);
-                       tcp_set_ca_state(sk, TCP_CA_Open);
+                       tcp_try_keep_open(sk);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPLOSSPROBERECOVERY);
                }
@@ -4701,15 +4751,7 @@ static void tcp_new_space(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_should_expand_sndbuf(sk)) {
-               int sndmem = SKB_TRUESIZE(max_t(u32,
-                                               tp->rx_opt.mss_clamp,
-                                               tp->mss_cache) +
-                                         MAX_TCP_HEADER);
-               int demanded = max_t(unsigned int, tp->snd_cwnd,
-                                    tp->reordering + 1);
-               sndmem *= 2 * demanded;
-               if (sndmem > sk->sk_sndbuf)
-                       sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+               tcp_sndbuf_expand(sk);
                tp->snd_cwnd_stamp = tcp_time_stamp;
        }
 
@@ -5674,8 +5716,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        tcp_init_congestion_control(sk);
 
                        tcp_mtup_init(sk);
-                       tcp_init_buffer_space(sk);
                        tp->copied_seq = tp->rcv_nxt;
+                       tcp_init_buffer_space(sk);
                }
                smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
@@ -5709,6 +5751,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                } else
                        tcp_init_metrics(sk);
 
+               tcp_update_pacing_rate(sk);
+
                /* Prevent spurious tcp_cwnd_restart() on first data packet */
                tp->lsndtime = tcp_time_stamp;
 
index b14266bb91eb5e3b1f43f3329dc2510c8be26a19..300ab2c93f29270d4dbdd2d1b65b64d8dc82200a 100644 (file)
@@ -835,11 +835,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        skb = tcp_make_synack(sk, dst, req, NULL);
 
        if (skb) {
-               __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+               __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
                skb_set_queue_mapping(skb, queue_mapping);
-               err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-                                           ireq->rmt_addr,
+               err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+                                           ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
                if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +972,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
 {
        union tcp_md5_addr *addr;
 
-       addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+       addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 
@@ -1149,8 +1149,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                saddr = inet_sk(sk)->inet_saddr;
                daddr = inet_sk(sk)->inet_daddr;
        } else if (req) {
-               saddr = inet_rsk(req)->loc_addr;
-               daddr = inet_rsk(req)->rmt_addr;
+               saddr = inet_rsk(req)->ir_loc_addr;
+               daddr = inet_rsk(req)->ir_rmt_addr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
@@ -1366,8 +1366,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
                kfree_skb(skb_synack);
                return -1;
        }
-       err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-                                   ireq->rmt_addr, ireq->opt);
+       err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+                                   ireq->ir_rmt_addr, ireq->opt);
        err = net_xmit_eval(err);
        if (!err)
                tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1410,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
-       tcp_init_buffer_space(child);
        tcp_init_metrics(child);
+       tcp_init_buffer_space(child);
 
        /* Queue the data carried in the SYN packet. We need to first
         * bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1502,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_openreq_init(req, &tmp_opt, skb);
 
        ireq = inet_rsk(req);
-       ireq->loc_addr = daddr;
-       ireq->rmt_addr = saddr;
+       ireq->ir_loc_addr = daddr;
+       ireq->ir_rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 
@@ -1578,15 +1578,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
            fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
 
        if (skb_synack) {
-               __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+               __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
                skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
        } else
                goto drop_and_free;
 
        if (likely(!do_fastopen)) {
                int err;
-               err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-                    ireq->rmt_addr, ireq->opt);
+               err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+                    ireq->ir_rmt_addr, ireq->opt);
                err = net_xmit_eval(err);
                if (err || want_cookie)
                        goto drop_and_free;
@@ -1644,9 +1644,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
-       newinet->inet_daddr   = ireq->rmt_addr;
-       newinet->inet_rcv_saddr = ireq->loc_addr;
-       newinet->inet_saddr           = ireq->loc_addr;
+       newinet->inet_daddr   = ireq->ir_rmt_addr;
+       newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+       newinet->inet_saddr           = ireq->ir_loc_addr;
        inet_opt              = ireq->opt;
        rcu_assign_pointer(newinet->inet_opt, inet_opt);
        ireq->opt             = NULL;
@@ -2194,18 +2194,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
-{
-       return hlist_nulls_empty(head) ? NULL :
-               list_entry(head->first, struct inet_timewait_sock, tw_node);
-}
-
-static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
-{
-       return !is_a_nulls(tw->tw_node.next) ?
-               hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
-}
-
 /*
  * Get next listener socket follow cur.  If cur is NULL, get first socket
  * starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2297,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
        return rc;
 }
 
-static inline bool empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(const struct tcp_iter_state *st)
 {
-       return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
-               hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+       return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
 }
 
 /*
@@ -2329,7 +2316,6 @@ static void *established_get_first(struct seq_file *seq)
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
                struct hlist_nulls_node *node;
-               struct inet_timewait_sock *tw;
                spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
                /* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2331,7 @@ static void *established_get_first(struct seq_file *seq)
                        rc = sk;
                        goto out;
                }
-               st->state = TCP_SEQ_STATE_TIME_WAIT;
-               inet_twsk_for_each(tw, node,
-                                  &tcp_hashinfo.ehash[st->bucket].twchain) {
-                       if (tw->tw_family != st->family ||
-                           !net_eq(twsk_net(tw), net)) {
-                               continue;
-                       }
-                       rc = tw;
-                       goto out;
-               }
                spin_unlock_bh(lock);
-               st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
 out:
        return rc;
@@ -2365,7 +2340,6 @@ out:
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
        struct sock *sk = cur;
-       struct inet_timewait_sock *tw;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
@@ -2373,45 +2347,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
        ++st->num;
        ++st->offset;
 
-       if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
-               tw = cur;
-               tw = tw_next(tw);
-get_tw:
-               while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
-                       tw = tw_next(tw);
-               }
-               if (tw) {
-                       cur = tw;
-                       goto out;
-               }
-               spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-               st->state = TCP_SEQ_STATE_ESTABLISHED;
-
-               /* Look for next non empty bucket */
-               st->offset = 0;
-               while (++st->bucket <= tcp_hashinfo.ehash_mask &&
-                               empty_bucket(st))
-                       ;
-               if (st->bucket > tcp_hashinfo.ehash_mask)
-                       return NULL;
-
-               spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-               sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
-       } else
-               sk = sk_nulls_next(sk);
+       sk = sk_nulls_next(sk);
 
        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
-                       goto found;
+                       return sk;
        }
 
-       st->state = TCP_SEQ_STATE_TIME_WAIT;
-       tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
-       goto get_tw;
-found:
-       cur = sk;
-out:
-       return cur;
+       spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+       ++st->bucket;
+       return established_get_first(seq);
 }
 
 static void *established_get_idx(struct seq_file *seq, loff_t pos)
@@ -2464,10 +2409,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
                if (rc)
                        break;
                st->bucket = 0;
+               st->state = TCP_SEQ_STATE_ESTABLISHED;
                /* Fallthrough */
        case TCP_SEQ_STATE_ESTABLISHED:
-       case TCP_SEQ_STATE_TIME_WAIT:
-               st->state = TCP_SEQ_STATE_ESTABLISHED;
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        break;
                rc = established_get_first(seq);
@@ -2524,7 +2468,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
-       case TCP_SEQ_STATE_TIME_WAIT:
                rc = established_get_next(seq, v);
                break;
        }
@@ -2548,7 +2491,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
                if (v != SEQ_START_TOKEN)
                        spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
                break;
-       case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2606,10 +2548,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
                i,
-               ireq->loc_addr,
+               ireq->ir_loc_addr,
                ntohs(inet_sk(sk)->inet_sport),
-               ireq->rmt_addr,
-               ntohs(ireq->rmt_port),
+               ireq->ir_rmt_addr,
+               ntohs(ireq->ir_rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
@@ -2707,6 +2649,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 static int tcp4_seq_show(struct seq_file *seq, void *v)
 {
        struct tcp_iter_state *st;
+       struct sock *sk = v;
        int len;
 
        if (v == SEQ_START_TOKEN) {
@@ -2721,14 +2664,14 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
-               get_tcp4_sock(v, seq, st->num, &len);
+               if (sk->sk_state == TCP_TIME_WAIT)
+                       get_timewait4_sock(v, seq, st->num, &len);
+               else
+                       get_tcp4_sock(v, seq, st->num, &len);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
                break;
-       case TCP_SEQ_STATE_TIME_WAIT:
-               get_timewait4_sock(v, seq, st->num, &len);
-               break;
        }
        seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
 out:
@@ -2806,6 +2749,7 @@ struct proto tcp_prot = {
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
+       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
index 559d4ae6ebf4ed32ff80605e3d1b5c3792b1752c..03e9154f7e687efef63c91878e33427672bc4036 100644 (file)
@@ -6,15 +6,10 @@
 #include <linux/memcontrol.h>
 #include <linux/module.h>
 
-static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
-{
-       return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
-}
-
 static void memcg_tcp_enter_memory_pressure(struct sock *sk)
 {
        if (sk->sk_cgrp->memory_pressure)
-               *sk->sk_cgrp->memory_pressure = 1;
+               sk->sk_cgrp->memory_pressure = 1;
 }
 EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
 
@@ -27,34 +22,24 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
         */
        struct res_counter *res_parent = NULL;
        struct cg_proto *cg_proto, *parent_cg;
-       struct tcp_memcontrol *tcp;
        struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-       struct net *net = current->nsproxy->net_ns;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
 
-       tcp = tcp_from_cgproto(cg_proto);
-
-       tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
-       tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
-       tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
-       tcp->tcp_memory_pressure = 0;
+       cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
+       cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
+       cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
+       cg_proto->memory_pressure = 0;
+       cg_proto->memcg = memcg;
 
        parent_cg = tcp_prot.proto_cgroup(parent);
        if (parent_cg)
-               res_parent = parent_cg->memory_allocated;
-
-       res_counter_init(&tcp->tcp_memory_allocated, res_parent);
-       percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+               res_parent = &parent_cg->memory_allocated;
 
-       cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
-       cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
-       cg_proto->sysctl_mem = tcp->tcp_prot_mem;
-       cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
-       cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
-       cg_proto->memcg = memcg;
+       res_counter_init(&cg_proto->memory_allocated, res_parent);
+       percpu_counter_init(&cg_proto->sockets_allocated, 0);
 
        return 0;
 }
@@ -63,21 +48,17 @@ EXPORT_SYMBOL(tcp_init_cgroup);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
        struct cg_proto *cg_proto;
-       struct tcp_memcontrol *tcp;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+       percpu_counter_destroy(&cg_proto->sockets_allocated);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
-       struct net *net = current->nsproxy->net_ns;
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
        u64 old_lim;
        int i;
@@ -90,16 +71,14 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
        if (val > RES_COUNTER_MAX)
                val = RES_COUNTER_MAX;
 
-       tcp = tcp_from_cgproto(cg_proto);
-
-       old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-       ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+       old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
+       ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
        if (ret)
                return ret;
 
        for (i = 0; i < 3; i++)
-               tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
-                                            net->ipv4.sysctl_tcp_mem[i]);
+               cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
+                                               sysctl_tcp_mem[i]);
 
        if (val == RES_COUNTER_MAX)
                clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
@@ -156,28 +135,24 @@ static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
 
 static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return default_val;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+       return res_counter_read_u64(&cg_proto->memory_allocated, type);
 }
 
 static u64 tcp_read_usage(struct mem_cgroup *memcg)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+       return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
 }
 
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -205,54 +180,25 @@ static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
        struct mem_cgroup *memcg;
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        memcg = mem_cgroup_from_css(css);
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
-       tcp = tcp_from_cgproto(cg_proto);
 
        switch (event) {
        case RES_MAX_USAGE:
-               res_counter_reset_max(&tcp->tcp_memory_allocated);
+               res_counter_reset_max(&cg_proto->memory_allocated);
                break;
        case RES_FAILCNT:
-               res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+               res_counter_reset_failcnt(&cg_proto->memory_allocated);
                break;
        }
 
        return 0;
 }
 
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
-{
-       struct tcp_memcontrol *tcp;
-       struct cg_proto *cg_proto;
-
-       cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
-       if (!cg_proto)
-               return 0;
-
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-}
-
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
-{
-       struct tcp_memcontrol *tcp;
-       struct cg_proto *cg_proto;
-
-       cg_proto = tcp_prot.proto_cgroup(memcg);
-       if (!cg_proto)
-               return;
-
-       tcp = tcp_from_cgproto(cg_proto);
-
-       tcp->tcp_prot_mem[idx] = val;
-}
-
 static struct cftype tcp_files[] = {
        {
                .name = "kmem.tcp.limit_in_bytes",
index 52f3c6b971d2def6854cf7d489022ad499ee0d24..4a2a84110dfb2ef68f8154d23958fbddae772775 100644 (file)
@@ -215,13 +215,15 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
-               addr.addr.a4 = inet_rsk(req)->rmt_addr;
+               addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
-               hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+               *(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+               hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
+#endif
        default:
                return NULL;
        }
@@ -240,7 +242,6 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 
 static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
 {
-       struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
@@ -252,11 +253,12 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               tw6 = inet6_twsk((struct sock *)tw);
-               *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
-               hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
+               *(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
+               hash = ipv6_addr_hash(&tw->tw_v6_daddr);
                break;
+#endif
        default:
                return NULL;
        }
@@ -288,10 +290,12 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
-               hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
+               *(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
+               hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                break;
+#endif
        default:
                return NULL;
        }
index 58a3e69aef6440d36061ea9e6291152442954239..97b684159861bc451219bf66359afa288049a63e 100644 (file)
@@ -293,12 +293,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
-                       struct inet6_timewait_sock *tw6;
 
-                       tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
-                       tw6 = inet6_twsk((struct sock *)tw);
-                       tw6->tw_v6_daddr = np->daddr;
-                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+                       tw->tw_v6_daddr = sk->sk_v6_daddr;
+                       tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_ipv6only = np->ipv6only;
                }
index 3a7525e6c08633dad9424bc1a9cb13bf26dfec1e..a7a5583eab04aff966521bcc6de4075fffd27e70 100644 (file)
@@ -14,7 +14,7 @@
 #include <net/tcp.h>
 #include <net/protocol.h>
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -56,6 +56,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
+                              SKB_GSO_IPIP |
+                              SKB_GSO_SIT |
                               SKB_GSO_MPLS |
                               SKB_GSO_UDP_TUNNEL |
                               0) ||
@@ -139,7 +141,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 out:
        return segs;
 }
-EXPORT_SYMBOL(tcp_tso_segment);
+EXPORT_SYMBOL(tcp_gso_segment);
 
 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
@@ -320,7 +322,7 @@ static int tcp4_gro_complete(struct sk_buff *skb)
 static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_send_check =       tcp_v4_gso_send_check,
-               .gso_segment    =       tcp_tso_segment,
+               .gso_segment    =       tcp_gso_segment,
                .gro_receive    =       tcp4_gro_receive,
                .gro_complete   =       tcp4_gro_complete,
        },
index 7c83cb8bf1378022ba3a68c56403ef40801cd3ae..672854664ff5c1d36783ac1bfb72fea481648ca1 100644 (file)
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
        unsigned int size = 0;
        unsigned int eff_sacks;
 
+       opts->options = 0;
+
 #ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (unlikely(*md5)) {
@@ -848,15 +850,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        BUG_ON(!skb || !tcp_skb_pcount(skb));
 
-       /* If congestion control is doing timestamping, we must
-        * take such a timestamp before we potentially clone/copy.
-        */
-       if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
-               __net_timestamp(skb);
-
-       if (likely(clone_it)) {
+       if (clone_it) {
                const struct sk_buff *fclone = skb + 1;
 
+               /* If congestion control is doing timestamping, we must
+                * take such a timestamp before we potentially clone/copy.
+                */
+               if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
+                       __net_timestamp(skb);
+
                if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
                             fclone->fclone == SKB_FCLONE_CLONE))
                        NET_INC_STATS_BH(sock_net(sk),
@@ -895,8 +897,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        skb_orphan(skb);
        skb->sk = sk;
-       skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
-                         tcp_wfree : sock_wfree;
+       skb->destructor = tcp_wfree;
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
        /* Build TCP header and checksum it. */
@@ -985,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
 {
-       if (skb->len <= mss_now || !sk_can_gso(sk) ||
-           skb->ip_summed == CHECKSUM_NONE) {
+       /* Make sure we own this skb before messing gso_size/gso_segs */
+       WARN_ON_ONCE(skb_cloned(skb));
+
+       if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
@@ -1066,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        if (nsize < 0)
                nsize = 0;
 
-       if (skb_cloned(skb) &&
-           skb_is_nonlinear(skb) &&
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;
 
        /* Get a new skb... force flag on. */
@@ -1840,7 +1841,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        while ((skb = tcp_send_head(sk))) {
                unsigned int limit;
 
-
                tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
                BUG_ON(!tso_segs);
 
@@ -1869,13 +1869,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                                break;
                }
 
-               /* TSQ : sk_wmem_alloc accounts skb truesize,
-                * including skb overhead. But thats OK.
+               /* TCP Small Queues :
+                * Control number of packets in qdisc/devices to two packets / or ~1 ms.
+                * This allows for :
+                *  - better RTT estimation and ACK scheduling
+                *  - faster recovery
+                *  - high rates
                 */
-               if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
+               limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
+
+               if (atomic_read(&sk->sk_wmem_alloc) > limit) {
                        set_bit(TSQ_THROTTLED, &tp->tsq_flags);
                        break;
                }
+
                limit = mss_now;
                if (tso_segs > 1 && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
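
The hunk above drops the fixed sysctl_tcp_limit_output_bytes test in favour of a limit that scales with the pacing rate: sk_pacing_rate >> 10 is roughly one millisecond worth of data at the current rate, floored at one skb's truesize so a single large packet can always be queued. A minimal userspace sketch of that arithmetic, with made-up pacing rates and an assumed truesize, just to show the scale of the resulting limit:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: compute a TSQ-style limit for a few hypothetical
 * pacing rates (bytes per second). The patched kernel code uses
 * max(skb->truesize, sk_pacing_rate >> 10), i.e. about 1 ms of data
 * at the current pacing rate, never less than one skb.
 */
static uint64_t tsq_limit(uint64_t pacing_rate, uint64_t skb_truesize)
{
	uint64_t limit = pacing_rate >> 10;	/* ~1/1024 s of data */

	return limit > skb_truesize ? limit : skb_truesize;
}

int main(void)
{
	uint64_t truesize = 2048;		/* assumed skb truesize */
	uint64_t rates[] = { 125000ULL,		/* 1 Mbit/s   */
			     12500000ULL,	/* 100 Mbit/s */
			     1250000000ULL };	/* 10 Gbit/s  */

	for (int i = 0; i < 3; i++)
		printf("rate %llu B/s -> limit %llu bytes\n",
		       (unsigned long long)rates[i],
		       (unsigned long long)tsq_limit(rates[i], truesize));
	return 0;
}

Shifting by 10 divides by 1024 rather than 1000, so the budget is slightly under a millisecond; the "~1 ms" in the comment is deliberate.
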
@@ -2337,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                int oldpcount = tcp_skb_pcount(skb);
 
                if (unlikely(oldpcount > 1)) {
+                       if (skb_unclone(skb, GFP_ATOMIC))
+                               return -ENOMEM;
                        tcp_init_tso_segs(sk, skb, cur_mss);
                        tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
                }
@@ -2344,21 +2353,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
        tcp_retrans_try_collapse(sk, skb, cur_mss);
 
-       /* Some Solaris stacks overoptimize and ignore the FIN on a
-        * retransmit when old data is attached.  So strip it off
-        * since it is cheap to do so and saves bytes on the network.
-        */
-       if (skb->len > 0 &&
-           (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
-           tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
-               if (!pskb_trim(skb, 0)) {
-                       /* Reuse, even though it does some unnecessary work */
-                       tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
-                                            TCP_SKB_CB(skb)->tcp_flags);
-                       skb->ip_summed = CHECKSUM_NONE;
-               }
-       }
-
        /* Make a copy, if the first transmission SKB clone we made
         * is still in somebody's hands, else make a clone.
         */
@@ -2727,8 +2721,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        th->syn = 1;
        th->ack = 1;
        TCP_ECN_make_synack(req, th);
-       th->source = ireq->loc_port;
-       th->dest = ireq->rmt_port;
+       th->source = htons(ireq->ir_num);
+       th->dest = ireq->ir_rmt_port;
        /* Setting of flags are superfluous here for callers (and ECE is
         * not even correctly set)
         */
index 611beab38a004aae36730d18bffb62928a2321b1..8b97d71e193b97cf18e424f01be911e34a614b32 100644 (file)
@@ -101,22 +101,6 @@ static inline int tcp_probe_avail(void)
                si4.sin_addr.s_addr = inet->inet_##mem##addr;   \
        } while (0)                                             \
 
-#if IS_ENABLED(CONFIG_IPV6)
-#define tcp_probe_copy_fl_to_si6(inet, si6, mem)               \
-       do {                                                    \
-               struct ipv6_pinfo *pi6 = inet->pinet6;          \
-               si6.sin6_family = AF_INET6;                     \
-               si6.sin6_port = inet->inet_##mem##port;         \
-               si6.sin6_addr = pi6->mem##addr;                 \
-               si6.sin6_flowinfo = 0; /* No need here. */      \
-               si6.sin6_scope_id = 0;  /* No need here. */     \
-       } while (0)
-#else
-#define tcp_probe_copy_fl_to_si6(fl, si6, mem)                 \
-       do {                                                    \
-               memset(&si6, 0, sizeof(si6));                   \
-       } while (0)
-#endif
 
 /*
  * Hook inserted to be called before each receive packet.
@@ -147,8 +131,17 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
                                break;
                        case AF_INET6:
-                               tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
-                               tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+                               memset(&p->src.v6, 0, sizeof(p->src.v6));
+                               memset(&p->dst.v6, 0, sizeof(p->dst.v6));
+#if IS_ENABLED(CONFIG_IPV6)
+                               p->src.v6.sin6_family = AF_INET6;
+                               p->src.v6.sin6_port = inet->inet_sport;
+                               p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
+
+                               p->dst.v6.sin6_family = AF_INET6;
+                               p->dst.v6.sin6_port = inet->inet_dport;
+                               p->dst.v6.sin6_addr = sk->sk_v6_daddr;
+#endif
                                break;
                        default:
                                BUG();
index 4b85e6f636c9e66dbb1cc0f4acd197c33cb6cfb6..af07b5b23ebf11e4df7cf3383f6508f985a5cf94 100644 (file)
@@ -374,9 +374,8 @@ void tcp_retransmit_timer(struct sock *sk)
                }
 #if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
                        LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
-                                      &np->daddr,
+                                      &sk->sk_v6_daddr,
                                       ntohs(inet->inet_dport), inet->inet_num,
                                       tp->snd_una, tp->snd_nxt);
                }
index 6c0eea2f8249bf6b08c85395c0ad4b4d8ef52b39..0531b99d8637bac1c4f19537a351f060fffa56ed 100644 (file)
@@ -15,10 +15,10 @@ struct vegas {
        u32     baseRTT;        /* the min of all Vegas RTT measurements seen (in usec) */
 };
 
-extern void tcp_vegas_init(struct sock *sk);
-extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
-extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+void tcp_vegas_init(struct sock *sk);
+void tcp_vegas_state(struct sock *sk, u8 ca_state);
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
+void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
+void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 #endif /* __TCP_VEGAS_H */
index 74d2c95db57f3768d62ee450ed6bab64ca09b26d..89909dd730ddd65a4eac4f18e8a38925962b8534 100644 (file)
 #include <linux/seq_file.h>
 #include <net/net_namespace.h>
 #include <net/icmp.h>
+#include <net/inet_hashtables.h>
 #include <net/route.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -219,7 +220,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                unsigned short first, last;
                DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
                rand = net_random();
@@ -406,6 +407,18 @@ static inline int compute_score2(struct sock *sk, struct net *net,
        return score;
 }
 
+static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
+                                const __u16 lport, const __be32 faddr,
+                                const __be16 fport)
+{
+       static u32 udp_ehash_secret __read_mostly;
+
+       net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
+
+       return __inet_ehashfn(laddr, lport, faddr, fport,
+                             udp_ehash_secret + net_hash_mix(net));
+}
+
 
 /* called with read_rcu_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
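
udp_ehashfn() gives UDP reuseport balancing its own hash secret instead of reusing inet_ehashfn(): the secret is drawn lazily, on first use, via net_get_random_once(), then combined with the per-namespace salt and passed to __inet_ehashfn(). A self-contained sketch of the same initialize-once-on-first-use pattern; the mixing function below is a toy stand-in, not the kernel's jhash-based __inet_ehashfn():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustration of the "lazily initialized hash secret" idea behind
 * net_get_random_once(): the secret is drawn once, on first use,
 * and every later call reuses the same value.
 */
static uint32_t ehash_secret;
static pthread_once_t secret_once = PTHREAD_ONCE_INIT;

static void init_secret(void)
{
	FILE *f = fopen("/dev/urandom", "rb");

	if (!f || fread(&ehash_secret, sizeof(ehash_secret), 1, f) != 1) {
		perror("urandom");
		exit(1);
	}
	fclose(f);
}

/* Toy mixing function standing in for __inet_ehashfn(). */
static uint32_t toy_ehashfn(uint32_t laddr, uint16_t lport,
			    uint32_t faddr, uint16_t fport)
{
	pthread_once(&secret_once, init_secret);
	return (laddr ^ faddr ^ ((uint32_t)lport << 16 | fport)) * 2654435761u
	       ^ ehash_secret;
}

int main(void)
{
	printf("hash = %08x\n", toy_ehashfn(0x0a000001, 4242, 0x0a000002, 53));
	return 0;
}

One motivation for drawing the secret at first use rather than at boot is that it can then be generated after the entropy pool has had time to be seeded.
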
@@ -429,8 +442,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet_ehashfn(net, daddr, hnum,
-                                                   saddr, sport);
+                               hash = udp_ehashfn(net, daddr, hnum,
+                                                  saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -510,8 +523,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet_ehashfn(net, daddr, hnum,
-                                                   saddr, sport);
+                               hash = udp_ehashfn(net, daddr, hnum,
+                                                  saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -565,6 +578,26 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup);
 
+static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
+                                      __be16 loc_port, __be32 loc_addr,
+                                      __be16 rmt_port, __be32 rmt_addr,
+                                      int dif, unsigned short hnum)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       if (!net_eq(sock_net(sk), net) ||
+           udp_sk(sk)->udp_port_hash != hnum ||
+           (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
+           (inet->inet_dport != rmt_port && inet->inet_dport) ||
+           (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
+           ipv6_only_sock(sk) ||
+           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+               return false;
+       if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
+               return false;
+       return true;
+}
+
 static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
                                             __be16 loc_port, __be32 loc_addr,
                                             __be16 rmt_port, __be32 rmt_addr,
@@ -575,20 +608,11 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
        unsigned short hnum = ntohs(loc_port);
 
        sk_nulls_for_each_from(s, node) {
-               struct inet_sock *inet = inet_sk(s);
-
-               if (!net_eq(sock_net(s), net) ||
-                   udp_sk(s)->udp_port_hash != hnum ||
-                   (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
-                   (inet->inet_dport != rmt_port && inet->inet_dport) ||
-                   (inet->inet_rcv_saddr &&
-                    inet->inet_rcv_saddr != loc_addr) ||
-                   ipv6_only_sock(s) ||
-                   (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
-                       continue;
-               if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
-                       continue;
-               goto found;
+               if (__udp_is_mcast_sock(net, s,
+                                       loc_port, loc_addr,
+                                       rmt_port, rmt_addr,
+                                       dif, hnum))
+                       goto found;
        }
        s = NULL;
 found:
@@ -658,7 +682,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
                break;
        case ICMP_REDIRECT:
                ipv4_sk_redirect(skb, sk);
-               break;
+               goto out;
        }
 
        /*
@@ -855,6 +879,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
@@ -938,7 +964,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
-       tos = RT_TOS(inet->tos);
+       tos = get_rttos(&ipc, inet);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
@@ -1403,8 +1429,10 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
 
-       if (inet_sk(sk)->inet_daddr)
+       if (inet_sk(sk)->inet_daddr) {
                sock_rps_save_rxhash(sk, skb);
+               sk_mark_napi_id(sk, skb);
+       }
 
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
@@ -1528,7 +1556,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        rc = 0;
 
-       ipv4_pktinfo_prepare(skb);
+       ipv4_pktinfo_prepare(sk, skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
@@ -1577,6 +1605,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
                kfree_skb(skb1);
 }
 
+static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       dst_hold(dst);
+       sk->sk_rx_dst = dst;
+}
+
 /*
  *     Multicasts and broadcasts go to each listener.
  *
@@ -1705,16 +1741,32 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (udp4_csum_init(skb, uh, proto))
                goto csum_error;
 
-       if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
-               return __udp4_lib_mcast_deliver(net, skb, uh,
-                               saddr, daddr, udptable);
+       if (skb->sk) {
+               int ret;
+               sk = skb->sk;
+
+               if (unlikely(sk->sk_rx_dst == NULL))
+                       udp_sk_rx_dst_set(sk, skb);
+
+               ret = udp_queue_rcv_skb(sk, skb);
+
+               /* a return value > 0 means to resubmit the input, but
+                * it wants the return to be -protocol, or 0
+                */
+               if (ret > 0)
+                       return -ret;
+               return 0;
+       } else {
+               if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+                       return __udp4_lib_mcast_deliver(net, skb, uh,
+                                       saddr, daddr, udptable);
 
-       sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+               sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+       }
 
        if (sk != NULL) {
                int ret;
 
-               sk_mark_napi_id(sk, skb);
                ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
@@ -1768,6 +1820,135 @@ drop:
        return 0;
 }
 
+/* We can only early demux multicast if there is a single matching socket.
+ * If more than one socket is found, return NULL.
+ */
+static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
+                                                 __be16 loc_port, __be32 loc_addr,
+                                                 __be16 rmt_port, __be32 rmt_addr,
+                                                 int dif)
+{
+       struct sock *sk, *result;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(loc_port);
+       unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
+       struct udp_hslot *hslot = &udp_table.hash[slot];
+
+       rcu_read_lock();
+begin:
+       count = 0;
+       result = NULL;
+       sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+               if (__udp_is_mcast_sock(net, sk,
+                                       loc_port, loc_addr,
+                                       rmt_port, rmt_addr,
+                                       dif, hnum)) {
+                       result = sk;
+                       ++count;
+               }
+       }
+       /*
+        * if the nulls value we got at the end of this lookup is
+        * not the expected one, we must restart lookup.
+        * We probably met an item that was moved to another chain.
+        */
+       if (get_nulls_value(node) != slot)
+               goto begin;
+
+       if (result) {
+               if (count != 1 ||
+                   unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+                       result = NULL;
+               else if (unlikely(!__udp_is_mcast_sock(net, result,
+                                                      loc_port, loc_addr,
+                                                      rmt_port, rmt_addr,
+                                                      dif, hnum))) {
+                       sock_put(result);
+                       result = NULL;
+               }
+       }
+       rcu_read_unlock();
+       return result;
+}
+
+/* For unicast we should only early demux connected sockets or we can
+ * break forwarding setups.  The chains here can be long so only check
+ * if the first socket is an exact match and if not move on.
+ */
+static struct sock *__udp4_lib_demux_lookup(struct net *net,
+                                           __be16 loc_port, __be32 loc_addr,
+                                           __be16 rmt_port, __be32 rmt_addr,
+                                           int dif)
+{
+       struct sock *sk, *result;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(loc_port);
+       unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+       unsigned int slot2 = hash2 & udp_table.mask;
+       struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+       INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
+       const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+
+       rcu_read_lock();
+       result = NULL;
+       udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+               if (INET_MATCH(sk, net, acookie,
+                              rmt_addr, loc_addr, ports, dif))
+                       result = sk;
+               /* Only check first socket in chain */
+               break;
+       }
+
+       if (result) {
+               if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+                       result = NULL;
+               else if (unlikely(!INET_MATCH(sk, net, acookie,
+                                             rmt_addr, loc_addr,
+                                             ports, dif))) {
+                       sock_put(result);
+                       result = NULL;
+               }
+       }
+       rcu_read_unlock();
+       return result;
+}
+
+void udp_v4_early_demux(struct sk_buff *skb)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       const struct udphdr *uh = udp_hdr(skb);
+       struct sock *sk;
+       struct dst_entry *dst;
+       struct net *net = dev_net(skb->dev);
+       int dif = skb->dev->ifindex;
+
+       /* validate the packet */
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+               return;
+
+       if (skb->pkt_type == PACKET_BROADCAST ||
+           skb->pkt_type == PACKET_MULTICAST)
+               sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+                                                  uh->source, iph->saddr, dif);
+       else if (skb->pkt_type == PACKET_HOST)
+               sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
+                                            uh->source, iph->saddr, dif);
+       else
+               return;
+
+       if (!sk)
+               return;
+
+       skb->sk = sk;
+       skb->destructor = sock_edemux;
+       dst = sk->sk_rx_dst;
+
+       if (dst)
+               dst = dst_check(dst, 0);
+       if (dst)
+               skb_dst_set_noref(skb, dst);
+}
+
 int udp_rcv(struct sk_buff *skb)
 {
        return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
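
udp_v4_early_demux() runs before the routing decision: for unicast it only matches a connected socket at the head of the hash chain, for multicast/broadcast it requires exactly one matching listener, and on a hit it attaches the socket plus the route cached in sk->sk_rx_dst so the per-packet FIB lookup can be skipped. The control flow is essentially a per-flow cache consulted ahead of an expensive lookup; a toy userspace sketch of that shape, where the flow key, cache slot and "routing decision" are all hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical flow key and cached-route entry, for illustration only. */
struct flow_key {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

struct cached_flow {
	struct flow_key key;
	int route_id;		/* stands in for a cached dst entry */
	bool valid;
};

#define CACHE_SIZE 16
static struct cached_flow cache[CACHE_SIZE];

static unsigned int flow_hash(const struct flow_key *k)
{
	return (k->saddr ^ k->daddr ^ k->sport ^ k->dport) % CACHE_SIZE;
}

/* "Early demux": try the per-flow cache before the full lookup. */
static int lookup_route(const struct flow_key *k)
{
	struct cached_flow *c = &cache[flow_hash(k)];

	if (c->valid && !memcmp(&c->key, k, sizeof(*k)))
		return c->route_id;		/* fast path */

	/* Fall back to the "full" lookup and cache the result. */
	int route = (int)(k->daddr % 4);	/* pretend routing decision */
	c->key = *k;
	c->route_id = route;
	c->valid = true;
	return route;
}

int main(void)
{
	struct flow_key k = { 0x0a000001, 0x0a000002, 4242, 53 };

	printf("first lookup:  route %d\n", lookup_route(&k)); /* slow path */
	printf("second lookup: route %d\n", lookup_route(&k)); /* cached */
	return 0;
}

The real code additionally validates the cached route with dst_check() and takes a reference on the matched socket, which the sketch leaves out.
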
index 5a681e298b905127cdd5f0655d136754ff6eaabb..f3c27899f62b914f7b406094472a47e2fe2df613 100644 (file)
@@ -5,30 +5,30 @@
 #include <net/protocol.h>
 #include <net/inet_common.h>
 
-extern int     __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void    __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
+int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 
-extern int     udp_v4_get_port(struct sock *sk, unsigned short snum);
+int udp_v4_get_port(struct sock *sk, unsigned short snum);
 
-extern int     udp_setsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, unsigned int optlen);
-extern int     udp_getsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, int __user *optlen);
+int udp_setsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, unsigned int optlen);
+int udp_getsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, int __user *optlen);
 
 #ifdef CONFIG_COMPAT
-extern int     compat_udp_setsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, unsigned int optlen);
-extern int     compat_udp_getsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, int __user *optlen);
+int compat_udp_setsockopt(struct sock *sk, int level, int optname,
+                         char __user *optval, unsigned int optlen);
+int compat_udp_getsockopt(struct sock *sk, int level, int optname,
+                         char __user *optval, int __user *optlen);
 #endif
-extern int     udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                           size_t len, int noblock, int flags, int *addr_len);
-extern int     udp_sendpage(struct sock *sk, struct page *page, int offset,
-                            size_t size, int flags);
-extern int     udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern void    udp_destroy_sock(struct sock *sk);
+int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len, int noblock, int flags, int *addr_len);
+int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+                int flags);
+int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
-extern int     udp4_seq_show(struct seq_file *seq, void *v);
+int udp4_seq_show(struct seq_file *seq, void *v);
 #endif
 #endif /* _UDP4_IMPL_H */
index f35eccaa855ebfbac508503c9111311d6728f8ff..83206de2bc7679dc20e4fdcf34f8b1f5cd7831ef 100644 (file)
@@ -52,6 +52,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 
                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
+                                     SKB_GSO_IPIP |
                                      SKB_GSO_GRE | SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
index b5663c37f089ed0afe33115bcbfad2555b8d0f48..31b18152528fe4dbf9e500ae0c9a2a1a5a3a2adf 100644 (file)
 #include <net/xfrm.h>
 
 /* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
 static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
 
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
 {
-       struct xfrm_tunnel __rcu **pprev;
-       struct xfrm_tunnel *t;
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
        int ret = -EEXIST;
        int priority = handler->priority;
 
@@ -50,10 +50,10 @@ err:
 }
 EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
 
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
 {
-       struct xfrm_tunnel __rcu **pprev;
-       struct xfrm_tunnel *t;
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
        int ret = -ENOENT;
 
        mutex_lock(&xfrm4_mode_tunnel_input_mutex);
@@ -134,7 +134,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct xfrm_tunnel *handler;
+       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
index 9a459be24af762b42e2d667618f7149c055e5ef6..ccde54248c8ca77d3efc173ce457aa5f0153337a 100644 (file)
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 
        memset(fl4, 0, sizeof(struct flowi4));
        fl4->flowi4_mark = skb->mark;
+       fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
 
        if (!ip_is_fragment(iph)) {
                switch (iph->protocol) {
index 11b13ea69db4e6c35c8b992de9055bf6a312d802..e1a8d903e3662cb1aa36def1ffa5208cd6d515a8 100644 (file)
@@ -153,6 +153,17 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
        ---help---
          Support for MIPv6 route optimization mode.
 
+config IPV6_VTI
+tristate "Virtual (secure) IPv6: tunneling"
+       select IPV6_TUNNEL
+       depends on INET6_XFRM_MODE_TUNNEL
+       ---help---
+         Tunneling means encapsulating data of one protocol type within
+         another protocol and sending it over a channel that understands the
+         encapsulating protocol. This can be used with xfrm mode tunnel to
+         provide the notion of a secure tunnel for IPsec and then use a
+         routing protocol on top.
+
 config IPV6_SIT
        tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
        select INET_TUNNEL
index 470a9c008e9b9e921045bb5b11b77569e53ac5c9..17bb830872db21e07080ed9f6a0ef93cbccee9b7 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
 obj-$(CONFIG_NETFILTER)        += netfilter/
 
+obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
index d6ff12617f36f9eabbf7c9744b3b8121a567c1bf..cd3fb301da38a970cd48302386428a8923f21c91 100644 (file)
@@ -1499,6 +1499,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
        return false;
 }
 
+/* Compares an address/prefix_len with addresses on device @dev.
+ * If one is found it returns true.
+ */
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+       const unsigned int prefix_len, struct net_device *dev)
+{
+       struct inet6_dev *idev;
+       struct inet6_ifaddr *ifa;
+       bool ret = false;
+
+       rcu_read_lock();
+       idev = __in6_dev_get(dev);
+       if (idev) {
+               read_lock_bh(&idev->lock);
+               list_for_each_entry(ifa, &idev->addr_list, if_list) {
+                       ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
+                       if (ret)
+                               break;
+               }
+               read_unlock_bh(&idev->lock);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(ipv6_chk_custom_prefix);
+
 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
 {
        struct inet6_dev *idev;
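
ipv6_chk_custom_prefix() walks the device's address list and compares only the leading prefix_len bits of each address via ipv6_prefix_equal(). A minimal userspace sketch of that bit-prefix comparison, not the kernel implementation: whole bytes go through memcmp(), the trailing partial byte through a mask:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Compare the leading prefix_len bits of two IPv6 addresses. */
static bool prefix_equal(const struct in6_addr *a, const struct in6_addr *b,
			 unsigned int prefix_len)
{
	unsigned int bytes = prefix_len / 8;
	unsigned int bits = prefix_len % 8;

	if (prefix_len > 128)
		return false;
	if (bytes && memcmp(a->s6_addr, b->s6_addr, bytes))
		return false;
	if (bits) {
		unsigned char mask = 0xff << (8 - bits);

		if ((a->s6_addr[bytes] ^ b->s6_addr[bytes]) & mask)
			return false;
	}
	return true;
}

int main(void)
{
	struct in6_addr a, b;

	inet_pton(AF_INET6, "2001:db8:1::1", &a);
	inet_pton(AF_INET6, "2001:db8:1:ffff::1", &b);
	printf("/48 equal: %d, /64 equal: %d\n",
	       prefix_equal(&a, &b, 48), prefix_equal(&a, &b, 64));
	return 0;
}
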
@@ -2193,43 +2220,21 @@ ok:
                        else
                                stored_lft = 0;
                        if (!update_lft && !create && stored_lft) {
-                               if (valid_lft > MIN_VALID_LIFETIME ||
-                                   valid_lft > stored_lft)
-                                       update_lft = 1;
-                               else if (stored_lft <= MIN_VALID_LIFETIME) {
-                                       /* valid_lft <= stored_lft is always true */
-                                       /*
-                                        * RFC 4862 Section 5.5.3e:
-                                        * "Note that the preferred lifetime of
-                                        *  the corresponding address is always
-                                        *  reset to the Preferred Lifetime in
-                                        *  the received Prefix Information
-                                        *  option, regardless of whether the
-                                        *  valid lifetime is also reset or
-                                        *  ignored."
-                                        *
-                                        *  So if the preferred lifetime in
-                                        *  this advertisement is different
-                                        *  than what we have stored, but the
-                                        *  valid lifetime is invalid, just
-                                        *  reset prefered_lft.
-                                        *
-                                        *  We must set the valid lifetime
-                                        *  to the stored lifetime since we'll
-                                        *  be updating the timestamp below,
-                                        *  else we'll set it back to the
-                                        *  minimum.
-                                        */
-                                       if (prefered_lft != ifp->prefered_lft) {
-                                               valid_lft = stored_lft;
-                                               update_lft = 1;
-                                       }
-                               } else {
-                                       valid_lft = MIN_VALID_LIFETIME;
-                                       if (valid_lft < prefered_lft)
-                                               prefered_lft = valid_lft;
-                                       update_lft = 1;
-                               }
+                               const u32 minimum_lft = min(
+                                       stored_lft, (u32)MIN_VALID_LIFETIME);
+                               valid_lft = max(valid_lft, minimum_lft);
+
+                               /* RFC4862 Section 5.5.3e:
+                                * "Note that the preferred lifetime of the
+                                *  corresponding address is always reset to
+                                *  the Preferred Lifetime in the received
+                                *  Prefix Information option, regardless of
+                                *  whether the valid lifetime is also reset or
+                                *  ignored."
+                                *
+                                * So we should always update prefered_lft here.
+                                */
+                               update_lft = 1;
                        }
 
                        if (update_lft) {
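
The rewritten branch replaces the nested special cases with a single clamp: the advertised valid lifetime is raised to at least min(stored_lft, MIN_VALID_LIFETIME), and the preferred lifetime is always refreshed as RFC 4862 section 5.5.3e requires. A quick worked example, assuming MIN_VALID_LIFETIME is two hours (7200 s) as defined in addrconf.c:

#include <stdio.h>

#define MIN_VALID_LIFETIME (2 * 3600)	/* assumed: 2 hours, as in addrconf.c */

static unsigned int clamp_valid_lft(unsigned int advertised,
				    unsigned int stored_lft)
{
	unsigned int minimum_lft = stored_lft < MIN_VALID_LIFETIME ?
				   stored_lft : MIN_VALID_LIFETIME;

	return advertised > minimum_lft ? advertised : minimum_lft;
}

int main(void)
{
	/* An advertisement trying to slam the lifetime down to 10 minutes
	 * cannot push it below two hours, or below what is already stored
	 * if that is smaller; larger lifetimes pass through unchanged.
	 */
	printf("%u %u %u\n",
	       clamp_valid_lft(600, 9000),	/* -> 7200  */
	       clamp_valid_lft(600, 3000),	/* -> 3000  */
	       clamp_valid_lft(90000, 9000));	/* -> 90000 */
	return 0;
}

So an advertisement cannot shrink an address's remaining validity below two hours (or below whatever shorter lifetime is already stored), while longer advertised lifetimes are accepted unchanged.
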
index 7c96100b021ef0558a739b6da719246548153b11..6468bda1f2b94c382fe7212135ed1245f71e221f 100644 (file)
@@ -110,11 +110,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
        int try_loading_module = 0;
        int err;
 
-       if (sock->type != SOCK_RAW &&
-           sock->type != SOCK_DGRAM &&
-           !inet_ehash_secret)
-               build_ehash_secret();
-
        /* Look for the requested type/protocol pair. */
 lookup_protocol:
        err = -ESOCKTNOSUPPORT;
@@ -364,7 +359,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        inet->inet_rcv_saddr = v4addr;
        inet->inet_saddr = v4addr;
 
-       np->rcv_saddr = addr->sin6_addr;
+       sk->sk_v6_rcv_saddr = addr->sin6_addr;
 
        if (!(addr_type & IPV6_ADDR_MULTICAST))
                np->saddr = addr->sin6_addr;
@@ -461,14 +456,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
                    peer == 1)
                        return -ENOTCONN;
                sin->sin6_port = inet->inet_dport;
-               sin->sin6_addr = np->daddr;
+               sin->sin6_addr = sk->sk_v6_daddr;
                if (np->sndflow)
                        sin->sin6_flowinfo = np->flow_label;
        } else {
-               if (ipv6_addr_any(&np->rcv_saddr))
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                        sin->sin6_addr = np->saddr;
                else
-                       sin->sin6_addr = np->rcv_saddr;
+                       sin->sin6_addr = sk->sk_v6_rcv_saddr;
 
                sin->sin6_port = inet->inet_sport;
        }
@@ -655,7 +650,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = sk->sk_protocol;
-               fl6.daddr = np->daddr;
+               fl6.daddr = sk->sk_v6_daddr;
                fl6.saddr = np->saddr;
                fl6.flowlabel = np->flow_label;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -870,8 +865,6 @@ static int __init inet6_init(void)
        if (err)
                goto out_sock_register_fail;
 
-       tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
        /*
         *      ipngwg API draft makes clear that the correct semantics
         *      for TCP and UDP is to consider one TCP and UDP instance
@@ -1028,52 +1021,4 @@ out_unregister_tcp_proto:
 }
 module_init(inet6_init);
 
-static void __exit inet6_exit(void)
-{
-       if (disable_ipv6_mod)
-               return;
-
-       /* First of all disallow new sockets creation. */
-       sock_unregister(PF_INET6);
-       /* Disallow any further netlink messages */
-       rtnl_unregister_all(PF_INET6);
-
-       udpv6_exit();
-       udplitev6_exit();
-       tcpv6_exit();
-
-       /* Cleanup code parts. */
-       ipv6_packet_cleanup();
-       ipv6_frag_exit();
-       ipv6_exthdrs_exit();
-       addrconf_cleanup();
-       ip6_flowlabel_cleanup();
-       ndisc_late_cleanup();
-       ip6_route_cleanup();
-#ifdef CONFIG_PROC_FS
-
-       /* Cleanup code parts. */
-       if6_proc_exit();
-       ipv6_misc_proc_exit();
-       udplite6_proc_exit();
-       raw6_proc_exit();
-#endif
-       ipv6_netfilter_fini();
-       ipv6_stub = NULL;
-       igmp6_cleanup();
-       ndisc_cleanup();
-       ip6_mr_cleanup();
-       icmpv6_cleanup();
-       rawv6_exit();
-
-       unregister_pernet_subsys(&inet6_net_ops);
-       proto_unregister(&rawv6_prot);
-       proto_unregister(&udplitev6_prot);
-       proto_unregister(&udpv6_prot);
-       proto_unregister(&tcpv6_prot);
-
-       rcu_barrier(); /* Wait for completion of call_rcu()'s */
-}
-module_exit(inet6_exit);
-
 MODULE_ALIAS_NETPROTO(PF_INET6);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 73784c3d4642e09f0f9f92d1f699769ec08258d9..82e1da3a40b915e65c2ecf15662415511cc91286 100644 (file)
@@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 48b6bd2a9a1451b7adf9a678ccce859003d57a79..a454b0ff57c7c67a91e2e5c887609e7a65e5a910 100644 (file)
@@ -107,16 +107,16 @@ ipv4_connected:
                if (err)
                        goto out;
 
-               ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
+               ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);
 
                if (ipv6_addr_any(&np->saddr) ||
                    ipv6_mapped_addr_any(&np->saddr))
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 
-               if (ipv6_addr_any(&np->rcv_saddr) ||
-                   ipv6_mapped_addr_any(&np->rcv_saddr)) {
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
+                   ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-                                              &np->rcv_saddr);
+                                              &sk->sk_v6_rcv_saddr);
                        if (sk->sk_prot->rehash)
                                sk->sk_prot->rehash(sk);
                }
@@ -145,7 +145,7 @@ ipv4_connected:
                }
        }
 
-       np->daddr = *daddr;
+       sk->sk_v6_daddr = *daddr;
        np->flow_label = fl6.flowlabel;
 
        inet->inet_dport = usin->sin6_port;
@@ -156,7 +156,7 @@ ipv4_connected:
         */
 
        fl6.flowi6_proto = sk->sk_protocol;
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
@@ -183,16 +183,16 @@ ipv4_connected:
        if (ipv6_addr_any(&np->saddr))
                np->saddr = fl6.saddr;
 
-       if (ipv6_addr_any(&np->rcv_saddr)) {
-               np->rcv_saddr = fl6.saddr;
+       if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+               sk->sk_v6_rcv_saddr = fl6.saddr;
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
                if (sk->sk_prot->rehash)
                        sk->sk_prot->rehash(sk);
        }
 
        ip6_dst_store(sk, dst,
-                     ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
-                     &np->daddr : NULL,
+                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                     &sk->sk_v6_daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
                      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
                      &np->saddr :
@@ -883,11 +883,10 @@ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
 void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
                             __u16 srcp, __u16 destp, int bucket)
 {
-       struct ipv6_pinfo *np = inet6_sk(sp);
        const struct in6_addr *dest, *src;
 
-       dest  = &np->daddr;
-       src   = &np->rcv_saddr;
+       dest  = &sp->sk_v6_daddr;
+       src   = &sp->sk_v6_rcv_saddr;
        seq_printf(seq,
                   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index d3618a78fcac4b1f6e606a904196de79235833b1..e67e63f9858d7feae9e6fba4de667edb8f81875d 100644 (file)
@@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e4311cbc8b4ecbf70ea1fb2e2f2415342be382cc..77bb8afb141d6349e3de622e0f20452552d70344 100644 (file)
@@ -70,20 +70,20 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
                                      struct flowi6 *fl6,
                                      const struct request_sock *req)
 {
-       struct inet6_request_sock *treq = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *final_p, final;
        struct dst_entry *dst;
 
        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_proto = IPPROTO_TCP;
-       fl6->daddr = treq->rmt_addr;
+       fl6->daddr = ireq->ir_v6_rmt_addr;
        final_p = fl6_update_dst(fl6, np->opt, &final);
-       fl6->saddr = treq->loc_addr;
-       fl6->flowi6_oif = treq->iif;
+       fl6->saddr = ireq->ir_v6_loc_addr;
+       fl6->flowi6_oif = ireq->ir_iif;
        fl6->flowi6_mark = sk->sk_mark;
-       fl6->fl6_dport = inet_rsk(req)->rmt_port;
-       fl6->fl6_sport = inet_rsk(req)->loc_port;
+       fl6->fl6_dport = ireq->ir_rmt_port;
+       fl6->fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
        dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -129,13 +129,13 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
                                                     lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
-               const struct inet6_request_sock *treq = inet6_rsk(req);
+               const struct inet_request_sock *ireq = inet_rsk(req);
 
-               if (inet_rsk(req)->rmt_port == rport &&
+               if (ireq->ir_rmt_port == rport &&
                    req->rsk_ops->family == AF_INET6 &&
-                   ipv6_addr_equal(&treq->rmt_addr, raddr) &&
-                   ipv6_addr_equal(&treq->loc_addr, laddr) &&
-                   (!treq->iif || treq->iif == iif)) {
+                   ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
+                   ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
+                   (!ireq->ir_iif || ireq->ir_iif == iif)) {
                        WARN_ON(req->sk != NULL);
                        *prevp = prev;
                        return req;
@@ -153,8 +153,8 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-       const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
-                                     inet_rsk(req)->rmt_port,
+       const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
+                                     inet_rsk(req)->ir_rmt_port,
                                      lopt->hash_rnd, lopt->nr_table_entries);
 
        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -165,11 +165,10 @@ EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
 {
-       struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
        sin6->sin6_family = AF_INET6;
-       sin6->sin6_addr = np->daddr;
+       sin6->sin6_addr = sk->sk_v6_daddr;
        sin6->sin6_port = inet_sk(sk)->inet_dport;
        /* We do not store received flowlabel for TCP */
        sin6->sin6_flowinfo = 0;
@@ -203,7 +202,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
 
        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_proto = sk->sk_protocol;
-       fl6->daddr = np->daddr;
+       fl6->daddr = sk->sk_v6_daddr;
        fl6->saddr = np->saddr;
        fl6->flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl6->flowlabel);
@@ -245,7 +244,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
        skb_dst_set_noref(skb, dst);
 
        /* Restore final destination back after routing done */
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
 
        res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
        rcu_read_unlock();
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 32b4a1675d826d8ce50e36dbf314456075e1660e..262e13c02ec27dea15154d9b4af5fd413dd1c504 100644 (file)
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
+static unsigned int inet6_ehashfn(struct net *net,
+                                 const struct in6_addr *laddr,
+                                 const u16 lport,
+                                 const struct in6_addr *faddr,
+                                 const __be16 fport)
+{
+       static u32 inet6_ehash_secret __read_mostly;
+       static u32 ipv6_hash_secret __read_mostly;
+
+       u32 lhash, fhash;
+
+       net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
+       net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+
+       lhash = (__force u32)laddr->s6_addr32[3];
+       fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+
+       return __inet6_ehashfn(lhash, lport, fhash, fport,
+                              inet6_ehash_secret + net_hash_mix(net));
+}
+
+static int inet6_sk_ehashfn(const struct sock *sk)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+       const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr;
+       const struct in6_addr *faddr = &sk->sk_v6_daddr;
+       const __u16 lport = inet->inet_num;
+       const __be16 fport = inet->inet_dport;
+       struct net *net = sock_net(sk);
+
+       return inet6_ehashfn(net, laddr, lport, faddr, fport);
+}
+
 int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
@@ -89,43 +122,22 @@ begin:
        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (sk->sk_hash != hash)
                        continue;
-               if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
-                               goto begintw;
-                       if (unlikely(!INET6_MATCH(sk, net, saddr, daddr,
-                                                 ports, dif))) {
-                               sock_put(sk);
-                               goto begin;
-                       }
-               goto out;
-               }
-       }
-       if (get_nulls_value(node) != slot)
-               goto begin;
-
-begintw:
-       /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_nulls_for_each_rcu(sk, node, &head->twchain) {
-               if (sk->sk_hash != hash)
+               if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
                        continue;
-               if (likely(INET6_TW_MATCH(sk, net, saddr, daddr,
-                                         ports, dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
-                               sk = NULL;
-                               goto out;
-                       }
-                       if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
-                                                    ports, dif))) {
-                               sock_put(sk);
-                               goto begintw;
-                       }
+               if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                        goto out;
+
+               if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
+                       sock_gen_put(sk);
+                       goto begin;
                }
+               goto found;
        }
        if (get_nulls_value(node) != slot)
-               goto begintw;
-       sk = NULL;
+               goto begin;
 out:
+       sk = NULL;
+found:
        rcu_read_unlock();
        return sk;
 }
@@ -140,11 +152,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
 
        if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
            sk->sk_family == PF_INET6) {
-               const struct ipv6_pinfo *np = inet6_sk(sk);
 
                score = 1;
-               if (!ipv6_addr_any(&np->rcv_saddr)) {
-                       if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+               if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
                                return -1;
                        score++;
                }
@@ -236,9 +247,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 {
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
-       const struct ipv6_pinfo *np = inet6_sk(sk);
-       const struct in6_addr *daddr = &np->rcv_saddr;
-       const struct in6_addr *saddr = &np->daddr;
+       const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
+       const struct in6_addr *saddr = &sk->sk_v6_daddr;
        const int dif = sk->sk_bound_dev_if;
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        struct net *net = sock_net(sk);
@@ -248,38 +258,28 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
-       struct inet_timewait_sock *tw;
+       struct inet_timewait_sock *tw = NULL;
        int twrefcnt = 0;
 
        spin_lock(lock);
 
-       /* Check TIME-WAIT sockets first. */
-       sk_nulls_for_each(sk2, node, &head->twchain) {
-               if (sk2->sk_hash != hash)
-                       continue;
-
-               if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr,
-                                         ports, dif))) {
-                       tw = inet_twsk(sk2);
-                       if (twsk_unique(sk, sk2, twp))
-                               goto unique;
-                       else
-                               goto not_unique;
-               }
-       }
-       tw = NULL;
-
-       /* And established part... */
        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;
-               if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif)))
+
+               if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) {
+                       if (sk2->sk_state == TCP_TIME_WAIT) {
+                               tw = inet_twsk(sk2);
+                               if (twsk_unique(sk, sk2, twp))
+                                       break;
+                       }
                        goto not_unique;
+               }
        }
 
-unique:
        /* Must record num and sport now. Otherwise we will see
-        * in hash table socket with a funny identity. */
+        * in hash table socket with a funny identity.
+        */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
@@ -312,9 +312,9 @@ not_unique:
 static inline u32 inet6_sk_port_offset(const struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
-       const struct ipv6_pinfo *np = inet6_sk(sk);
-       return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32,
-                                         np->daddr.s6_addr32,
+
+       return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
+                                         sk->sk_v6_daddr.s6_addr32,
                                          inet->inet_dport);
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5bec666aba61d464fab4e77684eedd4265143cf9..5550a8113a6dc5f202c0ccda2c7166e98e81a7e9 100644 (file)
@@ -1529,25 +1529,6 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
        fib6_walk(&c.w);
 }
 
-void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
-                   int prune, void *arg)
-{
-       struct fib6_table *table;
-       struct hlist_head *head;
-       unsigned int h;
-
-       rcu_read_lock();
-       for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-               head = &net->ipv6.fib_table_hash[h];
-               hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-                       read_lock_bh(&table->tb6_lock);
-                       fib6_clean_tree(net, &table->tb6_root,
-                                       func, prune, arg);
-                       read_unlock_bh(&table->tb6_lock);
-               }
-       }
-       rcu_read_unlock();
-}
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    int prune, void *arg)
 {
@@ -1782,3 +1763,189 @@ void fib6_gc_cleanup(void)
        unregister_pernet_subsys(&fib6_net_ops);
        kmem_cache_destroy(fib6_node_kmem);
 }
+
+#ifdef CONFIG_PROC_FS
+
+struct ipv6_route_iter {
+       struct seq_net_private p;
+       struct fib6_walker_t w;
+       loff_t skip;
+       struct fib6_table *tbl;
+       __u32 sernum;
+};
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+       struct rt6_info *rt = v;
+       struct ipv6_route_iter *iter = seq->private;
+
+       seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
+
+#ifdef CONFIG_IPV6_SUBTREES
+       seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
+#else
+       seq_puts(seq, "00000000000000000000000000000000 00 ");
+#endif
+       if (rt->rt6i_flags & RTF_GATEWAY)
+               seq_printf(seq, "%pi6", &rt->rt6i_gateway);
+       else
+               seq_puts(seq, "00000000000000000000000000000000");
+
+       seq_printf(seq, " %08x %08x %08x %08x %8s\n",
+                  rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+                  rt->dst.__use, rt->rt6i_flags,
+                  rt->dst.dev ? rt->dst.dev->name : "");
+       iter->w.leaf = NULL;
+       return 0;
+}
+
+static int ipv6_route_yield(struct fib6_walker_t *w)
+{
+       struct ipv6_route_iter *iter = w->args;
+
+       if (!iter->skip)
+               return 1;
+
+       do {
+               iter->w.leaf = iter->w.leaf->dst.rt6_next;
+               iter->skip--;
+               if (!iter->skip && iter->w.leaf)
+                       return 1;
+       } while (iter->w.leaf);
+
+       return 0;
+}
+
+static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
+{
+       memset(&iter->w, 0, sizeof(iter->w));
+       iter->w.func = ipv6_route_yield;
+       iter->w.root = &iter->tbl->tb6_root;
+       iter->w.state = FWS_INIT;
+       iter->w.node = iter->w.root;
+       iter->w.args = iter;
+       iter->sernum = iter->w.root->fn_sernum;
+       INIT_LIST_HEAD(&iter->w.lh);
+       fib6_walker_link(&iter->w);
+}
+
+static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+                                                   struct net *net)
+{
+       unsigned int h;
+       struct hlist_node *node;
+
+       if (tbl) {
+               h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
+               node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+       } else {
+               h = 0;
+               node = NULL;
+       }
+
+       while (!node && h < FIB6_TABLE_HASHSZ) {
+               node = rcu_dereference_bh(
+                       hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
+       }
+       return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
+}
+
+static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+{
+       if (iter->sernum != iter->w.root->fn_sernum) {
+               iter->sernum = iter->w.root->fn_sernum;
+               iter->w.state = FWS_INIT;
+               iter->w.node = iter->w.root;
+               WARN_ON(iter->w.skip);
+               iter->w.skip = iter->w.count;
+       }
+}
+
+static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       int r;
+       struct rt6_info *n;
+       struct net *net = seq_file_net(seq);
+       struct ipv6_route_iter *iter = seq->private;
+
+       if (!v)
+               goto iter_table;
+
+       n = ((struct rt6_info *)v)->dst.rt6_next;
+       if (n) {
+               ++*pos;
+               return n;
+       }
+
+iter_table:
+       ipv6_route_check_sernum(iter);
+       read_lock(&iter->tbl->tb6_lock);
+       r = fib6_walk_continue(&iter->w);
+       read_unlock(&iter->tbl->tb6_lock);
+       if (r > 0) {
+               if (v)
+                       ++*pos;
+               return iter->w.leaf;
+       } else if (r < 0) {
+               fib6_walker_unlink(&iter->w);
+               return NULL;
+       }
+       fib6_walker_unlink(&iter->w);
+
+       iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
+       if (!iter->tbl)
+               return NULL;
+
+       ipv6_route_seq_setup_walk(iter);
+       goto iter_table;
+}
+
+static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU_BH)
+{
+       struct net *net = seq_file_net(seq);
+       struct ipv6_route_iter *iter = seq->private;
+
+       rcu_read_lock_bh();
+       iter->tbl = ipv6_route_seq_next_table(NULL, net);
+       iter->skip = *pos;
+
+       if (iter->tbl) {
+               ipv6_route_seq_setup_walk(iter);
+               return ipv6_route_seq_next(seq, NULL, pos);
+       } else {
+               return NULL;
+       }
+}
+
+static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
+{
+       struct fib6_walker_t *w = &iter->w;
+       return w->node && !(w->state == FWS_U && w->node == w->root);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+       __releases(RCU_BH)
+{
+       struct ipv6_route_iter *iter = seq->private;
+
+       if (ipv6_route_iter_active(iter))
+               fib6_walker_unlink(&iter->w);
+
+       rcu_read_unlock_bh();
+}
+
+static const struct seq_operations ipv6_route_seq_ops = {
+       .start  = ipv6_route_seq_start,
+       .next   = ipv6_route_seq_next,
+       .stop   = ipv6_route_seq_stop,
+       .show   = ipv6_route_seq_show
+};
+
+int ipv6_route_open(struct inode *inode, struct file *file)
+{
+       return seq_open_net(inode, file, &ipv6_route_seq_ops,
+                           sizeof(struct ipv6_route_iter));
+}
+
+#endif /* CONFIG_PROC_FS */
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6b26e9feafb98eb8d269f553cbf8339a3341526b..bf4a9a084de5aa8f733318276d6e84cc37d5e249 100644 (file)
@@ -618,7 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
        struct ip6_tnl *tunnel = netdev_priv(dev);
        struct net_device *tdev;    /* Device to other host */
        struct ipv6hdr  *ipv6h;     /* Our new IP header */
-       unsigned int max_headroom;  /* The extra header space needed */
+       unsigned int max_headroom = 0; /* The extra header space needed */
        int    gre_hlen;
        struct ipv6_tel_txoption opt;
        int    mtu;
@@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 
-       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+       max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
 
        if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                if (t->parms.o_flags&GRE_SEQ)
                        addend += 4;
        }
+       t->hlen = addend;
 
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
@@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                }
                ip6_rt_put(rt);
        }
-
-       t->hlen = addend;
 }
 
 static int ip6gre_tnl_change(struct ip6_tnl *t,
@@ -1173,9 +1172,8 @@ done:
 
 static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct ip6_tnl *tunnel = netdev_priv(dev);
        if (new_mtu < 68 ||
-           new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+           new_mtu > 0xFFF8 - dev->hard_header_len)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index d82de72281009c1b93d1cbde1bac91c023ecd930..4b851692b1f6bed3fbe476c65672c45e28570b3e 100644 (file)
@@ -66,7 +66,6 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
        __skb_pull(skb, sizeof(*ipv6h));
        err = -EPROTONOSUPPORT;
 
-       rcu_read_lock();
        ops = rcu_dereference(inet6_offloads[
                ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
 
@@ -74,7 +73,6 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
                skb_reset_transport_header(skb);
                err = ops->callbacks.gso_send_check(skb);
        }
-       rcu_read_unlock();
 
 out:
        return err;
@@ -92,46 +90,58 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        u8 *prevhdr;
        int offset = 0;
        bool tunnel;
+       int nhoff;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
                     ~(SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
+                      SKB_GSO_IPIP |
+                      SKB_GSO_SIT |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_MPLS |
                       SKB_GSO_TCPV6 |
                       0)))
                goto out;
 
+       skb_reset_network_header(skb);
+       nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;
 
-       tunnel = skb->encapsulation;
+       tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+       if (tunnel)
+               features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
+
        ipv6h = ipv6_hdr(skb);
        __skb_pull(skb, sizeof(*ipv6h));
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
        proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
-       rcu_read_lock();
+
        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment)) {
                skb_reset_transport_header(skb);
                segs = ops->callbacks.gso_segment(skb, features);
        }
-       rcu_read_unlock();
 
        if (IS_ERR(segs))
                goto out;
 
        for (skb = segs; skb; skb = skb->next) {
-               ipv6h = ipv6_hdr(skb);
-               ipv6h->payload_len = htons(skb->len - skb->mac_len -
-                                          sizeof(*ipv6h));
+               ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
+               ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
+               if (tunnel) {
+                       skb_reset_inner_headers(skb);
+                       skb->encapsulation = 1;
+               }
+               skb->network_header = (u8 *)ipv6h - skb->head;
+
                if (!tunnel && proto == IPPROTO_UDP) {
                        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-                       fptr = (struct frag_hdr *)(skb_network_header(skb) +
-                               unfrag_ip6hlen);
+                       fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
                        fptr->frag_off = htons(offset);
                        if (skb->next != NULL)
                                fptr->frag_off |= htons(IP6_MF);
@@ -267,6 +277,13 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
        },
 };
 
+static const struct net_offload sit_offload = {
+       .callbacks = {
+               .gso_send_check = ipv6_gso_send_check,
+               .gso_segment    = ipv6_gso_segment,
+       },
+};
+
 static int __init ipv6_offload_init(void)
 {
 
@@ -278,6 +295,9 @@ static int __init ipv6_offload_init(void)
                pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
 
        dev_add_offload(&ipv6_packet_offload);
+
+       inet_add_offload(&sit_offload, IPPROTO_IPV6);
+
        return 0;
 }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3a692d5291636571266c6e26293bc7054228f4e3..91fb4e8212f52d434e6d072103f70dbb8d219326 100644 (file)
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock_bh();
-       nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+       nexthop = rt6_nexthop((struct rt6_info *)dst);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         */
        rt = (struct rt6_info *) *dst;
        rcu_read_lock_bh();
-       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
+       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
        err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
        rcu_read_unlock_bh();
 
@@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 
 {
        struct sk_buff *skb;
+       struct frag_hdr fhdr;
        int err;
 
        /* There is support for UDP large send offload by network
@@ -1034,33 +1035,26 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb->transport_header = skb->network_header + fragheaderlen;
 
                skb->protocol = htons(ETH_P_IPV6);
-               skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
-       }
-
-       err = skb_append_datato_frags(sk,skb, getfrag, from,
-                                     (length - transhdrlen));
-       if (!err) {
-               struct frag_hdr fhdr;
 
-               /* Specify the length of each IPv6 datagram fragment.
-                * It has to be a multiple of 8.
-                */
-               skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
-                                            sizeof(struct frag_hdr)) & ~7;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-               ipv6_select_ident(&fhdr, rt);
-               skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);
-
-               return 0;
+       } else if (skb_is_gso(skb)) {
+               goto append;
        }
-       /* There is not enough support do UPD LSO,
-        * so follow normal path
-        */
-       kfree_skb(skb);
 
-       return err;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       /* Specify the length of each IPv6 datagram fragment.
+        * It has to be a multiple of 8.
+        */
+       skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+                                    sizeof(struct frag_hdr)) & ~7;
+       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+       ipv6_select_ident(&fhdr, rt);
+       skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+
+append:
+       return skb_append_datato_frags(sk, skb, getfrag, from,
+                                      (length - transhdrlen));
 }
 
 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1227,27 +1221,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
         * --yoshfuji
         */
 
-       cork->length += length;
-       if (length > mtu) {
-               int proto = sk->sk_protocol;
-               if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
-                       ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
-                       return -EMSGSIZE;
-               }
-
-               if (proto == IPPROTO_UDP &&
-                   (rt->dst.dev->features & NETIF_F_UFO)) {
+       if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
+                                          sk->sk_protocol == IPPROTO_RAW)) {
+               ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
+               return -EMSGSIZE;
+       }
 
-                       err = ip6_ufo_append_data(sk, getfrag, from, length,
-                                                 hh_len, fragheaderlen,
-                                                 transhdrlen, mtu, flags, rt);
-                       if (err)
-                               goto error;
-                       return 0;
-               }
+       skb = skb_peek_tail(&sk->sk_write_queue);
+       cork->length += length;
+       if (((length > mtu) ||
+            (skb && skb_is_gso(skb))) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+           (rt->dst.dev->features & NETIF_F_UFO)) {
+               err = ip6_ufo_append_data(sk, getfrag, from, length,
+                                         hh_len, fragheaderlen,
+                                         transhdrlen, mtu, flags, rt);
+               if (err)
+                       goto error;
+               return 0;
        }
 
-       if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+       if (!skb)
                goto alloc_new_skb;
 
        while (length > 0) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2d8f4829575b2d410ce74014287b97361e2abf38..583b77e2f69be1d1499da479e2f8da1435d22a97 100644 (file)
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int
 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if (new_mtu < IPV6_MIN_MTU) {
-               return -EINVAL;
+       struct ip6_tnl *tnl = netdev_priv(dev);
+
+       if (tnl->parms.proto == IPPROTO_IPIP) {
+               if (new_mtu < 68)
+                       return -EINVAL;
+       } else {
+               if (new_mtu < IPV6_MIN_MTU)
+                       return -EINVAL;
        }
+       if (new_mtu > 0xFFF8 - dev->hard_header_len)
+               return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
 }
@@ -1731,8 +1739,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
                }
        }
 
-       t = rtnl_dereference(ip6n->tnls_wc[0]);
-       unregister_netdevice_queue(t->dev, &list);
        unregister_netdevice_many(&list);
 }
 
@@ -1752,6 +1758,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
new file mode 100644 (file)
index 0000000..ed94ba6
--- /dev/null
@@ -0,0 +1,1056 @@
+/*
+ *     IPv6 virtual tunneling interface
+ *
+ *     Copyright (C) 2013 secunet Security Networks AG
+ *
+ *     Author:
+ *     Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ *     Based on:
+ *     net/ipv6/ip6_tunnel.c
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/icmp.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmpv6.h>
+#include <linux/init.h>
+#include <linux/route.h>
+#include <linux/rtnetlink.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+       u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+       return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+static int vti6_dev_init(struct net_device *dev);
+static void vti6_dev_setup(struct net_device *dev);
+static struct rtnl_link_ops vti6_link_ops __read_mostly;
+
+static int vti6_net_id __read_mostly;
+struct vti6_net {
+       /* the vti6 tunnel fallback device */
+       struct net_device *fb_tnl_dev;
+       /* lists for storing tunnels in use */
+       struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+       struct ip6_tnl __rcu *tnls_wc[1];
+       struct ip6_tnl __rcu **tnls[2];
+};
+
+static struct net_device_stats *vti6_get_stats(struct net_device *dev)
+{
+       struct pcpu_tstats sum = { 0 };
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+               sum.rx_packets += tstats->rx_packets;
+               sum.rx_bytes   += tstats->rx_bytes;
+               sum.tx_packets += tstats->tx_packets;
+               sum.tx_bytes   += tstats->tx_bytes;
+       }
+       dev->stats.rx_packets = sum.rx_packets;
+       dev->stats.rx_bytes   = sum.rx_bytes;
+       dev->stats.tx_packets = sum.tx_packets;
+       dev->stats.tx_bytes   = sum.tx_bytes;
+       return &dev->stats;
+}
+
+#define for_each_vti6_tunnel_rcu(start) \
+       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/**
+ * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
+ *   @net: network namespace
+ *   @remote: the address of the tunnel exit-point
+ *   @local: the address of the tunnel entry-point
+ *
+ * Return:
+ *   tunnel matching given end-points if found,
+ *   else fallback tunnel if its device is up,
+ *   else %NULL
+ **/
+static struct ip6_tnl *
+vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
+               const struct in6_addr *local)
+{
+       unsigned int hash = HASH(remote, local);
+       struct ip6_tnl *t;
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr) &&
+                   (t->dev->flags & IFF_UP))
+                       return t;
+       }
+       t = rcu_dereference(ip6n->tnls_wc[0]);
+       if (t && (t->dev->flags & IFF_UP))
+               return t;
+
+       return NULL;
+}
+
+/**
+ * vti6_tnl_bucket - get head of list matching given tunnel parameters
+ *   @p: parameters containing tunnel end-points
+ *
+ * Description:
+ *   vti6_tnl_bucket() returns the head of the list matching the
+ *   &struct in6_addr entries laddr and raddr in @p.
+ *
+ * Return: head of IPv6 tunnel list
+ **/
+static struct ip6_tnl __rcu **
+vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       unsigned int h = 0;
+       int prio = 0;
+
+       if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+               prio = 1;
+               h = HASH(remote, local);
+       }
+       return &ip6n->tnls[prio][h];
+}
+
+static void
+vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);
+
+       rcu_assign_pointer(t->next , rtnl_dereference(*tp));
+       rcu_assign_pointer(*tp, t);
+}
+
+static void
+vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *iter;
+
+       for (tp = vti6_tnl_bucket(ip6n, &t->parms);
+            (iter = rtnl_dereference(*tp)) != NULL;
+            tp = &iter->next) {
+               if (t == iter) {
+                       rcu_assign_pointer(*tp, t->next);
+                       break;
+               }
+       }
+}
+
+static void vti6_dev_free(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+       free_netdev(dev);
+}
+
+static int vti6_tnl_create2(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err;
+
+       err = vti6_dev_init(dev);
+       if (err < 0)
+               goto out;
+
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto out;
+
+       strcpy(t->parms.name, dev->name);
+       dev->rtnl_link_ops = &vti6_link_ops;
+
+       dev_hold(dev);
+       vti6_tnl_link(ip6n, t);
+
+       return 0;
+
+out:
+       return err;
+}
+
+static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+{
+       struct net_device *dev;
+       struct ip6_tnl *t;
+       char name[IFNAMSIZ];
+       int err;
+
+       if (p->name[0])
+               strlcpy(name, p->name, IFNAMSIZ);
+       else
+               sprintf(name, "ip6_vti%%d");
+
+       dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
+       if (dev == NULL)
+               goto failed;
+
+       dev_net_set(dev, net);
+
+       t = netdev_priv(dev);
+       t->parms = *p;
+       t->net = dev_net(dev);
+
+       err = vti6_tnl_create2(dev);
+       if (err < 0)
+               goto failed_free;
+
+       return t;
+
+failed_free:
+       vti6_dev_free(dev);
+failed:
+       return NULL;
+}
+
+/**
+ * vti6_locate - find or create tunnel matching given parameters
+ *   @net: network namespace
+ *   @p: tunnel parameters
+ *   @create: != 0 if allowed to create new tunnel if no match found
+ *
+ * Description:
+ *   vti6_locate() first tries to locate an existing tunnel
+ *   based on @parms. If this is unsuccessful, but @create is set a new
+ *   tunnel device is created and registered for use.
+ *
+ * Return:
+ *   matching tunnel or NULL
+ **/
+static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
+                                  int create)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *t;
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       for (tp = vti6_tnl_bucket(ip6n, p);
+            (t = rtnl_dereference(*tp)) != NULL;
+            tp = &t->next) {
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr))
+                       return t;
+       }
+       if (!create)
+               return NULL;
+       return vti6_tnl_create(net, p);
+}
+
+/**
+ * vti6_dev_uninit - tunnel device uninitializer
+ *   @dev: the device to be destroyed
+ *
+ * Description:
+ *   vti6_dev_uninit() removes tunnel from its list
+ **/
+static void vti6_dev_uninit(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev == ip6n->fb_tnl_dev)
+               RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+       else
+               vti6_tnl_unlink(ip6n, t);
+       ip6_tnl_dst_reset(t);
+       dev_put(dev);
+}
+
+static int vti6_rcv(struct sk_buff *skb)
+{
+       struct ip6_tnl *t;
+       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+       rcu_read_lock();
+
+       if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
+                                &ipv6h->daddr)) != NULL) {
+               struct pcpu_tstats *tstats;
+
+               if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
+                       rcu_read_unlock();
+                       goto discard;
+               }
+
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+                       rcu_read_unlock();
+                       return 0;
+               }
+
+               if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
+                       t->dev->stats.rx_dropped++;
+                       rcu_read_unlock();
+                       goto discard;
+               }
+
+               tstats = this_cpu_ptr(t->dev->tstats);
+               tstats->rx_packets++;
+               tstats->rx_bytes += skb->len;
+
+               skb->mark = 0;
+               secpath_reset(skb);
+               skb->dev = t->dev;
+
+               rcu_read_unlock();
+               return 0;
+       }
+       rcu_read_unlock();
+       return 1;
+
+discard:
+       kfree_skb(skb);
+       return 0;
+}
+
+/**
+ * vti6_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   1 if conflict,
+ *   0 else
+ **/
+static inline bool
+vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
+{
+       return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+/**
+ * vti6_xmit - send a packet
+ *   @skb: the outgoing socket buffer
+ *   @dev: the outgoing tunnel device
+ **/
+static int vti6_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct flowi6 fl6;
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct net_device *tdev;
+       int err = -1;
+
+       if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+           !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+               return err;
+
+       dst = ip6_tnl_dst_check(t);
+       if (!dst) {
+               memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+
+               ndst = ip6_route_output(net, NULL, &fl6);
+
+               if (ndst->error)
+                       goto tx_err_link_failure;
+               ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(&fl6), NULL, 0);
+               if (IS_ERR(ndst)) {
+                       err = PTR_ERR(ndst);
+                       ndst = NULL;
+                       goto tx_err_link_failure;
+               }
+               dst = ndst;
+       }
+
+       if (!dst->xfrm || dst->xfrm->props.mode != XFRM_MODE_TUNNEL)
+               goto tx_err_link_failure;
+
+       tdev = dst->dev;
+
+       if (tdev == dev) {
+               stats->collisions++;
+               net_warn_ratelimited("%s: Local routing loop detected!\n",
+                                    t->parms.name);
+               goto tx_err_dst_release;
+       }
+
+
+       skb_dst_drop(skb);
+       skb_dst_set_noref(skb, dst);
+
+       ip6tunnel_xmit(skb, dev);
+       if (ndst) {
+               dev->mtu = dst_mtu(ndst);
+               ip6_tnl_dst_store(t, ndst);
+       }
+
+       return 0;
+tx_err_link_failure:
+       stats->tx_carrier_errors++;
+       dst_link_failure(skb);
+tx_err_dst_release:
+       dst_release(ndst);
+       return err;
+}
+
+static netdev_tx_t
+vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       int ret;
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IPV6):
+               ret = vti6_xmit(skb, dev);
+               break;
+       default:
+               goto tx_err;
+       }
+
+       if (ret < 0)
+               goto tx_err;
+
+       return NETDEV_TX_OK;
+
+tx_err:
+       stats->tx_errors++;
+       stats->tx_dropped++;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static void vti6_link_config(struct ip6_tnl *t)
+{
+       struct dst_entry *dst;
+       struct net_device *dev = t->dev;
+       struct __ip6_tnl_parm *p = &t->parms;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
+
+       memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+       memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+
+       /* Set up flowi template */
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
+       fl6->flowi6_oif = p->link;
+       fl6->flowi6_mark = be32_to_cpu(p->i_key);
+       fl6->flowi6_proto = p->proto;
+       fl6->flowlabel = 0;
+
+       p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
+                     IP6_TNL_F_CAP_PER_PACKET);
+       p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
+               dev->flags |= IFF_POINTOPOINT;
+       else
+               dev->flags &= ~IFF_POINTOPOINT;
+
+       dev->iflink = p->link;
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+
+               dst = ip6_route_output(dev_net(dev), NULL, fl6);
+               if (dst->error)
+                       return;
+
+               dst = xfrm_lookup(dev_net(dev), dst, flowi6_to_flowi(fl6),
+                                 NULL, 0);
+               if (IS_ERR(dst))
+                       return;
+
+               if (dst->dev) {
+                       dev->hard_header_len = dst->dev->hard_header_len;
+
+                       dev->mtu = dst_mtu(dst);
+
+                       if (dev->mtu < IPV6_MIN_MTU)
+                               dev->mtu = IPV6_MIN_MTU;
+               }
+               dst_release(dst);
+       }
+}
+
+/**
+ * vti6_tnl_change - update the tunnel parameters
+ *   @t: tunnel to be changed
+ *   @p: tunnel configuration parameters
+ *
+ * Description:
+ *   vti6_tnl_change() updates the tunnel parameters
+ **/
+static int
+vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
+{
+       t->parms.laddr = p->laddr;
+       t->parms.raddr = p->raddr;
+       t->parms.link = p->link;
+       t->parms.i_key = p->i_key;
+       t->parms.o_key = p->o_key;
+       t->parms.proto = p->proto;
+       ip6_tnl_dst_reset(t);
+       vti6_link_config(t);
+       return 0;
+}
+
+static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+       struct net *net = dev_net(t->dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err;
+
+       vti6_tnl_unlink(ip6n, t);
+       synchronize_net();
+       err = vti6_tnl_change(t, p);
+       vti6_tnl_link(ip6n, t);
+       netdev_state_change(t->dev);
+       return err;
+}
+
+static void
+vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->link = u->link;
+       p->i_key = u->i_key;
+       p->o_key = u->o_key;
+       p->proto = u->proto;
+
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
+{
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->link = p->link;
+       u->i_key = p->i_key;
+       u->o_key = p->o_key;
+       u->proto = p->proto;
+
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
+/**
+ * vti6_tnl_ioctl - configure vti6 tunnels from userspace
+ *   @dev: virtual device associated with tunnel
+ *   @ifr: parameters passed from userspace
+ *   @cmd: command to be performed
+ *
+ * Description:
+ *   vti6_ioctl() is used for managing vti6 tunnels
+ *   from userspace.
+ *
+ *   The possible commands are the following:
+ *     %SIOCGETTUNNEL: get tunnel parameters for device
+ *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
+ *     %SIOCCHGTUNNEL: change tunnel parameters to those given
+ *     %SIOCDELTUNNEL: delete tunnel
+ *
+ *   The fallback device "ip6_vti0", created during module
+ *   initialization, can be used for creating other tunnel devices.
+ *
+ * Return:
+ *   0 on success,
+ *   %-EFAULT if unable to copy data to or from userspace,
+ *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
+ *   %-EINVAL if passed tunnel parameters are invalid,
+ *   %-EEXIST if changing a tunnel's parameters would cause a conflict
+ *   %-ENODEV if attempting to change or delete a nonexisting device
+ **/
+static int
+vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       int err = 0;
+       struct ip6_tnl_parm2 p;
+       struct __ip6_tnl_parm p1;
+       struct ip6_tnl *t = NULL;
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       switch (cmd) {
+       case SIOCGETTUNNEL:
+               if (dev == ip6n->fb_tnl_dev) {
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       vti6_parm_from_user(&p1, &p);
+                       t = vti6_locate(net, &p1, 0);
+               } else {
+                       memset(&p, 0, sizeof(p));
+               }
+               if (t == NULL)
+                       t = netdev_priv(dev);
+               vti6_parm_to_user(&p, &t->parms);
+               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                       err = -EFAULT;
+               break;
+       case SIOCADDTUNNEL:
+       case SIOCCHGTUNNEL:
+               err = -EPERM;
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       break;
+               err = -EFAULT;
+               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                       break;
+               err = -EINVAL;
+               if (p.proto != IPPROTO_IPV6  && p.proto != 0)
+                       break;
+               vti6_parm_from_user(&p1, &p);
+               t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
+               if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+                       if (t != NULL) {
+                               if (t->dev != dev) {
+                                       err = -EEXIST;
+                                       break;
+                               }
+                       } else
+                               t = netdev_priv(dev);
+
+                       err = vti6_update(t, &p1);
+               }
+               if (t) {
+                       err = 0;
+                       vti6_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                               err = -EFAULT;
+
+               } else
+                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               break;
+       case SIOCDELTUNNEL:
+               err = -EPERM;
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       break;
+
+               if (dev == ip6n->fb_tnl_dev) {
+                       err = -EFAULT;
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                               break;
+                       err = -ENOENT;
+                       vti6_parm_from_user(&p1, &p);
+                       t = vti6_locate(net, &p1, 0);
+                       if (t == NULL)
+                               break;
+                       err = -EPERM;
+                       if (t->dev == ip6n->fb_tnl_dev)
+                               break;
+                       dev = t->dev;
+               }
+               err = 0;
+               unregister_netdevice(dev);
+               break;
+       default:
+               err = -EINVAL;
+       }
+       return err;
+}
+
+/**
+ * vti6_tnl_change_mtu - change mtu manually for tunnel device
+ *   @dev: virtual device associated with tunnel
+ *   @new_mtu: the new mtu
+ *
+ * Return:
+ *   0 on success,
+ *   %-EINVAL if mtu too small
+ **/
+static int vti6_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if (new_mtu < IPV6_MIN_MTU)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+static const struct net_device_ops vti6_netdev_ops = {
+       .ndo_uninit     = vti6_dev_uninit,
+       .ndo_start_xmit = vti6_tnl_xmit,
+       .ndo_do_ioctl   = vti6_ioctl,
+       .ndo_change_mtu = vti6_change_mtu,
+       .ndo_get_stats  = vti6_get_stats,
+};
+
+/**
+ * vti6_dev_setup - setup virtual tunnel device
+ *   @dev: virtual device associated with tunnel
+ *
+ * Description:
+ *   Initialize function pointers and device parameters
+ **/
+static void vti6_dev_setup(struct net_device *dev)
+{
+       struct ip6_tnl *t;
+
+       dev->netdev_ops = &vti6_netdev_ops;
+       dev->destructor = vti6_dev_free;
+
+       dev->type = ARPHRD_TUNNEL6;
+       dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
+       dev->mtu = ETH_DATA_LEN;
+       t = netdev_priv(dev);
+       dev->flags |= IFF_NOARP;
+       dev->addr_len = sizeof(struct in6_addr);
+       dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/**
+ * vti6_dev_init_gen - general initializer for all tunnel devices
+ *   @dev: virtual device associated with tunnel
+ **/
+static inline int vti6_dev_init_gen(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+
+       t->dev = dev;
+       t->net = dev_net(dev);
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+       return 0;
+}
+
+/**
+ * vti6_dev_init - initializer for all non-fallback tunnel devices
+ *   @dev: virtual device associated with tunnel
+ **/
+static int vti6_dev_init(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       int err = vti6_dev_init_gen(dev);
+
+       if (err)
+               return err;
+       vti6_link_config(t);
+       return 0;
+}
+
+/**
+ * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
+ *   @dev: fallback device
+ *
+ * Return: 0
+ **/
+static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err = vti6_dev_init_gen(dev);
+
+       if (err)
+               return err;
+
+       t->parms.proto = IPPROTO_IPV6;
+       dev_hold(dev);
+
+       vti6_link_config(t);
+
+       rcu_assign_pointer(ip6n->tnls_wc[0], t);
+       return 0;
+}
+
+static int vti6_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       return 0;
+}
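The validate callback above intentionally accepts anything and relies on the vti6_policy table further down for attribute type and length checking. Purely as an illustration of where stricter checks would live (hypothetical, not part of this patch, and assuming ip6_vti.c's existing includes), a pickier variant could reject unspecified endpoints:

/* Hypothetical stricter validate callback; illustration only. */
static int vti6_validate_strict(struct nlattr *tb[], struct nlattr *data[])
{
	struct in6_addr addr;

	if (!data)
		return 0;

	if (data[IFLA_VTI_LOCAL]) {
		nla_memcpy(&addr, data[IFLA_VTI_LOCAL], sizeof(addr));
		if (ipv6_addr_any(&addr))
			return -EINVAL;
	}
	if (data[IFLA_VTI_REMOTE]) {
		nla_memcpy(&addr, data[IFLA_VTI_REMOTE], sizeof(addr));
		if (ipv6_addr_any(&addr))
			return -EINVAL;
	}
	return 0;
}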
+
+static void vti6_netlink_parms(struct nlattr *data[],
+                              struct __ip6_tnl_parm *parms)
+{
+       memset(parms, 0, sizeof(*parms));
+
+       if (!data)
+               return;
+
+       if (data[IFLA_VTI_LINK])
+               parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
+
+       if (data[IFLA_VTI_LOCAL])
+               nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
+                          sizeof(struct in6_addr));
+
+       if (data[IFLA_VTI_REMOTE])
+               nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
+                          sizeof(struct in6_addr));
+
+       if (data[IFLA_VTI_IKEY])
+               parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
+
+       if (data[IFLA_VTI_OKEY])
+               parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+}
+
+static int vti6_newlink(struct net *src_net, struct net_device *dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *nt;
+
+       nt = netdev_priv(dev);
+       vti6_netlink_parms(data, &nt->parms);
+
+       nt->parms.proto = IPPROTO_IPV6;
+
+       if (vti6_locate(net, &nt->parms, 0))
+               return -EEXIST;
+
+       return vti6_tnl_create2(dev);
+}
+
+static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
+                          struct nlattr *data[])
+{
+       struct ip6_tnl *t;
+       struct __ip6_tnl_parm p;
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev == ip6n->fb_tnl_dev)
+               return -EINVAL;
+
+       vti6_netlink_parms(data, &p);
+
+       t = vti6_locate(net, &p, 0);
+
+       if (t) {
+               if (t->dev != dev)
+                       return -EEXIST;
+       } else
+               t = netdev_priv(dev);
+
+       return vti6_update(t, &p);
+}
+
+static size_t vti6_get_size(const struct net_device *dev)
+{
+       return
+               /* IFLA_VTI_LINK */
+               nla_total_size(4) +
+               /* IFLA_VTI_LOCAL */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_VTI_REMOTE */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_VTI_IKEY */
+               nla_total_size(4) +
+               /* IFLA_VTI_OKEY */
+               nla_total_size(4) +
+               0;
+}
+
+static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct __ip6_tnl_parm *parm = &tunnel->parms;
+
+       if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
+           nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
+                   &parm->laddr) ||
+           nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
+                   &parm->raddr) ||
+           nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
+           nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+       [IFLA_VTI_LINK]         = { .type = NLA_U32 },
+       [IFLA_VTI_LOCAL]        = { .len = sizeof(struct in6_addr) },
+       [IFLA_VTI_REMOTE]       = { .len = sizeof(struct in6_addr) },
+       [IFLA_VTI_IKEY]         = { .type = NLA_U32 },
+       [IFLA_VTI_OKEY]         = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vti6_link_ops __read_mostly = {
+       .kind           = "vti6",
+       .maxtype        = IFLA_VTI_MAX,
+       .policy         = vti6_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = vti6_dev_setup,
+       .validate       = vti6_validate,
+       .newlink        = vti6_newlink,
+       .changelink     = vti6_changelink,
+       .get_size       = vti6_get_size,
+       .fill_info      = vti6_fill_info,
+};
+
+static struct xfrm_tunnel_notifier vti6_handler __read_mostly = {
+       .handler        = vti6_rcv,
+       .priority       =       1,
+};
+
+static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
+{
+       int h;
+       struct ip6_tnl *t;
+       LIST_HEAD(list);
+
+       for (h = 0; h < HASH_SIZE; h++) {
+               t = rtnl_dereference(ip6n->tnls_r_l[h]);
+               while (t != NULL) {
+                       unregister_netdevice_queue(t->dev, &list);
+                       t = rtnl_dereference(t->next);
+               }
+       }
+
+       t = rtnl_dereference(ip6n->tnls_wc[0]);
+       unregister_netdevice_queue(t->dev, &list);
+       unregister_netdevice_many(&list);
+}
+
+static int __net_init vti6_init_net(struct net *net)
+{
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       struct ip6_tnl *t = NULL;
+       int err;
+
+       ip6n->tnls[0] = ip6n->tnls_wc;
+       ip6n->tnls[1] = ip6n->tnls_r_l;
+
+       err = -ENOMEM;
+       ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
+                                       vti6_dev_setup);
+
+       if (!ip6n->fb_tnl_dev)
+               goto err_alloc_dev;
+       dev_net_set(ip6n->fb_tnl_dev, net);
+
+       err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+       if (err < 0)
+               goto err_register;
+
+       err = register_netdev(ip6n->fb_tnl_dev);
+       if (err < 0)
+               goto err_register;
+
+       t = netdev_priv(ip6n->fb_tnl_dev);
+
+       strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
+       return 0;
+
+err_register:
+       vti6_dev_free(ip6n->fb_tnl_dev);
+err_alloc_dev:
+       return err;
+}
+
+static void __net_exit vti6_exit_net(struct net *net)
+{
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       rtnl_lock();
+       vti6_destroy_tunnels(ip6n);
+       rtnl_unlock();
+}
+
+static struct pernet_operations vti6_net_ops = {
+       .init = vti6_init_net,
+       .exit = vti6_exit_net,
+       .id   = &vti6_net_id,
+       .size = sizeof(struct vti6_net),
+};
+
+/**
+ * vti6_tunnel_init - register protocol and reserve needed resources
+ *
+ * Return: 0 on success
+ **/
+static int __init vti6_tunnel_init(void)
+{
+       int  err;
+
+       err = register_pernet_device(&vti6_net_ops);
+       if (err < 0)
+               goto out_pernet;
+
+       err = xfrm6_mode_tunnel_input_register(&vti6_handler);
+       if (err < 0) {
+               pr_err("%s: can't register vti6\n", __func__);
+               goto out;
+       }
+       err = rtnl_link_register(&vti6_link_ops);
+       if (err < 0)
+               goto rtnl_link_failed;
+
+       return 0;
+
+rtnl_link_failed:
+       xfrm6_mode_tunnel_input_deregister(&vti6_handler);
+out:
+       unregister_pernet_device(&vti6_net_ops);
+out_pernet:
+       return err;
+}
+
+/**
+ * vti6_tunnel_cleanup - free resources and unregister protocol
+ **/
+static void __exit vti6_tunnel_cleanup(void)
+{
+       rtnl_link_unregister(&vti6_link_ops);
+       if (xfrm6_mode_tunnel_input_deregister(&vti6_handler))
+               pr_info("%s: can't deregister vti6\n", __func__);
+
+       unregister_pernet_device(&vti6_net_ops);
+}
+
+module_init(vti6_tunnel_init);
+module_exit(vti6_tunnel_cleanup);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("vti6");
+MODULE_ALIAS_NETDEV("ip6_vti0");
+MODULE_AUTHOR("Steffen Klassert");
+MODULE_DESCRIPTION("IPv6 virtual tunnel interface");
index 5636a912074acb8ecf445d9d6df753ff8e3eba95..ce507d9e1c900d3990e6025b37087309f850eaca 100644 (file)
@@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                (struct ip_comp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index d1e2e8ef29c54abd86064728ec145f0478f360af..4919a8e6063ed81b608bfe497a3d7d96f7fb3ca8 100644 (file)
@@ -174,7 +174,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                        }
 
                        if (ipv6_only_sock(sk) ||
-                           !ipv6_addr_v4mapped(&np->daddr)) {
+                           !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                                retv = -EADDRNOTAVAIL;
                                break;
                        }
@@ -1011,7 +1011,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+                               src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxhlim) {
@@ -1026,7 +1026,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+                               src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr :
+                                                                    np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxohlim) {
index 096cd67b737c4b4eba2d42470d7a58e2e229153c..d18f9f903db62333983d3fad0e1ccb9298762c98 100644 (file)
@@ -2034,7 +2034,7 @@ static void mld_dad_timer_expire(unsigned long data)
                if (idev->mc_dad_count)
                        mld_dad_start_timer(idev, idev->mc_maxdelay);
        }
-       __in6_dev_put(idev);
+       in6_dev_put(idev);
 }
 
 static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2379,7 +2379,7 @@ static void mld_gq_timer_expire(unsigned long data)
 
        idev->mc_gq_running = 0;
        mld_send_report(idev, NULL);
-       __in6_dev_put(idev);
+       in6_dev_put(idev);
 }
 
 static void mld_ifc_timer_expire(unsigned long data)
@@ -2392,7 +2392,7 @@ static void mld_ifc_timer_expire(unsigned long data)
                if (idev->mc_ifc_count)
                        mld_ifc_start_timer(idev, idev->mc_maxdelay);
        }
-       __in6_dev_put(idev);
+       in6_dev_put(idev);
 }
 
 static void mld_ifc_event(struct inet6_dev *idev)
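The three hunks above change the MLD timer-expiry handlers from __in6_dev_put(), which only decrements the refcount, to in6_dev_put(), which can also free the inet6_dev once the last reference is dropped. The pairing they rely on, shown in isolation with hypothetical names (the mc_gq_timer field and the hold/put helpers are the real ones):

/* Sketch of the hold/put pairing around an MLD-style timer. */
static void example_start_timer(struct inet6_dev *idev, unsigned long delay)
{
	/* take a reference only if the timer was not already pending;
	 * the expiry handler below releases it */
	if (!mod_timer(&idev->mc_gq_timer, jiffies + delay))
		in6_dev_hold(idev);
}

static void example_timer_expire(unsigned long data)
{
	struct inet6_dev *idev = (struct inet6_dev *)data;

	/* ... per-timer work ... */
	in6_dev_put(idev);	/* may free idev; a bare __in6_dev_put() here
				 * could leak the final reference */
}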
index a7f842b29b67b2ca5ff2c2cbe13588e3098ce44b..7702f9e90a043ac458b63a2e0a07ed3975d37aa6 100644 (file)
@@ -25,6 +25,19 @@ config NF_CONNTRACK_IPV6
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_TABLES_IPV6
+       depends on NF_TABLES
+       tristate "IPv6 nf_tables support"
+
+config NFT_CHAIN_ROUTE_IPV6
+       depends on NF_TABLES_IPV6
+       tristate "IPv6 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV6
+       depends on NF_TABLES_IPV6
+       depends on NF_NAT_IPV6 && NFT_NAT
+       tristate "IPv6 nf_tables nat chain support"
+
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering)"
        depends on INET && IPV6
index 2b53738f798cd3a9898d89a2a1a76b48ffe983b8..d1b4928f34f7ba3f80200ec0b9db67c12d47715b 100644 (file)
@@ -23,6 +23,11 @@ obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
+# nf_tables
+obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
index 44400c216dc6361e4607fe9af3e2999639766a11..710238f58aa93c0319cf90adb0c203c35f8bc7e4 100644 (file)
@@ -349,6 +349,11 @@ ip6t_do_table(struct sk_buff *skb,
        local_bh_disable();
        addend = xt_write_recseq_begin();
        private = table->private;
+       /*
+        * Ensure we load private-> members after we've fetched the base
+        * pointer.
+        */
+       smp_read_barrier_depends();
        cpu        = smp_processor_id();
        table_base = private->entries[cpu];
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
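The barrier added above orders the loads of private->entries[] and private->jumpstack[] after the load of table->private itself; it pairs with the write side in xt_replace_table(), which fully initialises the new xt_table_info and issues a write barrier before swapping the pointer. The same publish/consume pattern in miniature, with made-up names ("cfg", "shared") and standard 3.12-era primitives:

/* Illustration only: dependency-ordered publication of a pointer. */
struct cfg { int a; int b; };
static struct cfg *shared;

static void publish(struct cfg *c)
{
	c->a = 1;
	c->b = 2;
	smp_wmb();			/* initialise fields before exposing the pointer */
	ACCESS_ONCE(shared) = c;
}

static int consume(void)
{
	struct cfg *c = ACCESS_ONCE(shared);

	if (!c)
		return 0;
	smp_read_barrier_depends();	/* don't satisfy c->a / c->b from stale data */
	return c->a + c->b;
}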
index 19cfea8dbcaa0547c85e40f4217105ef46c7b683..bf9f612c1bc24eb9eb0b26fbf7d0a62f74932735 100644 (file)
@@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        if (th == NULL)
                return NF_DROP;
 
-       synproxy_parse_options(skb, par->thoff, th, &opts);
+       if (!synproxy_parse_options(skb, par->thoff, th, &opts))
+               return NF_DROP;
 
        if (th->syn && !(th->ack || th->fin || th->rst)) {
                /* Initial SYN from client */
@@ -311,7 +312,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
@@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
 
                /* fall through */
        case TCP_CONNTRACK_SYN_SENT:
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
 
                if (!th->syn && th->ack &&
                    CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
                if (!th->syn || !th->ack)
                        break;
 
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
+
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
                        synproxy->tsoff = opts.tsval - synproxy->its;
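The synproxy change above is part of the series-wide conversion of netfilter hooks from taking a bare hook number to taking the registered nf_hook_ops, with the number now read from ops->hooknum. A minimal hook written against the new prototype might look like the following; the function and ops names are made up, while the struct fields and NF_* constants are the real ones.

/* Hypothetical minimal IPv6 hook using the post-series prototype. */
static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	pr_debug("packet on hook %u, dev %s\n", ops->hooknum,
		 in ? in->name : (out ? out->name : "?"));
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ops __read_mostly = {
	.hook		= example_hook,
	.owner		= THIS_MODULE,
	.pf		= NFPROTO_IPV6,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP6_PRI_FIRST,
};
/* registered from module init with nf_register_hook(&example_ops) */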
 
index 29b44b14c5ea84bfa50f96d9e6f4f9914dc1e828..ca7f6c1280861b2977dce643fdea349eb3ec5078 100644 (file)
@@ -32,13 +32,14 @@ static const struct xt_table packet_filter = {
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
+       return ip6t_do_table(skb, ops->hooknum, in, out,
+                            net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index c705907ae6ab4178f683c0432b45aff0630ad2f5..307bbb782d147011d689f04c92e0ba5ac7c13074 100644 (file)
@@ -76,17 +76,17 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       if (hook == NF_INET_LOCAL_OUT)
+       if (ops->hooknum == NF_INET_LOCAL_OUT)
                return ip6t_mangle_out(skb, out);
-       if (hook == NF_INET_POST_ROUTING)
-               return ip6t_do_table(skb, hook, in, out,
+       if (ops->hooknum == NF_INET_POST_ROUTING)
+               return ip6t_do_table(skb, ops->hooknum, in, out,
                                     dev_net(out)->ipv6.ip6table_mangle);
        /* INPUT/FORWARD */
-       return ip6t_do_table(skb, hook, in, out,
+       return ip6t_do_table(skb, ops->hooknum, in, out,
                             dev_net(in)->ipv6.ip6table_mangle);
 }
 
index 9b076d2d3a7b6ec6a495302efd97324e9bc95b14..84c7f33d0cf858115abdb5f359a658b574637d57 100644 (file)
@@ -63,7 +63,7 @@ static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv6_fn(unsigned int hooknum,
+nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -72,7 +72,7 @@ nf_nat_ipv6_fn(unsigned int hooknum,
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
-       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
        __be16 frag_off;
        int hdrlen;
        u8 nexthdr;
@@ -111,7 +111,8 @@ nf_nat_ipv6_fn(unsigned int hooknum,
 
                if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
                        if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
-                                                            hooknum, hdrlen))
+                                                            ops->hooknum,
+                                                            hdrlen))
                                return NF_DROP;
                        else
                                return NF_ACCEPT;
@@ -124,14 +125,14 @@ nf_nat_ipv6_fn(unsigned int hooknum,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                                goto oif_changed;
                }
                break;
@@ -140,11 +141,11 @@ nf_nat_ipv6_fn(unsigned int hooknum,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                        goto oif_changed;
        }
 
-       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
 
 oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
@@ -152,7 +153,7 @@ oif_changed:
 }
 
 static unsigned int
-nf_nat_ipv6_in(unsigned int hooknum,
+nf_nat_ipv6_in(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -161,7 +162,7 @@ nf_nat_ipv6_in(unsigned int hooknum,
        unsigned int ret;
        struct in6_addr daddr = ipv6_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
                skb_dst_drop(skb);
@@ -170,7 +171,7 @@ nf_nat_ipv6_in(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv6_out(unsigned int hooknum,
+nf_nat_ipv6_out(const struct nf_hook_ops *ops,
                struct sk_buff *skb,
                const struct net_device *in,
                const struct net_device *out,
@@ -187,7 +188,7 @@ nf_nat_ipv6_out(unsigned int hooknum,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -209,7 +210,7 @@ nf_nat_ipv6_out(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv6_local_fn(unsigned int hooknum,
+nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
@@ -224,7 +225,7 @@ nf_nat_ipv6_local_fn(unsigned int hooknum,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index 9a626d86720fc6a0d1b4eeffa79a85b47a0dec2f..5274740acecc93b4550dabdd7f48fac3c04f67ac 100644 (file)
@@ -19,13 +19,14 @@ static const struct xt_table packet_raw = {
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                  const struct net_device *in, const struct net_device *out,
                  int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
+       return ip6t_do_table(skb, ops->hooknum, in, out,
+                            net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index ce88d1d7e5255d69ee102229e463f12701bdac99..ab3b0219ecfa436c07eb5cb86af36bd04efbdfb7 100644 (file)
@@ -36,14 +36,15 @@ static const struct xt_table security_table = {
 };
 
 static unsigned int
-ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                       const struct net_device *in,
                       const struct net_device *out,
                       int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
+       return ip6t_do_table(skb, ops->hooknum, in, out,
+                            net->ipv6.ip6table_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
index d6e4dd8b58dfaf67a9f64fe8bfafcf2f9f6b24ea..486545eb42ce5c1a64ad55385e1bca31f2557687 100644 (file)
@@ -95,7 +95,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
        return NF_ACCEPT;
 }
 
-static unsigned int ipv6_helper(unsigned int hooknum,
+static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct net_device *in,
                                const struct net_device *out,
@@ -133,7 +133,7 @@ static unsigned int ipv6_helper(unsigned int hooknum,
        return helper->help(skb, protoff, ct, ctinfo);
 }
 
-static unsigned int ipv6_confirm(unsigned int hooknum,
+static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
@@ -219,16 +219,17 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
        return nf_conntrack_in(net, PF_INET6, hooknum, skb);
 }
 
-static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
 {
-       return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
+       return __ipv6_conntrack_in(dev_net(in), ops->hooknum, skb, in, out,
+                                  okfn);
 }
 
-static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -239,7 +240,8 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
+       return __ipv6_conntrack_in(dev_net(out), ops->hooknum, skb, in, out,
+                                  okfn);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
@@ -297,9 +299,9 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
        struct nf_conn *ct;
 
-       tuple.src.u3.in6 = inet6->rcv_saddr;
+       tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
        tuple.src.u.tcp.port = inet->inet_sport;
-       tuple.dst.u3.in6 = inet6->daddr;
+       tuple.dst.u3.in6 = sk->sk_v6_daddr;
        tuple.dst.u.tcp.port = inet->inet_dport;
        tuple.dst.protonum = sk->sk_protocol;
 
index dffdc1a389c57da1d16ebeda9b9aff44d6cd961a..4a258263d8ecbca612a008a390962fe0b9b68833 100644 (file)
@@ -144,12 +144,24 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
        return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 }
 
+static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
+                                const struct in6_addr *daddr)
+{
+       u32 c;
+
+       net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
+       c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+                        (__force u32)id, nf_frags.rnd);
+       return c & (INETFRAGS_HASHSZ - 1);
+}
+
+
 static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
        const struct frag_queue *nq;
 
        nq = container_of(q, struct frag_queue, q);
-       return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
+       return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
 }
 
 static void nf_skb_free(struct sk_buff *skb)
@@ -185,7 +197,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
        arg.ecn = ecn;
 
        read_lock_bh(&nf_frags.lock);
-       hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
+       hash = nf_hash_frag(id, src, dst);
 
        q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
        local_bh_enable();
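Both this conntrack reassembly code and net/ipv6/reassembly.c further down stop passing an externally seeded rnd and instead seed the hash lazily with net_get_random_once(), so the per-boot secret is generated on first use. Bucket selection stays a jhash over the flow identifiers; a self-contained sketch of the same pattern with hypothetical names (the helpers are real kernel APIs):

/* Illustration of the lazily seeded fragment-hash pattern used above. */
#include <linux/jhash.h>
#include <linux/net.h>

#define EXAMPLE_HASHSZ	64	/* must stay a power of two for the mask */

static u32 example_rnd;

static unsigned int example_hash(__be32 id, u32 saddr_hash, u32 daddr_hash)
{
	u32 c;

	/* generate the secret on first use instead of at module init */
	net_get_random_once(&example_rnd, sizeof(example_rnd));
	c = jhash_3words(saddr_hash, daddr_hash, (__force u32)id, example_rnd);
	return c & (EXAMPLE_HASHSZ - 1);
}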
index aacd121fe8c54365607f49e40679d7fd08dcd8e8..ec483aa3f60f82d7e66b0b121296774d0e56565f 100644 (file)
@@ -52,7 +52,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 
 }
 
-static unsigned int ipv6_defrag(unsigned int hooknum,
+static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct net_device *in,
                                const struct net_device *out,
@@ -66,7 +66,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
                return NF_ACCEPT;
 #endif
 
-       reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+       reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(ops->hooknum, skb));
        /* queued */
        if (reasm == NULL)
                return NF_STOLEN;
@@ -75,7 +75,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
        if (reasm == skb)
                return NF_ACCEPT;
 
-       nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+       nf_ct_frag6_output(ops->hooknum, reasm, (struct net_device *)in,
                           (struct net_device *)out, okfn);
 
        return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
new file mode 100644 (file)
index 0000000..d77db8a
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
+                                   struct sk_buff *skb,
+                                   const struct net_device *in,
+                                   const struct net_device *out,
+                                   int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
+               if (net_ratelimit())
+                       pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
+                               "packet\n");
+               return NF_ACCEPT;
+       }
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+               return NF_DROP;
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv6 __read_mostly = {
+       .family         = NFPROTO_IPV6,
+       .nhooks         = NF_INET_NUMHOOKS,
+       .owner          = THIS_MODULE,
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nft_ipv6_output,
+       },
+};
+
+static int nf_tables_ipv6_init_net(struct net *net)
+{
+       net->nft.ipv6 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.ipv6 == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.ipv6, &nft_af_ipv6, sizeof(nft_af_ipv6));
+
+       if (nft_register_afinfo(net, net->nft.ipv6) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.ipv6);
+       return -ENOMEM;
+}
+
+static void nf_tables_ipv6_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.ipv6);
+       kfree(net->nft.ipv6);
+}
+
+static struct pernet_operations nf_tables_ipv6_net_ops = {
+       .init   = nf_tables_ipv6_init_net,
+       .exit   = nf_tables_ipv6_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+                 struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       /* malformed packet, drop it */
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+               return NF_DROP;
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv6 = {
+       .family         = NFPROTO_IPV6,
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .hook_mask      = (1 << NF_INET_LOCAL_IN) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_FORWARD) |
+                         (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING),
+       .fn             = {
+               [NF_INET_LOCAL_IN]      = nft_do_chain_ipv6,
+               [NF_INET_LOCAL_OUT]     = nft_ipv6_output,
+               [NF_INET_FORWARD]       = nft_do_chain_ipv6,
+               [NF_INET_PRE_ROUTING]   = nft_do_chain_ipv6,
+               [NF_INET_POST_ROUTING]  = nft_do_chain_ipv6,
+       },
+};
+
+static int __init nf_tables_ipv6_init(void)
+{
+       nft_register_chain_type(&filter_ipv6);
+       return register_pernet_subsys(&nf_tables_ipv6_net_ops);
+}
+
+static void __exit nf_tables_ipv6_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_ipv6_net_ops);
+       nft_unregister_chain_type(&filter_ipv6);
+}
+
+module_init(nf_tables_ipv6_init);
+module_exit(nf_tables_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET6);
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
new file mode 100644 (file)
index 0000000..e86dcd7
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ipv6.h>
+
+/*
+ * IPv6 NAT chains
+ */
+
+static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
+                             struct sk_buff *skb,
+                             const struct net_device *in,
+                             const struct net_device *out,
+                             int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       struct nf_conn_nat *nat;
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+       __be16 frag_off;
+       int hdrlen;
+       u8 nexthdr;
+       struct nft_pktinfo pkt;
+       unsigned int ret;
+
+       if (ct == NULL || nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       nat = nfct_nat(ct);
+       if (nat == NULL) {
+               /* Conntrack module was loaded late, can't add extension. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL)
+                       return NF_ACCEPT;
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED + IP_CT_IS_REPLY:
+               nexthdr = ipv6_hdr(skb)->nexthdr;
+               hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+                                         &nexthdr, &frag_off);
+
+               if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+                       if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+                                                          ops->hooknum,
+                                                          hdrlen))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall through */
+       case IP_CT_NEW:
+               if (nf_nat_initialized(ct, maniptype))
+                       break;
+
+               nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+
+               ret = nft_do_chain_pktinfo(&pkt, ops);
+               if (ret != NF_ACCEPT)
+                       return ret;
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               }
+       default:
+               break;
+       }
+
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
+                                     const struct net_device *in,
+                                     const struct net_device *out,
+                                     int (*okfn)(struct sk_buff *))
+{
+       struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+       unsigned int ret;
+
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+               skb_dst_drop(skb);
+
+       return ret;
+}
+
+static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops,
+                                      struct sk_buff *skb,
+                                      const struct net_device *in,
+                                      const struct net_device *out,
+                                      int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo __maybe_unused;
+       const struct nf_conn *ct __maybe_unused;
+       unsigned int ret;
+
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                                     &ct->tuplehash[!dir].tuple.dst.u3) ||
+                   (ct->tuplehash[dir].tuple.src.u.all !=
+                    ct->tuplehash[!dir].tuple.dst.u.all))
+                       if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
+                               ret = NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
+                                 struct sk_buff *skb,
+                                 const struct net_device *in,
+                                 const struct net_device *out,
+                                 int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       const struct nf_conn *ct;
+       unsigned int ret;
+
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+                                     &ct->tuplehash[!dir].tuple.src.u3)) {
+                       if (ip6_route_me_harder(skb))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+                        ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET6))
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv6 = {
+       .family         = NFPROTO_IPV6,
+       .name           = "nat",
+       .type           = NFT_CHAIN_T_NAT,
+       .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .fn             = {
+               [NF_INET_PRE_ROUTING]   = nf_nat_ipv6_prerouting,
+               [NF_INET_POST_ROUTING]  = nf_nat_ipv6_postrouting,
+               [NF_INET_LOCAL_OUT]     = nf_nat_ipv6_output,
+               [NF_INET_LOCAL_IN]      = nf_nat_ipv6_fn,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_nat_ipv6_init(void)
+{
+       int err;
+
+       err = nft_register_chain_type(&nft_chain_nat_ipv6);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit nft_chain_nat_ipv6_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_nat_ipv6);
+}
+
+module_init(nft_chain_nat_ipv6_init);
+module_exit(nft_chain_nat_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
new file mode 100644 (file)
index 0000000..3fe40f0
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/route.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+                                       struct sk_buff *skb,
+                                       const struct net_device *in,
+                                       const struct net_device *out,
+                                       int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       struct nft_pktinfo pkt;
+       struct in6_addr saddr, daddr;
+       u_int8_t hop_limit;
+       u32 mark, flowlabel;
+
+       /* malformed packet, drop it */
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+               return NF_DROP;
+
+       /* save source/dest address, mark, hoplimit, flowlabel, priority */
+       memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
+       memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
+       mark = skb->mark;
+       hop_limit = ipv6_hdr(skb)->hop_limit;
+
+       /* flowlabel and prio (includes version, which shouldn't change either) */
+       flowlabel = *((u32 *)ipv6_hdr(skb));
+
+       ret = nft_do_chain_pktinfo(&pkt, ops);
+       if (ret != NF_DROP && ret != NF_QUEUE &&
+           (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
+            memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
+            skb->mark != mark ||
+            ipv6_hdr(skb)->hop_limit != hop_limit ||
+            flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
+               return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
+
+       return ret;
+}
+
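The flowlabel variable saved above is really the whole first 32-bit word of the IPv6 header, which packs version, traffic class and flow label, so the comparison after the chain run catches a change to any of the three. A hypothetical helper spelling out that layout (illustration only, not part of the patch):

/* How the saved 32-bit word decomposes; for illustration. */
static inline void ipv6_first_word_split(__be32 w, u8 *version, u8 *tclass,
					 u32 *flowlabel)
{
	u32 v = ntohl(w);

	*version   = v >> 28;		/* always 6 for IPv6 */
	*tclass    = (v >> 20) & 0xff;	/* traffic class ("prio") */
	*flowlabel = v & 0xfffff;	/* 20-bit flow label */
}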
+static struct nf_chain_type nft_chain_route_ipv6 = {
+       .family         = NFPROTO_IPV6,
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .fn             = {
+                [NF_INET_LOCAL_OUT]    = nf_route_table_hook,
+        },
+        .me            = THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+       return nft_register_chain_type(&nft_chain_route_ipv6);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_route_ipv6);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "route");
index 18f19df4189f19734564b5fcfbd46ec22b67a1e8..8815e31a87fed4ba51ebd78a6724df8a3a436b85 100644 (file)
@@ -116,7 +116,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
        }
 
        if (!iif)
index 58916bbb17284ed441611d0210c9514141ff9bf4..3c00842b0079240f1788c83bfab346d5879336d8 100644 (file)
@@ -77,20 +77,19 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
 
        sk_for_each_from(sk)
                if (inet_sk(sk)->inet_num == num) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
 
                        if (!net_eq(sock_net(sk), net))
                                continue;
 
-                       if (!ipv6_addr_any(&np->daddr) &&
-                           !ipv6_addr_equal(&np->daddr, rmt_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+                           !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
                                continue;
 
                        if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
                                continue;
 
-                       if (!ipv6_addr_any(&np->rcv_saddr)) {
-                               if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                               if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
                                        goto found;
                                if (is_multicast &&
                                    inet6_mc_check(sk, loc_addr, rmt_addr))
@@ -302,7 +301,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        }
 
        inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-       np->rcv_saddr = addr->sin6_addr;
+       sk->sk_v6_rcv_saddr = addr->sin6_addr;
        if (!(addr_type & IPV6_ADDR_MULTICAST))
                np->saddr = addr->sin6_addr;
        err = 0;
@@ -335,8 +334,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
                ip6_sk_update_pmtu(skb, sk, info);
                harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
        }
-       if (type == NDISC_REDIRECT)
+       if (type == NDISC_REDIRECT) {
                ip6_sk_redirect(skb, sk);
+               return;
+       }
        if (np->recverr) {
                u8 *payload = skb->data;
                if (!inet->hdrincl)
@@ -802,8 +803,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
-                   ipv6_addr_equal(daddr, &np->daddr))
-                       daddr = &np->daddr;
+                   ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+                       daddr = &sk->sk_v6_daddr;
 
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    sin6->sin6_scope_id &&
@@ -814,7 +815,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                        return -EDESTADDRREQ;
 
                proto = inet->inet_num;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
        }
 
index 1aeb473b2cc695d8d2b0a3696972ec9228455d14..cc85a9ba50101c5abf86cb3e6d493bfb9b680c74 100644 (file)
@@ -82,24 +82,24 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
  */
-unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-                            const struct in6_addr *daddr, u32 rnd)
+static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+                                   const struct in6_addr *daddr)
 {
        u32 c;
 
+       net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
        c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-                        (__force u32)id, rnd);
+                        (__force u32)id, ip6_frags.rnd);
 
        return c & (INETFRAGS_HASHSZ - 1);
 }
-EXPORT_SYMBOL_GPL(inet6_hash_frag);
 
 static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 {
        struct frag_queue *fq;
 
        fq = container_of(q, struct frag_queue, q);
-       return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
+       return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
 }
 
 bool ip6_frag_match(struct inet_frag_queue *q, void *a)
@@ -193,7 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
        arg.ecn = ecn;
 
        read_lock(&ip6_frags.lock);
-       hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+       hash = inet6_hash_frag(id, src, dst);
 
        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
index c979dd96d82a838534ae0105dbfb9e665ad410e6..5dc6ca6b66863a981b807df51341b35bfd93d2b6 100644 (file)
@@ -476,6 +476,24 @@ out:
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
+struct __rt6_probe_work {
+       struct work_struct work;
+       struct in6_addr target;
+       struct net_device *dev;
+};
+
+static void rt6_probe_deferred(struct work_struct *w)
+{
+       struct in6_addr mcaddr;
+       struct __rt6_probe_work *work =
+               container_of(w, struct __rt6_probe_work, work);
+
+       addrconf_addr_solict_mult(&work->target, &mcaddr);
+       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+       dev_put(work->dev);
+       kfree(w);
+}
+
 static void rt6_probe(struct rt6_info *rt)
 {
        struct neighbour *neigh;
@@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt)
 
        if (!neigh ||
            time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-               struct in6_addr mcaddr;
-               struct in6_addr *target;
+               struct __rt6_probe_work *work;
 
-               if (neigh) {
+               work = kmalloc(sizeof(*work), GFP_ATOMIC);
+
+               if (neigh && work)
                        neigh->updated = jiffies;
+
+               if (neigh)
                        write_unlock(&neigh->lock);
-               }
 
-               target = (struct in6_addr *)&rt->rt6i_gateway;
-               addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
+               if (work) {
+                       INIT_WORK(&work->work, rt6_probe_deferred);
+                       work->target = rt->rt6i_gateway;
+                       dev_hold(rt->dst.dev);
+                       work->dev = rt->dst.dev;
+                       schedule_work(&work->work);
+               }
        } else {
 out:
                write_unlock(&neigh->lock);
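rt6_probe() previously sent the neighbour solicitation directly while still holding neigh->lock; the hunk above defers the send to process context through a small work item that carries its own device reference. The same allocate/init/schedule/release pattern in isolation, with hypothetical names (the workqueue and refcount helpers are the real ones):

/* Sketch of the deferred-probe pattern introduced above. */
struct example_probe_work {
	struct work_struct work;
	struct net_device *dev;
};

static void example_probe_deferred(struct work_struct *w)
{
	struct example_probe_work *work =
		container_of(w, struct example_probe_work, work);

	/* ... send the probe from process context ... */
	dev_put(work->dev);	/* drop the reference taken when scheduling */
	kfree(work);
}

static void example_probe(struct net_device *dev)
{
	struct example_probe_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);	/* caller may hold locks */
	if (!work)
		return;

	INIT_WORK(&work->work, example_probe_deferred);
	dev_hold(dev);		/* keep the device alive until the work runs */
	work->dev = dev;
	schedule_work(&work->work);
}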
@@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
                        if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
-                       rt->rt6i_gateway = *daddr;
                }
 
                rt->rt6i_flags |= RTF_CACHE;
@@ -1137,7 +1160,6 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1236,7 +1258,6 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1258,7 +1279,6 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = msg->dest;
        fl6.saddr = iph->daddr;
 
@@ -1338,6 +1358,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
        atomic_set(&rt->dst.__refcnt, 1);
+       rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
@@ -1873,7 +1894,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
                        in6_dev_hold(rt->rt6i_idev);
                rt->dst.lastuse = jiffies;
 
-               rt->rt6i_gateway = ort->rt6i_gateway;
+               if (ort->rt6i_flags & RTF_GATEWAY)
+                       rt->rt6i_gateway = ort->rt6i_gateway;
+               else
+                       rt->rt6i_gateway = *dest;
                rt->rt6i_flags = ort->rt6i_flags;
                if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
                    (RTF_DEFAULT | RTF_ADDRCONF))
@@ -2160,6 +2184,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        else
                rt->rt6i_flags |= RTF_LOCAL;
 
+       rt->rt6i_gateway  = *addr;
        rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
@@ -2800,56 +2825,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 
 #ifdef CONFIG_PROC_FS
 
-struct rt6_proc_arg
-{
-       char *buffer;
-       int offset;
-       int length;
-       int skip;
-       int len;
-};
-
-static int rt6_info_route(struct rt6_info *rt, void *p_arg)
-{
-       struct seq_file *m = p_arg;
-
-       seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
-
-#ifdef CONFIG_IPV6_SUBTREES
-       seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
-#else
-       seq_puts(m, "00000000000000000000000000000000 00 ");
-#endif
-       if (rt->rt6i_flags & RTF_GATEWAY) {
-               seq_printf(m, "%pi6", &rt->rt6i_gateway);
-       } else {
-               seq_puts(m, "00000000000000000000000000000000");
-       }
-       seq_printf(m, " %08x %08x %08x %08x %8s\n",
-                  rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
-                  rt->dst.__use, rt->rt6i_flags,
-                  rt->dst.dev ? rt->dst.dev->name : "");
-       return 0;
-}
-
-static int ipv6_route_show(struct seq_file *m, void *v)
-{
-       struct net *net = (struct net *)m->private;
-       fib6_clean_all_ro(net, rt6_info_route, 0, m);
-       return 0;
-}
-
-static int ipv6_route_open(struct inode *inode, struct file *file)
-{
-       return single_open_net(inode, file, ipv6_route_show);
-}
-
 static const struct file_operations ipv6_route_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = ipv6_route_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release_net,
+       .release        = seq_release_net,
 };
 
 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
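The hunk above swaps the /proc/net/ipv6_route handler away from the single_open_net() style, so the release callback has to change in lockstep: single_release_net() pairs with single_open_net(), while seq_release_net() pairs with seq_open_net(). A minimal sketch of the matching open side, assuming the iterator table (called ipv6_route_seq_ops here purely for illustration) is introduced elsewhere in the series:

/* Sketch only; the seq_operations name is an assumption, not part of this hunk. */
static int ipv6_route_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipv6_route_seq_ops,
			    sizeof(struct seq_net_private));
}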
index 7ee5cb96db348ab5c75946e77d3068c3c447ba17..3a9038dd818d7588c950c4bca2ff56279683314c 100644 (file)
@@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
        return false;
 }
 
+/* Checks if an address matches an address on the tunnel interface.
+ * Used to detect the NAT of proto 41 packets and let them pass the
+ * spoofing test.
+ * Long story:
+ * This function is called after the packet has already been flagged as
+ * spoofed by is_spoofed_6rd.
+ * We may have a router doing NAT for proto 41 packets on behalf of an
+ * internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb will be
+ * translated to n.n.n.n/PREFIX:bbbb:bbbb, so is_spoofed_6rd will return
+ * true and the packet would be dropped.
+ * However, we can still check whether it is spoofed against the IP
+ * addresses associated with the interface.
+ */
+static bool only_dnatted(const struct ip_tunnel *tunnel,
+       const struct in6_addr *v6dst)
+{
+       int prefix_len;
+
+#ifdef CONFIG_IPV6_SIT_6RD
+       prefix_len = tunnel->ip6rd.prefixlen + 32
+               - tunnel->ip6rd.relay_prefixlen;
+#else
+       prefix_len = 48;
+#endif
+       return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
+}
+
+/* Returns true if a packet is spoofed */
+static bool packet_is_spoofed(struct sk_buff *skb,
+                             const struct iphdr *iph,
+                             struct ip_tunnel *tunnel)
+{
+       const struct ipv6hdr *ipv6h;
+
+       if (tunnel->dev->priv_flags & IFF_ISATAP) {
+               if (!isatap_chksrc(skb, iph, tunnel))
+                       return true;
+
+               return false;
+       }
+
+       if (tunnel->dev->flags & IFF_POINTOPOINT)
+               return false;
+
+       ipv6h = ipv6_hdr(skb);
+
+       if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
+               net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
+                                    &iph->saddr, &ipv6h->saddr,
+                                    &iph->daddr, &ipv6h->daddr);
+               return true;
+       }
+
+       if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
+               return false;
+
+       if (only_dnatted(tunnel, &ipv6h->daddr))
+               return false;
+
+       net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
+                            &iph->saddr, &ipv6h->saddr,
+                            &iph->daddr, &ipv6h->daddr);
+       return true;
+}
+
 static int ipip6_rcv(struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
@@ -586,19 +650,9 @@ static int ipip6_rcv(struct sk_buff *skb)
                IPCB(skb)->flags = 0;
                skb->protocol = htons(ETH_P_IPV6);
 
-               if (tunnel->dev->priv_flags & IFF_ISATAP) {
-                       if (!isatap_chksrc(skb, iph, tunnel)) {
-                               tunnel->dev->stats.rx_errors++;
-                               goto out;
-                       }
-               } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
-                       if (is_spoofed_6rd(tunnel, iph->saddr,
-                                          &ipv6_hdr(skb)->saddr) ||
-                           is_spoofed_6rd(tunnel, iph->daddr,
-                                          &ipv6_hdr(skb)->daddr)) {
-                               tunnel->dev->stats.rx_errors++;
-                               goto out;
-                       }
+               if (packet_is_spoofed(skb, iph, tunnel)) {
+                       tunnel->dev->stats.rx_errors++;
+                       goto out;
                }
 
                __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
@@ -748,7 +802,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
                if (neigh == NULL) {
-                       net_dbg_ratelimited("sit: nexthop == NULL\n");
+                       net_dbg_ratelimited("nexthop == NULL\n");
                        goto tx_error;
                }
 
@@ -777,7 +831,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
                if (neigh == NULL) {
-                       net_dbg_ratelimited("sit: nexthop == NULL\n");
+                       net_dbg_ratelimited("nexthop == NULL\n");
                        goto tx_error;
                }
 
@@ -879,10 +933,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                ttl = iph6->hop_limit;
        tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
+       if (IS_ERR(skb))
+               goto out;
 
        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
                            ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@ -892,8 +945,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 tx_error_icmp:
        dst_link_failure(skb);
 tx_error:
-       dev->stats.tx_errors++;
        dev_kfree_skb(skb);
+out:
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
@@ -902,13 +956,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr  *tiph = &tunnel->parms.iph;
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+       if (IS_ERR(skb))
+               goto out;
 
        ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
        return NETDEV_TX_OK;
+out:
+       dev->stats.tx_errors++;
+       return NETDEV_TX_OK;
 }
 
 static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
@@ -1238,6 +1294,12 @@ static void ipip6_dev_free(struct net_device *dev)
        free_netdev(dev);
 }
 
+#define SIT_FEATURES (NETIF_F_SG          | \
+                     NETIF_F_FRAGLIST     | \
+                     NETIF_F_HIGHDMA      | \
+                     NETIF_F_GSO_SOFTWARE | \
+                     NETIF_F_HW_CSUM)
+
 static void ipip6_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipip6_netdev_ops;
@@ -1251,6 +1313,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
+       dev->features           |= SIT_FEATURES;
+       dev->hw_features        |= SIT_FEATURES;
 }
 
 static int ipip6_tunnel_init(struct net_device *dev)
@@ -1612,6 +1676,7 @@ static int __net_init sit_init_net(struct net *net)
                goto err_alloc_dev;
        }
        dev_net_set(sitn->fb_tunnel_dev, net);
+       sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
@@ -1646,7 +1711,6 @@ static void __net_exit sit_exit_net(struct net *net)
 
        rtnl_lock();
        sit_destroy_tunnels(sitn, &list);
-       unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
 }
index bf63ac8a49b9df8601a5e0dabc4074e65007f94b..535a3ad262f18d7dcc04586fc55c3f0e9d4afe8b 100644 (file)
 #define COOKIEBITS 24  /* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-/* Table must be sorted. */
+static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS];
+
+/* RFC 2460, Section 8.3:
+ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
+ *
+ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
+ * using higher values than ipv4 tcp syncookies can.
+ * The other values are chosen based on ethernet (1500 and 9k MTU), plus
+ * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
+ */
 static __u16 const msstab[] = {
-       64,
-       512,
-       536,
-       1280 - 60,
+       1280 - 60, /* IPV6_MIN_MTU - 60 */
        1480 - 60,
        1500 - 60,
-       4460 - 60,
        9000 - 60,
 };
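The constant 60 subtracted from each entry is the fixed IPv6 (40 byte) plus TCP (20 byte) header overhead, so the smallest legal value is 1280 - 60 = 1220. The cookie code consumes this table by scanning down for the largest entry that does not exceed the peer's advertised MSS; a sketch of that selection, with a made-up helper name:

/* Illustration only; mirrors the descending scan in __cookie_v6_init_sequence(). */
static int pick_mssind(__u16 mss)
{
	int mssind;

	for (mssind = ARRAY_SIZE(msstab) - 1; mssind; mssind--)
		if (mss >= msstab[mssind])
			break;
	return mssind;	/* index 0 is the 1220-byte fallback */
}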
 
-/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
-
 static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                           struct request_sock *req,
                                           struct dst_entry *dst)
@@ -66,14 +63,18 @@ static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
                       __be16 sport, __be16 dport, u32 count, int c)
 {
-       __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
+       __u32 *tmp;
+
+       net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
+
+       tmp  = __get_cpu_var(ipv6_cookie_scratch);
 
        /*
         * we have 320 bits of information to hash, copy in the remaining
-        * 192 bits required for sha_transform, from the syncookie_secret
+        * 192 bits required for sha_transform, from the syncookie6_secret
         * and overwrite the digest with the secret
         */
-       memcpy(tmp + 10, syncookie_secret[c], 44);
+       memcpy(tmp + 10, syncookie6_secret[c], 44);
        memcpy(tmp, saddr, 16);
        memcpy(tmp + 4, daddr, 16);
        tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
@@ -86,8 +87,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
 static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
                                   const struct in6_addr *daddr,
                                   __be16 sport, __be16 dport, __u32 sseq,
-                                  __u32 count, __u32 data)
+                                  __u32 data)
 {
+       u32 count = tcp_cookie_time();
        return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
                sseq + (count << COOKIEBITS) +
                ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -96,15 +98,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
 
 static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
                                  const struct in6_addr *daddr, __be16 sport,
-                                 __be16 dport, __u32 sseq, __u32 count,
-                                 __u32 maxdiff)
+                                 __be16 dport, __u32 sseq)
 {
-       __u32 diff;
+       __u32 diff, count = tcp_cookie_time();
 
        cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
 
        diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
-       if (diff >= maxdiff)
+       if (diff >= MAX_SYNCOOKIE_AGE)
                return (__u32)-1;
 
        return (cookie -
@@ -125,8 +126,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
        *mssp = msstab[mssind];
 
        return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
-                                    th->dest, ntohl(th->seq),
-                                    jiffies / (HZ * 60), mssind);
+                                    th->dest, ntohl(th->seq), mssind);
 }
 EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
 
@@ -146,8 +146,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 {
        __u32 seq = ntohl(th->seq) - 1;
        __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
-                                           th->source, th->dest, seq,
-                                           jiffies / (HZ * 60), COUNTER_TRIES);
+                                           th->source, th->dest, seq);
 
        return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
@@ -157,7 +156,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_options_received tcp_opt;
        struct inet_request_sock *ireq;
-       struct inet6_request_sock *ireq6;
        struct tcp_request_sock *treq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +192,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ireq = inet_rsk(req);
-       ireq6 = inet6_rsk(req);
        treq = tcp_rsk(req);
        treq->listener = NULL;
 
@@ -202,22 +199,22 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out_free;
 
        req->mss = mss;
-       ireq->rmt_port = th->source;
-       ireq->loc_port = th->dest;
-       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
-       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+       ireq->ir_rmt_port = th->source;
+       ireq->ir_num = ntohs(th->dest);
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
-               ireq6->pktopts = skb;
+               ireq->pktopts = skb;
        }
 
-       ireq6->iif = sk->sk_bound_dev_if;
+       ireq->ir_iif = sk->sk_bound_dev_if;
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               ireq6->iif = inet6_iif(skb);
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
 
        req->expires = 0UL;
        req->num_retrans = 0;
@@ -241,12 +238,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                struct flowi6 fl6;
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_TCP;
-               fl6.daddr = ireq6->rmt_addr;
+               fl6.daddr = ireq->ir_v6_rmt_addr;
                final_p = fl6_update_dst(&fl6, np->opt, &final);
-               fl6.saddr = ireq6->loc_addr;
+               fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = sk->sk_mark;
-               fl6.fl6_dport = inet_rsk(req)->rmt_port;
+               fl6.fl6_dport = ireq->ir_rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
index 5c71501fc917d6271f72cea50cb98b9ad783f1c4..0740f93a114a26ac09150638576f523bcd53694c 100644 (file)
@@ -192,13 +192,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        if (tp->rx_opt.ts_recent_stamp &&
-           !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
+           !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }
 
-       np->daddr = usin->sin6_addr;
+       sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -237,17 +237,17 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                } else {
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-                                              &np->rcv_saddr);
+                                              &sk->sk_v6_rcv_saddr);
                }
 
                return err;
        }
 
-       if (!ipv6_addr_any(&np->rcv_saddr))
-               saddr = &np->rcv_saddr;
+       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+               saddr = &sk->sk_v6_rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_TCP;
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
@@ -266,7 +266,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               np->rcv_saddr = *saddr;
+               sk->sk_v6_rcv_saddr = *saddr;
        }
 
        /* set the source address */
@@ -279,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        rt = (struct rt6_info *) dst;
        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
-           ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+           ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
                tcp_fetch_timewait_stamp(sk, dst);
 
        icsk->icsk_ext_hdr_len = 0;
@@ -298,7 +298,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
-                                                            np->daddr.s6_addr32,
+                                                            sk->sk_v6_daddr.s6_addr32,
                                                             inet->inet_sport,
                                                             inet->inet_dport);
 
@@ -465,7 +465,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
                              u16 queue_mapping)
 {
-       struct inet6_request_sock *treq = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        int err = -ENOMEM;
@@ -477,9 +477,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
        skb = tcp_make_synack(sk, dst, req, NULL);
 
        if (skb) {
-               __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
+               __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
+                                   &ireq->ir_v6_rmt_addr);
 
-               fl6->daddr = treq->rmt_addr;
+               fl6->daddr = ireq->ir_v6_rmt_addr;
                skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
@@ -502,7 +503,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
-       kfree_skb(inet6_rsk(req)->pktopts);
+       kfree_skb(inet_rsk(req)->pktopts);
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -515,13 +516,13 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk)
 {
-       return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+       return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
 {
-       return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+       return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 }
 
 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
@@ -621,10 +622,10 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 
        if (sk) {
                saddr = &inet6_sk(sk)->saddr;
-               daddr = &inet6_sk(sk)->daddr;
+               daddr = &sk->sk_v6_daddr;
        } else if (req) {
-               saddr = &inet6_rsk(req)->loc_addr;
-               daddr = &inet6_rsk(req)->rmt_addr;
+               saddr = &inet_rsk(req)->ir_v6_loc_addr;
+               daddr = &inet_rsk(req)->ir_v6_rmt_addr;
        } else {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                saddr = &ip6h->saddr;
@@ -949,7 +950,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_options_received tmp_opt;
        struct request_sock *req;
-       struct inet6_request_sock *treq;
+       struct inet_request_sock *ireq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;
@@ -994,25 +995,25 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);
 
-       treq = inet6_rsk(req);
-       treq->rmt_addr = ipv6_hdr(skb)->saddr;
-       treq->loc_addr = ipv6_hdr(skb)->daddr;
+       ireq = inet_rsk(req);
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, skb, sock_net(sk));
 
-       treq->iif = sk->sk_bound_dev_if;
+       ireq->ir_iif = sk->sk_bound_dev_if;
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               treq->iif = inet6_iif(skb);
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
 
        if (!isn) {
                if (ipv6_opt_accepted(sk, skb) ||
                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                        atomic_inc(&skb->users);
-                       treq->pktopts = skb;
+                       ireq->pktopts = skb;
                }
 
                if (want_cookie) {
@@ -1051,7 +1052,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                         * to the moment of synflood.
                         */
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-                                      &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+                                      &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
                        goto drop_and_release;
                }
 
@@ -1086,7 +1087,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst)
 {
-       struct inet6_request_sock *treq;
+       struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
@@ -1116,11 +1117,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               newnp->rcv_saddr = newnp->saddr;
+               newsk->sk_v6_rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1151,7 +1152,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                return newsk;
        }
 
-       treq = inet6_rsk(req);
+       ireq = inet_rsk(req);
 
        if (sk_acceptq_is_full(sk))
                goto out_overflow;
@@ -1185,10 +1186,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       newnp->daddr = treq->rmt_addr;
-       newnp->saddr = treq->loc_addr;
-       newnp->rcv_saddr = treq->loc_addr;
-       newsk->sk_bound_dev_if = treq->iif;
+       newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+       newnp->saddr = ireq->ir_v6_loc_addr;
+       newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+       newsk->sk_bound_dev_if = ireq->ir_iif;
 
        /* Now IPv6 options...
 
@@ -1203,11 +1204,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
-       if (treq->pktopts != NULL) {
-               newnp->pktoptions = skb_clone(treq->pktopts,
+       if (ireq->pktopts != NULL) {
+               newnp->pktoptions = skb_clone(ireq->pktopts,
                                              sk_gfp_atomic(sk, GFP_ATOMIC));
-               consume_skb(treq->pktopts);
-               treq->pktopts = NULL;
+               consume_skb(ireq->pktopts);
+               ireq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
@@ -1244,13 +1245,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
-       if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+       if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
-               tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
+               tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                               AF_INET6, key->key, key->keylen,
                               sk_gfp_atomic(sk, GFP_ATOMIC));
        }
@@ -1722,8 +1723,8 @@ static void get_openreq6(struct seq_file *seq,
                         const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
 {
        int ttd = req->expires - jiffies;
-       const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
-       const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+       const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
+       const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
        if (ttd < 0)
                ttd = 0;
@@ -1734,10 +1735,10 @@ static void get_openreq6(struct seq_file *seq,
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
-                  ntohs(inet_rsk(req)->loc_port),
+                  inet_rsk(req)->ir_num,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
-                  ntohs(inet_rsk(req)->rmt_port),
+                  ntohs(inet_rsk(req)->ir_rmt_port),
                   TCP_SYN_RECV,
                   0,0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
@@ -1758,10 +1759,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
-       const struct ipv6_pinfo *np = inet6_sk(sp);
 
-       dest  = &np->daddr;
-       src   = &np->rcv_saddr;
+       dest  = &sp->sk_v6_daddr;
+       src   = &sp->sk_v6_rcv_saddr;
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);
 
@@ -1810,11 +1810,10 @@ static void get_timewait6_sock(struct seq_file *seq,
 {
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
-       const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-       long delta = tw->tw_ttd - jiffies;
+       s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
-       dest = &tw6->tw_v6_daddr;
-       src  = &tw6->tw_v6_rcv_saddr;
+       dest = &tw->tw_v6_daddr;
+       src  = &tw->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);
 
@@ -1834,6 +1833,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 static int tcp6_seq_show(struct seq_file *seq, void *v)
 {
        struct tcp_iter_state *st;
+       struct sock *sk = v;
 
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
@@ -1849,14 +1849,14 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
-               get_tcp6_sock(seq, v, st->num);
+               if (sk->sk_state == TCP_TIME_WAIT)
+                       get_timewait6_sock(seq, v, st->num);
+               else
+                       get_tcp6_sock(seq, v, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
                break;
-       case TCP_SEQ_STATE_TIME_WAIT:
-               get_timewait6_sock(seq, v, st->num);
-               break;
        }
 out:
        return 0;
@@ -1929,6 +1929,7 @@ struct proto tcpv6_prot = {
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
+       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
index 2ec6bf6a0aa002d6c21468d090767736161a5d1b..c1097c79890070e3d04517cecb7f0b263c0a4fc7 100644 (file)
@@ -83,7 +83,7 @@ static int tcp6_gro_complete(struct sk_buff *skb)
 static const struct net_offload tcpv6_offload = {
        .callbacks = {
                .gso_send_check =       tcp_v6_gso_send_check,
-               .gso_segment    =       tcp_tso_segment,
+               .gso_segment    =       tcp_gso_segment,
                .gro_receive    =       tcp6_gro_receive,
                .gro_complete   =       tcp6_gro_complete,
        },
index f4058150262b111d316a423e1c9ddc7ee8bb29a0..f3893e897f721aa4345c82f0ea2b899c44fa0ae4 100644 (file)
 #include <trace/events/skb.h>
 #include "udp_impl.h"
 
+static unsigned int udp6_ehashfn(struct net *net,
+                                 const struct in6_addr *laddr,
+                                 const u16 lport,
+                                 const struct in6_addr *faddr,
+                                 const __be16 fport)
+{
+       static u32 udp6_ehash_secret __read_mostly;
+       static u32 udp_ipv6_hash_secret __read_mostly;
+
+       u32 lhash, fhash;
+
+       net_get_random_once(&udp6_ehash_secret,
+                           sizeof(udp6_ehash_secret));
+       net_get_random_once(&udp_ipv6_hash_secret,
+                           sizeof(udp_ipv6_hash_secret));
+
+       lhash = (__force u32)laddr->s6_addr32[3];
+       fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
+
+       return __inet6_ehashfn(lhash, lport, fhash, fport,
+                              udp_ipv6_hash_secret + net_hash_mix(net));
+}
+
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
-       const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
-       __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
        int sk_ipv6only = ipv6_only_sock(sk);
        int sk2_ipv6only = inet_v6_ipv6only(sk2);
-       int addr_type = ipv6_addr_type(sk_rcv_saddr6);
+       int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
        /* if both are mapped, treat as IPv4 */
        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
                return (!sk2_ipv6only &&
-                       (!sk1_rcv_saddr || !sk2_rcv_saddr ||
-                         sk1_rcv_saddr == sk2_rcv_saddr));
+                       (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
+                         sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
 
        if (addr_type2 == IPV6_ADDR_ANY &&
            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
@@ -79,7 +99,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
                return 1;
 
        if (sk2_rcv_saddr6 &&
-           ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
+           ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
                return 1;
 
        return 0;
@@ -107,7 +127,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
        unsigned int hash2_nulladdr =
                udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
        unsigned int hash2_partial =
-               udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
+               udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 
        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -117,7 +137,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
 static void udp_v6_rehash(struct sock *sk)
 {
        u16 new_hash = udp6_portaddr_hash(sock_net(sk),
-                                         &inet6_sk(sk)->rcv_saddr,
+                                         &sk->sk_v6_rcv_saddr,
                                          inet_sk(sk)->inet_num);
 
        udp_lib_rehash(sk, new_hash);
@@ -133,7 +153,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
 
        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
                        sk->sk_family == PF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                struct inet_sock *inet = inet_sk(sk);
 
                score = 0;
@@ -142,13 +161,13 @@ static inline int compute_score(struct sock *sk, struct net *net,
                                return -1;
                        score++;
                }
-               if (!ipv6_addr_any(&np->rcv_saddr)) {
-                       if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+               if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
                                return -1;
                        score++;
                }
-               if (!ipv6_addr_any(&np->daddr)) {
-                       if (!ipv6_addr_equal(&np->daddr, saddr))
+               if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
                                return -1;
                        score++;
                }
@@ -171,10 +190,9 @@ static inline int compute_score2(struct sock *sk, struct net *net,
 
        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
                        sk->sk_family == PF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                struct inet_sock *inet = inet_sk(sk);
 
-               if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+               if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
                        return -1;
                score = 0;
                if (inet->inet_dport) {
@@ -182,8 +200,8 @@ static inline int compute_score2(struct sock *sk, struct net *net,
                                return -1;
                        score++;
                }
-               if (!ipv6_addr_any(&np->daddr)) {
-                       if (!ipv6_addr_equal(&np->daddr, saddr))
+               if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
                                return -1;
                        score++;
                }
@@ -219,8 +237,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet6_ehashfn(net, daddr, hnum,
-                                                    saddr, sport);
+                               hash = udp6_ehashfn(net, daddr, hnum,
+                                                   saddr, sport);
                                matches = 1;
                        } else if (score == SCORE2_MAX)
                                goto exact_match;
@@ -300,8 +318,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet6_ehashfn(net, daddr, hnum,
-                                                    saddr, sport);
+                               hash = udp6_ehashfn(net, daddr, hnum,
+                                                   saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -525,8 +543,10 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (type == ICMPV6_PKT_TOOBIG)
                ip6_sk_update_pmtu(skb, sk, info);
-       if (type == NDISC_REDIRECT)
+       if (type == NDISC_REDIRECT) {
                ip6_sk_redirect(skb, sk);
+               goto out;
+       }
 
        np = inet6_sk(sk);
 
@@ -549,8 +569,10 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
 
-       if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+       if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
                sock_rps_save_rxhash(sk, skb);
+               sk_mark_napi_id(sk, skb);
+       }
 
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
@@ -688,20 +710,19 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 
                if (udp_sk(s)->udp_port_hash == num &&
                    s->sk_family == PF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(s);
                        if (inet->inet_dport) {
                                if (inet->inet_dport != rmt_port)
                                        continue;
                        }
-                       if (!ipv6_addr_any(&np->daddr) &&
-                           !ipv6_addr_equal(&np->daddr, rmt_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+                           !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
                                continue;
 
                        if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
                                continue;
 
-                       if (!ipv6_addr_any(&np->rcv_saddr)) {
-                               if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                               if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
                                        continue;
                        }
                        if (!inet6_mc_check(s, loc_addr, rmt_addr))
@@ -844,7 +865,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (sk != NULL) {
                int ret;
 
-               sk_mark_napi_id(sk, skb);
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
@@ -1062,7 +1082,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
        } else if (!up->pending) {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
        } else
                daddr = NULL;
 
@@ -1132,8 +1152,8 @@ do_udp_sendmsg:
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
-                   ipv6_addr_equal(daddr, &np->daddr))
-                       daddr = &np->daddr;
+                   ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+                       daddr = &sk->sk_v6_daddr;
 
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    sin6->sin6_scope_id &&
@@ -1144,7 +1164,7 @@ do_udp_sendmsg:
                        return -EDESTADDRREQ;
 
                fl6.fl6_dport = inet->inet_dport;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
                connected = 1;
        }
@@ -1223,9 +1243,6 @@ do_udp_sendmsg:
        if (tclass < 0)
                tclass = np->tclass;
 
-       if (dontfrag < 0)
-               dontfrag = np->dontfrag;
-
        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
 back_from_confirm:
@@ -1244,6 +1261,8 @@ back_from_confirm:
        up->pending = AF_INET6;
 
 do_append_data:
+       if (dontfrag < 0)
+               dontfrag = np->dontfrag;
        up->len += ulen;
        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
@@ -1260,8 +1279,8 @@ do_append_data:
        if (dst) {
                if (connected) {
                        ip6_dst_store(sk, dst,
-                                     ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
-                                     &np->daddr : NULL,
+                                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                                     &sk->sk_v6_daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
                                      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
                                      &np->saddr :
index 4691ed50a9282a108e43ce8e7de56d79c1635c4a..c779c3c90b9d3b90c3b4821508d3fdf31b6d5da6 100644 (file)
@@ -7,33 +7,32 @@
 #include <net/inet_common.h>
 #include <net/transp_v6.h>
 
-extern int     __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void    __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
-                              u8 , u8 , int , __be32 , struct udp_table *);
+int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
+                   __be32, struct udp_table *);
 
-extern int     udp_v6_get_port(struct sock *sk, unsigned short snum);
+int udp_v6_get_port(struct sock *sk, unsigned short snum);
 
-extern int     udpv6_getsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int __user *optlen);
-extern int     udpv6_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, unsigned int optlen);
+int udpv6_getsockopt(struct sock *sk, int level, int optname,
+                    char __user *optval, int __user *optlen);
+int udpv6_setsockopt(struct sock *sk, int level, int optname,
+                    char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
-extern int     compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-                                       char __user *optval, unsigned int optlen);
-extern int     compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
-                                      char __user *optval, int __user *optlen);
+int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
+                           char __user *optval, unsigned int optlen);
+int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+                           char __user *optval, int __user *optlen);
 #endif
-extern int     udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
-                             struct msghdr *msg, size_t len);
-extern int     udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
-                             struct msghdr *msg, size_t len,
-                             int noblock, int flags, int *addr_len);
-extern int     udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
-extern void    udpv6_destroy_sock(struct sock *sk);
+int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                 size_t len);
+int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                 size_t len, int noblock, int flags, int *addr_len);
+int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udpv6_destroy_sock(struct sock *sk);
 
-extern void udp_v6_clear_sk(struct sock *sk, int size);
+void udp_v6_clear_sk(struct sock *sk, int size);
 
 #ifdef CONFIG_PROC_FS
-extern int     udp6_seq_show(struct seq_file *seq, void *v);
+int udp6_seq_show(struct seq_file *seq, void *v);
 #endif
 #endif /* _UDP6_IMPL_H */
index 60559511bd9c479e7996ff9826c4852e8051df72..08e23b0bf302e93c27c74a3cd8dfc5213c453c6e 100644 (file)
@@ -64,6 +64,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                                      SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_GRE |
+                                     SKB_GSO_IPIP |
+                                     SKB_GSO_SIT |
                                      SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
index 4770d515c2c856684e93416d4696067cae7e3114..cb04f7a16b5e102f2944be051d61511d5645d3f7 100644 (file)
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
+/* Informational hook. The decap is still done here. */
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm6_mode_tunnel_input_mutex);
+
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
+{
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
+       int ret = -EEXIST;
+       int priority = handler->priority;
+
+       mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+
+       for (pprev = &rcv_notify_handlers;
+            (t = rcu_dereference_protected(*pprev,
+            lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority > priority)
+                       break;
+               if (t->priority == priority)
+                       goto err;
+
+       }
+
+       handler->next = *pprev;
+       rcu_assign_pointer(*pprev, handler);
+
+       ret = 0;
+
+err:
+       mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_register);
+
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
+{
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
+       int ret = -ENOENT;
+
+       mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+       for (pprev = &rcv_notify_handlers;
+            (t = rcu_dereference_protected(*pprev,
+            lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
+                       *pprev = handler->next;
+                       ret = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_deregister);
+
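A hedged consumer sketch of the new informational hook; the callback name and its int return are assumptions (only the handler, priority and next fields are visible in this hunk). Handlers run in priority order for every tunnel-mode input packet and must not consume the skb, since the decapsulation still happens afterwards:

/* Hypothetical consumer, for illustration only. */
static int my_rcv_notify(struct sk_buff *skb)
{
	/* peek at the packet; never free or steal the skb here */
	return 0;
}

static struct xfrm_tunnel_notifier my_notifier = {
	.handler  = my_rcv_notify,
	.priority = 1,
};

/* paired register/deregister, e.g. from module init/exit:
 *	xfrm6_mode_tunnel_input_register(&my_notifier);
 *	xfrm6_mode_tunnel_input_deregister(&my_notifier);
 */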
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
@@ -63,8 +122,15 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
        return 0;
 }
 
+#define for_each_input_rcu(head, handler)      \
+       for (handler = rcu_dereference(head);   \
+            handler != NULL;                   \
+            handler = rcu_dereference(handler->next))
+
+
 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
@@ -72,6 +138,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
 
+       for_each_input_rcu(rcv_notify_handlers, handler)
+               handler->handler(skb);
+
        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;
index 23ed03d786c8376cc59f9fa2cf577ee01a4c2c2d..08ed2772b7aa58225bd6be95518ea95c1191dcd9 100644 (file)
@@ -138,6 +138,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
        memset(fl6, 0, sizeof(struct flowi6));
        fl6->flowi6_mark = skb->mark;
+       fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
 
        fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
        fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
index 564eb0b8afa350eb5f933c981ab7bd0d68abd1b0..8d65bb9477fce626c7e781c0d38d78b488b5271c 100644 (file)
@@ -509,16 +509,11 @@ typedef struct irnet_ctrl_channel
  */
 
 /* -------------------------- IRDA PART -------------------------- */
-extern int
-       irda_irnet_create(irnet_socket *);      /* Initialise a IrNET socket */
-extern int
-       irda_irnet_connect(irnet_socket *);     /* Try to connect over IrDA */
-extern void
-       irda_irnet_destroy(irnet_socket *);     /* Teardown  a IrNET socket */
-extern int
-       irda_irnet_init(void);          /* Initialise IrDA part of IrNET */
-extern void
-       irda_irnet_cleanup(void);       /* Teardown IrDA part of IrNET */
+int irda_irnet_create(irnet_socket *); /* Initialise an IrNET socket */
+int irda_irnet_connect(irnet_socket *);        /* Try to connect over IrDA */
+void irda_irnet_destroy(irnet_socket *);       /* Teardown an IrNET socket */
+int irda_irnet_init(void);             /* Initialise IrDA part of IrNET */
+void irda_irnet_cleanup(void);         /* Teardown IrDA part of IrNET */
 
 /**************************** VARIABLES ****************************/
 
index 9d585370c5b4d6a1e60728df4dad6c79ca196ee3..911ef03bf8fbf1716672fb503f39de7fe13746d9 100644 (file)
@@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 
        x->id.proto = proto;
        x->id.spi = sa->sadb_sa_spi;
-       x->props.replay_window = sa->sadb_sa_replay;
+       x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
+                                       (sizeof(x->replay.bitmap) * 8));
        if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
                x->props.flags |= XFRM_STATE_NOECN;
        if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
index feae495a0a30accd9eb5a88baf1c23095bb38d66..9af77d9c0ec9b7327c275feeacc6766c7681f54d 100644 (file)
@@ -115,6 +115,11 @@ struct l2tp_net {
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+       return sk->sk_user_data;
+}
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
        BUG_ON(!net);
@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
                return 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == PF_INET6) {
+       if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
                if (!uh->check) {
                        LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
                        return 1;
@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        /* Queue the packet to IP for output */
        skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-       if (skb->sk->sk_family == PF_INET6)
+       if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                error = inet6_csk_xmit(skb, NULL);
        else
 #endif
@@ -1176,7 +1181,7 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
            !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
                __wsum csum = skb_checksum(skb, 0, udp_len, 0);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-               uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
+               uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
                                            IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
@@ -1184,7 +1189,7 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
-               uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
+               uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
                                             udp_len, IPPROTO_UDP, 0);
        }
 }
@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
                /* Calculate UDP checksum if configured to do so */
 #if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == PF_INET6)
+               if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                        l2tp_xmit_ipv6_csum(sk, skb, udp_len);
                else
 #endif
@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
  */
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
-       struct l2tp_tunnel *tunnel;
+       struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
        struct l2tp_net *pn;
 
-       tunnel = sk->sk_user_data;
        if (tunnel == NULL)
                goto end;
 
@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        }
 
        /* Check if this socket has already been prepped */
-       tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+       tunnel = l2tp_tunnel(sk);
        if (tunnel != NULL) {
                /* This socket has already been prepped */
                err = -EBUSY;
@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        if (cfg != NULL)
                tunnel->debug = cfg->debug;
 
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == PF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+
+               if (ipv6_addr_v4mapped(&np->saddr) &&
+                   ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       tunnel->v4mapped = true;
+                       inet->inet_saddr = np->saddr.s6_addr32[3];
+                       inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
+                       inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
+               } else {
+                       tunnel->v4mapped = false;
+               }
+       }
+#endif
+
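The s6_addr32[3] copies above work because a v4-mapped address (::ffff:a.b.c.d) keeps the IPv4 address in its last 32-bit word, already in network byte order; a one-line illustration with an invented helper name:

/* Illustration only, not part of the patch. */
static inline __be32 v4mapped_to_ipv4(const struct in6_addr *a6)
{
	return a6->s6_addr32[3];	/* ::ffff:a.b.c.d -> a.b.c.d */
}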
        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
        tunnel->encap = encap;
        if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
                udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
                udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == PF_INET6)
+               if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                        udpv6_encap_enable();
                else
 #endif
index 66a559b104b6a7af0bc5c76770c32ad0f816670c..1ee9f6965d6850c94f9ea70a6320836e3d0734f0 100644 (file)
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
        struct sock             *sock;          /* Parent socket */
        int                     fd;             /* Parent fd, if tunnel socket
                                                 * was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+       bool                    v4mapped;
+#endif
 
        struct work_struct      del_work;
 
@@ -235,29 +238,40 @@ out:
        return tunnel;
 }
 
-extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
-extern void l2tp_tunnel_sock_put(struct sock *sk);
-extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
-extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
-
-extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
-extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
-extern void __l2tp_session_unhash(struct l2tp_session *session);
-extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_session_free(struct l2tp_session *session);
-extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_session_queue_purge(struct l2tp_session *session);
-extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
-
-extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-
-extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
-extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_sock_put(struct sock *sk);
+struct l2tp_session *l2tp_session_find(struct net *net,
+                                      struct l2tp_tunnel *tunnel,
+                                      u32 session_id);
+struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+                      u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+                      struct l2tp_tunnel **tunnelp);
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+struct l2tp_session *l2tp_session_create(int priv_size,
+                                        struct l2tp_tunnel *tunnel,
+                                        u32 session_id, u32 peer_session_id,
+                                        struct l2tp_session_cfg *cfg);
+void __l2tp_session_unhash(struct l2tp_session *session);
+int l2tp_session_delete(struct l2tp_session *session);
+void l2tp_session_free(struct l2tp_session *session);
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+                     unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+                     int length, int (*payload_hook)(struct sk_buff *skb));
+int l2tp_session_queue_purge(struct l2tp_session *session);
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+                 int hdr_len);
+
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+                        const struct l2tp_nl_cmd_ops *ops);
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
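
The header cleanup above only drops the redundant extern storage-class specifier and re-wraps long prototypes; a function declaration at file scope has external linkage either way. A generic illustration (type and names invented here, not taken from the patch):

/* Equivalent declarations: "extern" adds nothing for function prototypes. */
struct session;					/* opaque example type */

extern int session_delete_old(struct session *s);	/* old style */
int session_delete_new(struct session *s);		/* new style */
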
index 072d7202e182ffa2125fe7de9c54c584626fdeb0..2d6760a2ae347b96d465e30192ab8a7957258d32 100644 (file)
@@ -127,9 +127,10 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
 
 #if IS_ENABLED(CONFIG_IPV6)
                if (tunnel->sock->sk_family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+                       const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+
                        seq_printf(m, " from %pI6c to %pI6c\n",
-                               &np->saddr, &np->daddr);
+                               &np->saddr, &tunnel->sock->sk_v6_daddr);
                } else
 #endif
                seq_printf(m, " from %pI4 to %pI4\n",
index b8a6039314e868781d3130f06fbb78ced6c4a678..cfd65304be60ae8937449a8c1e4cf77406c11305 100644 (file)
@@ -63,7 +63,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
        struct sock *sk;
 
        sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
-               struct in6_addr *addr = inet6_rcv_saddr(sk);
+               const struct in6_addr *addr = inet6_rcv_saddr(sk);
                struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
                if (l2tp == NULL)
@@ -331,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rcu_read_unlock();
 
        inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-       np->rcv_saddr = addr->l2tp_addr;
+       sk->sk_v6_rcv_saddr = addr->l2tp_addr;
        np->saddr = addr->l2tp_addr;
 
        l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
@@ -421,14 +421,14 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
                if (!lsk->peer_conn_id)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
-               lsa->l2tp_addr = np->daddr;
+               lsa->l2tp_addr = sk->sk_v6_daddr;
                if (np->sndflow)
                        lsa->l2tp_flowinfo = np->flow_label;
        } else {
-               if (ipv6_addr_any(&np->rcv_saddr))
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                        lsa->l2tp_addr = np->saddr;
                else
-                       lsa->l2tp_addr = np->rcv_saddr;
+                       lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
 
                lsa->l2tp_conn_id = lsk->conn_id;
        }
@@ -537,8 +537,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
-                   ipv6_addr_equal(daddr, &np->daddr))
-                       daddr = &np->daddr;
+                   ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+                       daddr = &sk->sk_v6_daddr;
 
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    lsa->l2tp_scope_id &&
@@ -548,7 +548,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
 
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
        }
 
index 0825ff26e113f3e2acdbbe2b88bd0f4fbfd92410..be446d517bc96641d413dc0bba09d9150398a81e 100644 (file)
@@ -306,8 +306,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
                if (np) {
                        if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
                                    &np->saddr) ||
-                           nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
-                                   &np->daddr))
+                           nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(sk->sk_v6_daddr),
+                                   &sk->sk_v6_daddr))
                                goto nla_put_failure;
                } else
 #endif
index 5ebee2ded9e9a8f40d2a282600b47f6cb088a910..ffda81ef1a709df605ef128ebc866cb00c75fe32 100644 (file)
@@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
                goto error_put_sess_tun;
        }
 
+       local_bh_disable();
        l2tp_xmit_skb(session, skb, session->hdr_len);
+       local_bh_enable();
 
        sock_put(ps->tunnel_sock);
        sock_put(sk);
@@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        skb->data[0] = ppph[0];
        skb->data[1] = ppph[1];
 
+       local_bh_disable();
        l2tp_xmit_skb(session, skb, session->hdr_len);
+       local_bh_enable();
 
        sock_put(sk_tun);
        sock_put(sk);
@@ -906,8 +910,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 #if IS_ENABLED(CONFIG_IPV6)
        } else if ((tunnel->version == 2) &&
                   (tunnel->sock->sk_family == AF_INET6)) {
-               struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
                struct sockaddr_pppol2tpin6 sp;
+
                len = sizeof(sp);
                memset(&sp, 0, len);
                sp.sa_family    = AF_PPPOX;
@@ -920,13 +924,13 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                sp.pppol2tp.d_session = session->peer_session_id;
                sp.pppol2tp.addr.sin6_family = AF_INET6;
                sp.pppol2tp.addr.sin6_port = inet->inet_dport;
-               memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
-                      sizeof(np->daddr));
+               memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+                      sizeof(tunnel->sock->sk_v6_daddr));
                memcpy(uaddr, &sp, len);
        } else if ((tunnel->version == 3) &&
                   (tunnel->sock->sk_family == AF_INET6)) {
-               struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
                struct sockaddr_pppol2tpv3in6 sp;
+
                len = sizeof(sp);
                memset(&sp, 0, len);
                sp.sa_family    = AF_PPPOX;
@@ -939,8 +943,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                sp.pppol2tp.d_session = session->peer_session_id;
                sp.pppol2tp.addr.sin6_family = AF_INET6;
                sp.pppol2tp.addr.sin6_port = inet->inet_dport;
-               memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
-                      sizeof(np->daddr));
+               memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+                      sizeof(tunnel->sock->sk_v6_daddr));
                memcpy(uaddr, &sp, len);
 #endif
        } else if (tunnel->version == 3) {
index 54563ad8aeb1f02bbedd18959c8cf9cd77479942..355cc3b6fa4d3e4040bf9fb1578fc43c6c4f7b52 100644 (file)
@@ -154,6 +154,7 @@ static void lapb_t1timer_expiry(unsigned long param)
                        } else {
                                lapb->n2count++;
                                lapb_requeue_frames(lapb);
+                               lapb_kick(lapb);
                        }
                        break;
 
index ac28af74a41410abceed9884a9388d9cb37784cb..b0a651cc389fdab807b2a8b800f63144d0eaeb2f 100644 (file)
@@ -3564,7 +3564,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
                return -EINVAL;
        }
        band = chanctx_conf->def.chan->band;
-       sta = sta_info_get(sdata, peer);
+       sta = sta_info_get_bss(sdata, peer);
        if (sta) {
                qos = test_sta_flag(sta, WLAN_STA_WME);
        } else {
index 3a87c8976a320473a5155f5963109b4319954f7f..fe48b093d4dc1cafb420f6e75526f4bf851fa4fe 100644 (file)
@@ -334,6 +334,7 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_DISABLE_VHT       = BIT(11),
        IEEE80211_STA_DISABLE_80P80MHZ  = BIT(12),
        IEEE80211_STA_DISABLE_160MHZ    = BIT(13),
+       IEEE80211_STA_DISABLE_WMM       = BIT(14),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -893,6 +894,8 @@ struct tpt_led_trigger {
  *     that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
  *     a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ *     cancelled.
  */
 enum {
        SCAN_SW_SCANNING,
@@ -900,6 +903,7 @@ enum {
        SCAN_ONCHANNEL_SCANNING,
        SCAN_COMPLETED,
        SCAN_ABORTED,
+       SCAN_HW_CANCELLED,
 };
 
 /**
index 91cc8281e266cbfb7c51ea23c36e046e5f3c008a..d7bdc4b97dde2d57d79e2131333ff55f4878c9cb 100644 (file)
@@ -2527,7 +2527,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
         */
        ifmgd->wmm_last_param_set = -1;
 
-       if (elems.wmm_param)
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && elems.wmm_param)
                ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
                                         elems.wmm_param_len);
        else
@@ -2955,7 +2955,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
                                         &elems, true);
 
-       if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
+           ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
                                     elems.wmm_param_len))
                changed |= BSS_CHANGED_QOS;
 
@@ -3937,6 +3938,44 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        return err;
 }
 
+static bool ieee80211_usable_wmm_params(struct ieee80211_sub_if_data *sdata,
+                                       const u8 *wmm_param, int len)
+{
+       const u8 *pos;
+       size_t left;
+
+       if (len < 8)
+               return false;
+
+       if (wmm_param[5] != 1 /* version */)
+               return false;
+
+       pos = wmm_param + 8;
+       left = len - 8;
+
+       for (; left >= 4; left -= 4, pos += 4) {
+               u8 aifsn = pos[0] & 0x0f;
+               u8 ecwmin = pos[1] & 0x0f;
+               u8 ecwmax = (pos[1] & 0xf0) >> 4;
+               int aci = (pos[0] >> 5) & 0x03;
+
+               if (aifsn < 2) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (AIFSN=%d for ACI %d), disabling WMM\n",
+                                  aifsn, aci);
+                       return false;
+               }
+               if (ecwmin > ecwmax) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (ECWmin/max=%d/%d for ACI %d), disabling WMM\n",
+                                  ecwmin, ecwmax, aci);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                        struct cfg80211_assoc_request *req)
 {
@@ -3994,9 +4033,45 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
 
        /* prepare assoc data */
-       
+
        ifmgd->beacon_crc_valid = false;
 
+       assoc_data->wmm = bss->wmm_used &&
+                         (local->hw.queues >= IEEE80211_NUM_ACS);
+       if (assoc_data->wmm) {
+               /* try to check validity of WMM params IE */
+               const struct cfg80211_bss_ies *ies;
+               const u8 *wp, *start, *end;
+
+               rcu_read_lock();
+               ies = rcu_dereference(req->bss->ies);
+               start = ies->data;
+               end = start + ies->len;
+
+               while (true) {
+                       wp = cfg80211_find_vendor_ie(
+                               WLAN_OUI_MICROSOFT,
+                               WLAN_OUI_TYPE_MICROSOFT_WMM,
+                               start, end - start);
+                       if (!wp)
+                               break;
+                       start = wp + wp[1] + 2;
+                       /* if this IE is too short, try the next */
+                       if (wp[1] <= 4)
+                               continue;
+                       /* if this IE is WMM params, we found what we wanted */
+                       if (wp[6] == 1)
+                               break;
+               }
+
+               if (!wp || !ieee80211_usable_wmm_params(sdata, wp + 2,
+                                                       wp[1] - 2)) {
+                       assoc_data->wmm = false;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_WMM;
+               }
+               rcu_read_unlock();
+       }
+
        /*
         * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
         * We still associate in non-HT mode (11a/b/g) if any one of these
@@ -4026,18 +4101,22 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
-           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+           ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
-               if (!bss->wmm_used)
+               if (!bss->wmm_used &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
                        netdev_info(sdata->dev,
                                    "disabling HT as WMM/QoS is not supported by the AP\n");
        }
 
        /* disable VHT if we don't support it or the AP doesn't use WMM */
        if (!sband->vht_cap.vht_supported ||
-           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+           ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-               if (!bss->wmm_used)
+               if (!bss->wmm_used &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
                        netdev_info(sdata->dev,
                                    "disabling VHT as WMM/QoS is not supported by the AP\n");
        }
@@ -4066,8 +4145,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                sdata->smps_mode = ifmgd->req_smps;
 
        assoc_data->capability = req->bss->capability;
-       assoc_data->wmm = bss->wmm_used &&
-                         (local->hw.queues >= IEEE80211_NUM_ACS);
        assoc_data->supp_rates = bss->supp_rates;
        assoc_data->supp_rates_len = bss->supp_rates_len;
 
index acd1f71adc0386ab588f0939309e2367330f2b29..0c2a29484c07cdaaaf716b1f5ba0184d1047ee68 100644 (file)
@@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 
                if (started)
                        ieee80211_start_next_roc(local);
+               else if (list_empty(&local->roc_list))
+                       ieee80211_run_deferred_scan(local);
        }
 
  out_unlock:
index e126605cec66baf82aadb835856110d74105795f..22b223f13c9fa22994eba6f68769a27b2cfe4eb3 100644 (file)
@@ -235,7 +235,8 @@ static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
 static void __rate_control_send_low(struct ieee80211_hw *hw,
                                    struct ieee80211_supported_band *sband,
                                    struct ieee80211_sta *sta,
-                                   struct ieee80211_tx_info *info)
+                                   struct ieee80211_tx_info *info,
+                                   u32 rate_mask)
 {
        int i;
        u32 rate_flags =
@@ -247,6 +248,12 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
 
        info->control.rates[0].idx = 0;
        for (i = 0; i < sband->n_bitrates; i++) {
+               if (!(rate_mask & BIT(i)))
+                       continue;
+
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+
                if (!rate_supported(sta, sband->band, i))
                        continue;
 
@@ -274,7 +281,8 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta,
        bool use_basicrate = false;
 
        if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
-               __rate_control_send_low(txrc->hw, sband, pubsta, info);
+               __rate_control_send_low(txrc->hw, sband, pubsta, info,
+                                       txrc->rate_idx_mask);
 
                if (!pubsta && txrc->bss) {
                        mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
@@ -656,7 +664,8 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
                rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
 
        if (dest[0].idx < 0)
-               __rate_control_send_low(&sdata->local->hw, sband, sta, info);
+               __rate_control_send_low(&sdata->local->hw, sband, sta, info,
+                                       sdata->rc_rateidx_mask[info->band]);
 
        if (sta)
                rate_fixup_ratelist(vif, sband, info, dest, max_rates);
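
The new rate_mask argument lets __rate_control_send_low() skip bitrates excluded by the configured index mask before any other checks. Stripped of the mac80211 context, the filtering loop amounts to the following (example rate table and mask only):

/* Illustrative filtering loop: pick the lowest rate index that survives
 * a caller-supplied bitmask of permitted rates. */
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	int rates_100kbps[] = { 10, 20, 55, 110, 60, 90 };	/* example table */
	unsigned int rate_mask = BIT(2) | BIT(3) | BIT(4);	/* allowed indices */
	int i, chosen = -1;

	for (i = 0; i < 6; i++) {
		if (!(rate_mask & BIT(i)))
			continue;		/* excluded by the mask */
		chosen = i;
		break;				/* lowest permitted rate */
	}

	if (chosen >= 0)
		printf("lowest allowed rate: %d.%d Mbps (idx %d)\n",
		       rates_100kbps[chosen] / 10, rates_100kbps[chosen] % 10,
		       chosen);
	return 0;
}
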
index 5dedc56c94dbe91a1b9bd6c959094ab3b494be86..505bc0dea074809f1532f322528e222a5052c061 100644 (file)
@@ -144,8 +144,8 @@ void rate_control_deinitialize(struct ieee80211_local *local);
 
 /* Rate control algorithms */
 #ifdef CONFIG_MAC80211_RC_PID
-extern int rc80211_pid_init(void);
-extern void rc80211_pid_exit(void);
+int rc80211_pid_init(void);
+void rc80211_pid_exit(void);
 #else
 static inline int rc80211_pid_init(void)
 {
@@ -157,8 +157,8 @@ static inline void rc80211_pid_exit(void)
 #endif
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL
-extern int rc80211_minstrel_init(void);
-extern void rc80211_minstrel_exit(void);
+int rc80211_minstrel_init(void);
+void rc80211_minstrel_exit(void);
 #else
 static inline int rc80211_minstrel_init(void)
 {
@@ -170,8 +170,8 @@ static inline void rc80211_minstrel_exit(void)
 #endif
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL_HT
-extern int rc80211_minstrel_ht_init(void);
-extern void rc80211_minstrel_ht_exit(void);
+int rc80211_minstrel_ht_init(void);
+void rc80211_minstrel_ht_exit(void);
 #else
 static inline int rc80211_minstrel_ht_init(void)
 {
index f0247a43a75c1d0825f2588cfbfbcd13fad9c28b..0011ac8150972470110b004c4780a8dbc8acd406 100644 (file)
@@ -3073,6 +3073,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
        case NL80211_IFTYPE_ADHOC:
                if (!bssid)
                        return 0;
+               if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+                       return 0;
                if (ieee80211_is_beacon(hdr->frame_control)) {
                        return 1;
                } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
index ecb57b0bf74a19153ab0ed196fe1ae4f7e272c64..5ad66a83ef7f4d4525de163c2c2f1fe9d6931a04 100644 (file)
@@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        enum ieee80211_band band;
        int i, ielen, n_chans;
 
+       if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+               return false;
+
        do {
                if (local->hw_scan_band == IEEE80211_NUM_BANDS)
                        return false;
@@ -939,7 +942,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
        if (!local->scan_req)
                goto out;
 
+       /*
+        * We have a scan running and the driver already reported completion,
+        * but the worker hasn't run yet or is stuck on the mutex - mark it as
+        * cancelled.
+        */
+       if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+           test_bit(SCAN_COMPLETED, &local->scanning)) {
+               set_bit(SCAN_HW_CANCELLED, &local->scanning);
+               goto out;
+       }
+
        if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
+               /*
+                * Make sure that __ieee80211_scan_completed doesn't trigger a
+                * scan on another band.
+                */
+               set_bit(SCAN_HW_CANCELLED, &local->scanning);
                if (local->ops->cancel_hw_scan)
                        drv_cancel_hw_scan(local,
                                rcu_dereference_protected(local->scan_sdata,
index 368837fe3b800e87f408039ef4d36e84bfaa7069..78dc2e99027e06b8a9fc37f5bdf7f06483476f2b 100644 (file)
@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
 
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               sta->last_rx = jiffies;
+
        if (ieee80211_is_data_qos(mgmt->frame_control)) {
                struct ieee80211_hdr *hdr = (void *) skb->data;
                u8 *qc = ieee80211_get_qos_ctl(hdr);
index 5d62c5804819e8e3faa3e696e16b166c5f431524..d4cee98533fdc5a532a8ba74e48d6158faeb5003 100644 (file)
@@ -77,13 +77,13 @@ DECLARE_EVENT_CLASS(local_sdata_addr_evt,
        TP_STRUCT__entry(
                LOCAL_ENTRY
                VIF_ENTRY
-               __array(char, addr, 6)
+               __array(char, addr, ETH_ALEN)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               memcpy(__entry->addr, sdata->vif.addr, 6);
+               memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN);
        ),
 
        TP_printk(
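
The trace.h hunk only replaces the literal 6 with ETH_ALEN. A trivial standalone analogue, with ETH_ALEN re-defined locally for illustration:

/* Prefer the named constant over the magic number so address-size audits
 * touch a single definition. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* as in <linux/if_ether.h> */

int main(void)
{
	unsigned char vif_addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	unsigned char copy[ETH_ALEN];

	memcpy(copy, vif_addr, ETH_ALEN);	/* instead of memcpy(..., 6) */
	printf("%02x:...:%02x\n", copy[0], copy[ETH_ALEN - 1]);
	return 0;
}
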
index 4fcbf634b54863387f85b42d5f47f82de861e995..9993fcb19ecdd8b97ea0c1fe1221f916dfd24e7f 100644 (file)
@@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                tx->sta = rcu_dereference(sdata->u.vlan.sta);
                if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
                        return TX_DROP;
-       } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+       } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
+                                 IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
                   tx->sdata->control_port_protocol == tx->skb->protocol) {
                tx->sta = sta_info_get_bss(sdata, hdr->addr1);
        }
index 550a6880625dd79cfd9741017ddb6e63f4c50274..aefb9d5b962023eb59b750f867de6733eb1c142b 100644 (file)
@@ -2101,7 +2101,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
-       int rate, skip, shift;
+       int rate, shift;
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
        u32 rate_flags;
@@ -2129,14 +2129,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                pos = skb_put(skb, exrates + 2);
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                *pos++ = exrates;
-               skip = 0;
                for (i = 8; i < sband->n_bitrates; i++) {
                        u8 basic = 0;
                        if ((rate_flags & sband->bitrates[i].flags)
                            != rate_flags)
                                continue;
-                       if (skip++ < 8)
-                               continue;
                        if (need_basic && basic_rates & BIT(i))
                                basic = 0x80;
                        rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -2239,6 +2236,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
        }
 
        rate = cfg80211_calculate_bitrate(&ri);
+       if (WARN_ONCE(!rate,
+                     "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
+                     status->flag, status->rate_idx, status->vht_nss))
+               return 0;
 
        /* rewind from end of MPDU */
        if (status->flag & RX_FLAG_MACTIME_END)
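
In the extended supported rates hunk above, each entry is emitted in units of 500 kbps (mac80211 stores bitrates in 100 kbps units, hence the divide-by-5) with the top bit marking a basic rate. A small sketch of that encoding, ignoring the channel-width shift and using an example rate table:

/* Illustrative encoding of (Extended) Supported Rates entries:
 * value = bitrate in 500 kbps units, MSB set for basic rates. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int bitrates_100kbps[] = { 10, 20, 55, 110, 60, 120, 240, 540 };
	unsigned int basic_rates = 0x0f;	/* first four basic, for example */
	int i;

	for (i = 0; i < 8; i++) {
		unsigned char val = DIV_ROUND_UP(bitrates_100kbps[i], 5);

		if (basic_rates & (1u << i))
			val |= 0x80;		/* basic-rate flag */
		printf("rate[%d] = 0x%02x\n", i, val);
	}
	return 0;
}
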
index 1bec1219ab815ff37c3970a4e2473e0a4bb62809..851cd880b0c048c4dcd45e1b6652888338490cf9 100644 (file)
@@ -33,6 +33,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_DODGY |
                                  SKB_GSO_TCP_ECN |
                                  SKB_GSO_GRE |
+                                 SKB_GSO_IPIP |
                                  SKB_GSO_MPLS)))
                goto out;
 
index 6e839b6dff2b1349f86d87837e0a91bb486cc4bf..48acec17e27a1524ced30bd86ad865f147ef3a3e 100644 (file)
@@ -413,6 +413,58 @@ config NETFILTER_SYNPROXY
 
 endif # NF_CONNTRACK
 
+config NF_TABLES
+       depends on NETFILTER_NETLINK
+       tristate "Netfilter nf_tables support"
+
+config NFT_EXTHDR
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables IPv6 exthdr module"
+
+config NFT_META
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables meta module"
+
+config NFT_CT
+       depends on NF_TABLES
+       depends on NF_CONNTRACK
+       tristate "Netfilter nf_tables conntrack module"
+
+config NFT_RBTREE
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables rbtree set module"
+
+config NFT_HASH
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables hash set module"
+
+config NFT_COUNTER
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables counter module"
+
+config NFT_LOG
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables log module"
+
+config NFT_LIMIT
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables limit module"
+
+config NFT_NAT
+       depends on NF_TABLES
+       depends on NF_CONNTRACK
+       depends on NF_NAT
+       tristate "Netfilter nf_tables nat module"
+
+config NFT_COMPAT
+       depends on NF_TABLES
+       depends on NETFILTER_XTABLES
+       tristate "Netfilter x_tables over nf_tables module"
+       help
+         This is required if you intend to use any of the existing
+         x_tables match/target extensions over the nf_tables
+         framework.
+
 config NETFILTER_XTABLES
        tristate "Netfilter Xtables support (required for ip_tables)"
        default m if NETFILTER_ADVANCED=n
index c3a0a12907f693630b841400d3e47babfd338bde..394483b2c193c53fb08dd98591969db0fa995fa8 100644 (file)
@@ -64,6 +64,24 @@ obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 # SYNPROXY
 obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 
+# nf_tables
+nf_tables-objs += nf_tables_core.o nf_tables_api.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
+nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
+
+obj-$(CONFIG_NF_TABLES)                += nf_tables.o
+obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
+obj-$(CONFIG_NFT_EXTHDR)       += nft_exthdr.o
+obj-$(CONFIG_NFT_META)         += nft_meta.o
+obj-$(CONFIG_NFT_CT)           += nft_ct.o
+obj-$(CONFIG_NFT_LIMIT)                += nft_limit.o
+obj-$(CONFIG_NFT_NAT)          += nft_nat.o
+#nf_tables-objs                        += nft_meta_target.o
+obj-$(CONFIG_NFT_RBTREE)       += nft_rbtree.o
+obj-$(CONFIG_NFT_HASH)         += nft_hash.o
+obj-$(CONFIG_NFT_COUNTER)      += nft_counter.o
+obj-$(CONFIG_NFT_LOG)          += nft_log.o
+
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 
index 593b16ea45e0d817787c4d06d9d3d703df2fc65b..1fbab0cdd302bdafe199d434fc10f2f6401ef6a8 100644 (file)
@@ -146,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
 repeat:
-               verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
+               verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
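
The nf_iterate() change passes the hook's own registration entry to the callback rather than the hook number, so an implementation can keep per-registration data next to the registration itself. A generic user-space sketch of that callback-with-ops pattern, with invented names:

/* Illustrative callback pattern: hand the registration record to the
 * callback so per-registration data travels with it. */
#include <stdio.h>

struct hook_ops;
typedef unsigned int (*hook_fn)(const struct hook_ops *ops, const char *pkt);

struct hook_ops {
	hook_fn hook;
	int priority;
	void *priv;		/* per-registration private data */
};

static unsigned int count_hook(const struct hook_ops *ops, const char *pkt)
{
	int *counter = ops->priv;

	(*counter)++;
	printf("hook prio %d saw \"%s\" (count=%d)\n",
	       ops->priority, pkt, *counter);
	return 1;		/* "accept" */
}

int main(void)
{
	int hits = 0;
	struct hook_ops ops = { .hook = count_hook, .priority = 0, .priv = &hits };

	ops.hook(&ops, "example packet");	/* what the iterator now does */
	return 0;
}
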
index ba36c283d8372dfb3c890994c57c6bb0d46266b0..a2d6263b6c648e97c663025660ad47320c2c08ff 100644 (file)
@@ -1,7 +1,7 @@
 menuconfig IP_SET
        tristate "IP set support"
        depends on INET && NETFILTER
-       depends on NETFILTER_NETLINK
+       select NETFILTER_NETLINK
        help
          This option adds IP set support to the kernel.
          In order to define and use the sets, you need the userspace utility
@@ -90,6 +90,15 @@ config IP_SET_HASH_IPPORTNET
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETPORTNET
+       tristate "hash:net,port,net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net,port,net set type support, by which
+         one can store two IPv4/IPv6 subnets, and a protocol/port in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_NET
        tristate "hash:net set support"
        depends on IP_SET
@@ -99,6 +108,15 @@ config IP_SET_HASH_NET
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETNET
+       tristate "hash:net,net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net,net set type support, by which
+         one can store IPv4/IPv6 network address/prefix pairs in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_NETPORT
        tristate "hash:net,port set support"
        depends on IP_SET
index 6e965ecd5444696b4dc7e09bb98b1d09420ce9ab..44b2d38476faeb75a95c5ac8348330a99aa866fd 100644 (file)
@@ -20,6 +20,8 @@ obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
 obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
 obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
 obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
+obj-$(CONFIG_IP_SET_HASH_NETNET) += ip_set_hash_netnet.o
+obj-$(CONFIG_IP_SET_HASH_NETPORTNET) += ip_set_hash_netportnet.o
 
 # list types
 obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
index 25243379b887e5724d147cac0afedab58a1dec61..a13e15be7911cfcd2e3103f9790beaeb869bd478 100644 (file)
@@ -8,38 +8,32 @@
 #ifndef __IP_SET_BITMAP_IP_GEN_H
 #define __IP_SET_BITMAP_IP_GEN_H
 
-#define CONCAT(a, b)           a##b
-#define TOKEN(a,b)             CONCAT(a, b)
-
-#define mtype_do_test          TOKEN(MTYPE, _do_test)
-#define mtype_gc_test          TOKEN(MTYPE, _gc_test)
-#define mtype_is_filled                TOKEN(MTYPE, _is_filled)
-#define mtype_do_add           TOKEN(MTYPE, _do_add)
-#define mtype_do_del           TOKEN(MTYPE, _do_del)
-#define mtype_do_list          TOKEN(MTYPE, _do_list)
-#define mtype_do_head          TOKEN(MTYPE, _do_head)
-#define mtype_adt_elem         TOKEN(MTYPE, _adt_elem)
-#define mtype_add_timeout      TOKEN(MTYPE, _add_timeout)
-#define mtype_gc_init          TOKEN(MTYPE, _gc_init)
-#define mtype_kadt             TOKEN(MTYPE, _kadt)
-#define mtype_uadt             TOKEN(MTYPE, _uadt)
-#define mtype_destroy          TOKEN(MTYPE, _destroy)
-#define mtype_flush            TOKEN(MTYPE, _flush)
-#define mtype_head             TOKEN(MTYPE, _head)
-#define mtype_same_set         TOKEN(MTYPE, _same_set)
-#define mtype_elem             TOKEN(MTYPE, _elem)
-#define mtype_test             TOKEN(MTYPE, _test)
-#define mtype_add              TOKEN(MTYPE, _add)
-#define mtype_del              TOKEN(MTYPE, _del)
-#define mtype_list             TOKEN(MTYPE, _list)
-#define mtype_gc               TOKEN(MTYPE, _gc)
+#define mtype_do_test          IPSET_TOKEN(MTYPE, _do_test)
+#define mtype_gc_test          IPSET_TOKEN(MTYPE, _gc_test)
+#define mtype_is_filled                IPSET_TOKEN(MTYPE, _is_filled)
+#define mtype_do_add           IPSET_TOKEN(MTYPE, _do_add)
+#define mtype_ext_cleanup      IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_do_del           IPSET_TOKEN(MTYPE, _do_del)
+#define mtype_do_list          IPSET_TOKEN(MTYPE, _do_list)
+#define mtype_do_head          IPSET_TOKEN(MTYPE, _do_head)
+#define mtype_adt_elem         IPSET_TOKEN(MTYPE, _adt_elem)
+#define mtype_add_timeout      IPSET_TOKEN(MTYPE, _add_timeout)
+#define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_kadt             IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt             IPSET_TOKEN(MTYPE, _uadt)
+#define mtype_destroy          IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_flush            IPSET_TOKEN(MTYPE, _flush)
+#define mtype_head             IPSET_TOKEN(MTYPE, _head)
+#define mtype_same_set         IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+#define mtype_test             IPSET_TOKEN(MTYPE, _test)
+#define mtype_add              IPSET_TOKEN(MTYPE, _add)
+#define mtype_del              IPSET_TOKEN(MTYPE, _del)
+#define mtype_list             IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
 #define mtype                  MTYPE
 
-#define ext_timeout(e, m)      \
-       (unsigned long *)((e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m)      \
-       (struct ip_set_counter *)((e) + (m)->offset[IPSET_OFFSET_COUNTER])
-#define get_ext(map, id)       ((map)->extensions + (map)->dsize * (id))
+#define get_ext(set, map, id)  ((map)->extensions + (set)->dsize * (id))
 
 static void
 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -49,10 +43,21 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
        init_timer(&map->gc);
        map->gc.data = (unsigned long) set;
        map->gc.function = gc;
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
+static void
+mtype_ext_cleanup(struct ip_set *set)
+{
+       struct mtype *map = set->data;
+       u32 id;
+
+       for (id = 0; id < map->elements; id++)
+               if (test_bit(id, map->members))
+                       ip_set_ext_destroy(set, get_ext(set, map, id));
+}
+
 static void
 mtype_destroy(struct ip_set *set)
 {
@@ -62,8 +67,11 @@ mtype_destroy(struct ip_set *set)
                del_timer_sync(&map->gc);
 
        ip_set_free(map->members);
-       if (map->dsize)
+       if (set->dsize) {
+               if (set->extensions & IPSET_EXT_DESTROY)
+                       mtype_ext_cleanup(set);
                ip_set_free(map->extensions);
+       }
        kfree(map);
 
        set->data = NULL;
@@ -74,6 +82,8 @@ mtype_flush(struct ip_set *set)
 {
        struct mtype *map = set->data;
 
+       if (set->extensions & IPSET_EXT_DESTROY)
+               mtype_ext_cleanup(set);
        memset(map->members, 0, map->memsize);
 }
 
@@ -91,12 +101,9 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
                          htonl(sizeof(*map) +
                                map->memsize +
-                               map->dsize * map->elements)) ||
-           (SET_WITH_TIMEOUT(set) &&
-            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
-           (SET_WITH_COUNTER(set) &&
-            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-                          htonl(IPSET_FLAG_WITH_COUNTERS))))
+                               set->dsize * map->elements)))
+               goto nla_put_failure;
+       if (unlikely(ip_set_put_flags(skb, set)))
                goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
@@ -111,16 +118,16 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
        struct mtype *map = set->data;
        const struct mtype_adt_elem *e = value;
-       void *x = get_ext(map, e->id);
-       int ret = mtype_do_test(e, map);
+       void *x = get_ext(set, map, e->id);
+       int ret = mtype_do_test(e, map, set->dsize);
 
        if (ret <= 0)
                return ret;
        if (SET_WITH_TIMEOUT(set) &&
-           ip_set_timeout_expired(ext_timeout(x, map)))
+           ip_set_timeout_expired(ext_timeout(x, set)))
                return 0;
        if (SET_WITH_COUNTER(set))
-               ip_set_update_counter(ext_counter(x, map), ext, mext, flags);
+               ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
        return 1;
 }
 
@@ -130,26 +137,30 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
        struct mtype *map = set->data;
        const struct mtype_adt_elem *e = value;
-       void *x = get_ext(map, e->id);
-       int ret = mtype_do_add(e, map, flags);
+       void *x = get_ext(set, map, e->id);
+       int ret = mtype_do_add(e, map, flags, set->dsize);
 
        if (ret == IPSET_ADD_FAILED) {
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(x, map)))
+                   ip_set_timeout_expired(ext_timeout(x, set)))
                        ret = 0;
                else if (!(flags & IPSET_FLAG_EXIST))
                        return -IPSET_ERR_EXIST;
+               /* Element is re-added, cleanup extensions */
+               ip_set_ext_destroy(set, x);
        }
 
        if (SET_WITH_TIMEOUT(set))
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
-               mtype_add_timeout(ext_timeout(x, map), e, ext, map, ret);
+               mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
 #else
-               ip_set_timeout_set(ext_timeout(x, map), ext->timeout);
+               ip_set_timeout_set(ext_timeout(x, set), ext->timeout);
 #endif
 
        if (SET_WITH_COUNTER(set))
-               ip_set_init_counter(ext_counter(x, map), ext);
+               ip_set_init_counter(ext_counter(x, set), ext);
+       if (SET_WITH_COMMENT(set))
+               ip_set_init_comment(ext_comment(x, set), ext);
        return 0;
 }
 
@@ -159,16 +170,27 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
        struct mtype *map = set->data;
        const struct mtype_adt_elem *e = value;
-       const void *x = get_ext(map, e->id);
+       void *x = get_ext(set, map, e->id);
 
-       if (mtype_do_del(e, map) ||
-           (SET_WITH_TIMEOUT(set) &&
-            ip_set_timeout_expired(ext_timeout(x, map))))
+       if (mtype_do_del(e, map))
+               return -IPSET_ERR_EXIST;
+
+       ip_set_ext_destroy(set, x);
+       if (SET_WITH_TIMEOUT(set) &&
+           ip_set_timeout_expired(ext_timeout(x, set)))
                return -IPSET_ERR_EXIST;
 
        return 0;
 }
 
+#ifndef IP_SET_BITMAP_STORED_TIMEOUT
+static inline bool
+mtype_is_filled(const struct mtype_elem *x)
+{
+       return true;
+}
+#endif
+
 static int
 mtype_list(const struct ip_set *set,
           struct sk_buff *skb, struct netlink_callback *cb)
@@ -183,13 +205,13 @@ mtype_list(const struct ip_set *set,
                return -EMSGSIZE;
        for (; cb->args[2] < map->elements; cb->args[2]++) {
                id = cb->args[2];
-               x = get_ext(map, id);
+               x = get_ext(set, map, id);
                if (!test_bit(id, map->members) ||
                    (SET_WITH_TIMEOUT(set) &&
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
                     mtype_is_filled((const struct mtype_elem *) x) &&
 #endif
-                    ip_set_timeout_expired(ext_timeout(x, map))))
+                    ip_set_timeout_expired(ext_timeout(x, set))))
                        continue;
                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
                if (!nested) {
@@ -199,23 +221,10 @@ mtype_list(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               if (mtype_do_list(skb, map, id))
+               if (mtype_do_list(skb, map, id, set->dsize))
                        goto nla_put_failure;
-               if (SET_WITH_TIMEOUT(set)) {
-#ifdef IP_SET_BITMAP_STORED_TIMEOUT
-                       if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                         htonl(ip_set_timeout_stored(map, id,
-                                                       ext_timeout(x, map)))))
-                               goto nla_put_failure;
-#else
-                       if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                         htonl(ip_set_timeout_get(
-                                                       ext_timeout(x, map)))))
-                               goto nla_put_failure;
-#endif
-               }
-               if (SET_WITH_COUNTER(set) &&
-                   ip_set_put_counter(skb, ext_counter(x, map)))
+               if (ip_set_put_extensions(skb, set, x,
+                   mtype_is_filled((const struct mtype_elem *) x)))
                        goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
@@ -228,11 +237,11 @@ mtype_list(const struct ip_set *set,
 
 nla_put_failure:
        nla_nest_cancel(skb, nested);
-       ipset_nest_end(skb, adt);
        if (unlikely(id == first)) {
                cb->args[2] = 0;
                return -EMSGSIZE;
        }
+       ipset_nest_end(skb, adt);
        return 0;
 }
 
@@ -241,21 +250,23 @@ mtype_gc(unsigned long ul_set)
 {
        struct ip_set *set = (struct ip_set *) ul_set;
        struct mtype *map = set->data;
-       const void *x;
+       void *x;
        u32 id;
 
        /* We run parallel with other readers (test element)
         * but adding/deleting new entries is locked out */
        read_lock_bh(&set->lock);
        for (id = 0; id < map->elements; id++)
-               if (mtype_gc_test(id, map)) {
-                       x = get_ext(map, id);
-                       if (ip_set_timeout_expired(ext_timeout(x, map)))
+               if (mtype_gc_test(id, map, set->dsize)) {
+                       x = get_ext(set, map, id);
+                       if (ip_set_timeout_expired(ext_timeout(x, set))) {
                                clear_bit(id, map->members);
+                               ip_set_ext_destroy(set, x);
+                       }
                }
        read_unlock_bh(&set->lock);
 
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
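
The IPSET_TOKEN() conversion above is the usual two-level token-pasting idiom: the extra level of indirection forces MTYPE to be macro-expanded before ## concatenates it. A minimal standalone example, reusing the old CONCAT/TOKEN names rather than the shared header:

/* Two-level token pasting: CONCAT() pastes, TOKEN() forces its arguments
 * to be expanded first, so MTYPE becomes bitmap_ip before pasting. */
#include <stdio.h>

#define CONCAT(a, b)	a##b
#define TOKEN(a, b)	CONCAT(a, b)

#define MTYPE		bitmap_ip
#define mtype_do_test	TOKEN(MTYPE, _do_test)

static int bitmap_ip_do_test(int id)	/* the name mtype_do_test expands to */
{
	return id == 42;
}

int main(void)
{
	/* pasting MTYPE directly with ## would yield "MTYPE_do_test" */
	printf("%d\n", mtype_do_test(42));
	return 0;
}
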
index f1a8128bef01c3f555c23264683d9f1688b06509..6f1f9f4948084e40375ab722565e7bf20b2d972d 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counter support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counter support added */
+#define IPSET_TYPE_REV_MAX     2       /* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 #define MTYPE          bitmap_ip
@@ -44,10 +45,7 @@ struct bitmap_ip {
        u32 elements;           /* number of max elements in the set */
        u32 hosts;              /* number of hosts in a subnet */
        size_t memsize;         /* members size */
-       size_t dsize;           /* extensions struct size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
        u8 netmask;             /* subnet netmask */
-       u32 timeout;            /* timeout parameter */
        struct timer_list gc;   /* garbage collection */
 };
 
@@ -65,20 +63,21 @@ ip_to_id(const struct bitmap_ip *m, u32 ip)
 /* Common functions */
 
 static inline int
-bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
+bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
+                 struct bitmap_ip *map, size_t dsize)
 {
        return !!test_bit(e->id, map->members);
 }
 
 static inline int
-bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map)
+bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
 {
        return !!test_bit(id, map->members);
 }
 
 static inline int
 bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
-                u32 flags)
+                u32 flags, size_t dsize)
 {
        return !!test_and_set_bit(e->id, map->members);
 }
@@ -90,7 +89,8 @@ bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
 }
 
 static inline int
-bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id)
+bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
+                 size_t dsize)
 {
        return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
                        htonl(map->first_ip + id * map->hosts));
@@ -113,7 +113,7 @@ bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct bitmap_ip *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_ip_adt_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        u32 ip;
 
        ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
@@ -131,9 +131,9 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        struct bitmap_ip *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       u32 ip, ip_to;
+       u32 ip = 0, ip_to = 0;
        struct bitmap_ip_adt_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret = 0;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -200,7 +200,7 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
        return x->first_ip == y->first_ip &&
               x->last_ip == y->last_ip &&
               x->netmask == y->netmask &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
@@ -209,25 +209,6 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
 struct bitmap_ip_elem {
 };
 
-/* Timeout variant */
-
-struct bitmap_ipt_elem {
-       unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipc_elem {
-       struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipct_elem {
-       unsigned long timeout;
-       struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip type of sets */
@@ -240,8 +221,8 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
        map->members = ip_set_alloc(map->memsize);
        if (!map->members)
                return false;
-       if (map->dsize) {
-               map->extensions = ip_set_alloc(map->dsize * elements);
+       if (set->dsize) {
+               map->extensions = ip_set_alloc(set->dsize * elements);
                if (!map->extensions) {
                        kfree(map->members);
                        return false;
@@ -252,7 +233,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
        map->elements = elements;
        map->hosts = hosts;
        map->netmask = netmask;
-       map->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        set->data = map;
        set->family = NFPROTO_IPV4;
@@ -261,10 +242,11 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
 }
 
 static int
-bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+                u32 flags)
 {
        struct bitmap_ip *map;
-       u32 first_ip, last_ip, hosts, cadt_flags = 0;
+       u32 first_ip = 0, last_ip = 0, hosts;
        u64 elements;
        u8 netmask = 32;
        int ret;
@@ -336,61 +318,15 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 
        map->memsize = bitmap_bytes(0, elements - 1);
        set->variant = &bitmap_ip;
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map->dsize = sizeof(struct bitmap_ipct_elem);
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct bitmap_ipct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipct_elem, counter);
-
-                       if (!init_map_ip(set, map, first_ip, last_ip,
-                                        elements, hosts, netmask)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-
-                       map->timeout = ip_set_timeout_uget(
-                               tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-
-                       bitmap_ip_gc_init(set, bitmap_ip_gc);
-               } else {
-                       map->dsize = sizeof(struct bitmap_ipc_elem);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipc_elem, counter);
-
-                       if (!init_map_ip(set, map, first_ip, last_ip,
-                                        elements, hosts, netmask)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map->dsize = sizeof(struct bitmap_ipt_elem);
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct bitmap_ipt_elem, timeout);
-
-               if (!init_map_ip(set, map, first_ip, last_ip,
-                                elements, hosts, netmask)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-
-               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
-
+       set->dsize = ip_set_elem_len(set, tb, 0);
+       if (!init_map_ip(set, map, first_ip, last_ip,
+                        elements, hosts, netmask)) {
+               kfree(map);
+               return -ENOMEM;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                bitmap_ip_gc_init(set, bitmap_ip_gc);
-       } else {
-               map->dsize = 0;
-               if (!init_map_ip(set, map, first_ip, last_ip,
-                                elements, hosts, netmask)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
        }
        return 0;
 }
@@ -401,8 +337,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_IPV4,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = bitmap_ip_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
@@ -420,6 +356,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
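
The bitmap:ip conversion stops tracking dsize, timeout and extension offsets in every map type and leans on the generic struct ip_set instead. The underlying layout idea, a fixed element core followed by optional extensions at offsets computed at create time, can be sketched in isolation like this (sizes and names invented for illustration):

/* Illustrative element layout: a fixed core plus optional extensions whose
 * offsets are computed once when the "set" is created. */
#include <stdio.h>
#include <stdlib.h>

struct set_layout {
	size_t dsize;		/* total bytes per element */
	size_t off_timeout;	/* 0 == extension absent */
	size_t off_counter;
};

static void layout_init(struct set_layout *l, int want_timeout, int want_counter)
{
	l->dsize = sizeof(unsigned long);	/* element core */
	l->off_timeout = l->off_counter = 0;
	if (want_timeout) {
		l->off_timeout = l->dsize;
		l->dsize += sizeof(unsigned long);
	}
	if (want_counter) {
		l->off_counter = l->dsize;
		l->dsize += sizeof(unsigned long long);
	}
}

#define ext_timeout(e, l)  ((unsigned long *)((char *)(e) + (l)->off_timeout))

int main(void)
{
	struct set_layout l;
	void *elem;

	layout_init(&l, 1, 1);
	elem = calloc(1, l.dsize);
	if (!elem)
		return 1;

	*ext_timeout(elem, &l) = 300;	/* example timeout, seconds */
	printf("dsize=%zu timeout=%lu\n", l.dsize, *ext_timeout(elem, &l));
	free(elem);
	return 0;
}
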
index 3b30e0bef890424abbb3bbf71bd2df147a52eab2..740eabededd9754b7db95a9d46d48f8a1f283bf7 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counter support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counter support added */
+#define IPSET_TYPE_REV_MAX     2       /* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 #define MTYPE          bitmap_ipmac
@@ -48,11 +49,8 @@ struct bitmap_ipmac {
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
-       u32 timeout;            /* timeout value */
-       struct timer_list gc;   /* garbage collector */
        size_t memsize;         /* members size */
-       size_t dsize;           /* size of element */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
+       struct timer_list gc;   /* garbage collector */
 };
 
 /* ADT structure for generic function args */
@@ -82,13 +80,13 @@ get_elem(void *extensions, u16 id, size_t dsize)
 
 static inline int
 bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
-                    const struct bitmap_ipmac *map)
+                    const struct bitmap_ipmac *map, size_t dsize)
 {
        const struct bitmap_ipmac_elem *elem;
 
        if (!test_bit(e->id, map->members))
                return 0;
-       elem = get_elem(map->extensions, e->id, map->dsize);
+       elem = get_elem(map->extensions, e->id, dsize);
        if (elem->filled == MAC_FILLED)
                return e->ether == NULL ||
                       ether_addr_equal(e->ether, elem->ether);
@@ -97,13 +95,13 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
 }
 
 static inline int
-bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map)
+bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
 {
        const struct bitmap_ipmac_elem *elem;
 
        if (!test_bit(id, map->members))
                return 0;
-       elem = get_elem(map->extensions, id, map->dsize);
+       elem = get_elem(map->extensions, id, dsize);
        /* Timer not started for the incomplete elements */
        return elem->filled == MAC_FILLED;
 }
@@ -117,13 +115,13 @@ bitmap_ipmac_is_filled(const struct bitmap_ipmac_elem *elem)
 static inline int
 bitmap_ipmac_add_timeout(unsigned long *timeout,
                         const struct bitmap_ipmac_adt_elem *e,
-                        const struct ip_set_ext *ext,
+                        const struct ip_set_ext *ext, struct ip_set *set,
                         struct bitmap_ipmac *map, int mode)
 {
        u32 t = ext->timeout;
 
        if (mode == IPSET_ADD_START_STORED_TIMEOUT) {
-               if (t == map->timeout)
+               if (t == set->timeout)
                        /* Timeout was not specified, get stored one */
                        t = *timeout;
                ip_set_timeout_set(timeout, t);
@@ -142,11 +140,11 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
 
 static inline int
 bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
-                   struct bitmap_ipmac *map, u32 flags)
+                   struct bitmap_ipmac *map, u32 flags, size_t dsize)
 {
        struct bitmap_ipmac_elem *elem;
 
-       elem = get_elem(map->extensions, e->id, map->dsize);
+       elem = get_elem(map->extensions, e->id, dsize);
        if (test_and_set_bit(e->id, map->members)) {
                if (elem->filled == MAC_FILLED) {
                        if (e->ether && (flags & IPSET_FLAG_EXIST))
@@ -178,22 +176,12 @@ bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
        return !test_and_clear_bit(e->id, map->members);
 }
 
-static inline unsigned long
-ip_set_timeout_stored(struct bitmap_ipmac *map, u32 id, unsigned long *timeout)
-{
-       const struct bitmap_ipmac_elem *elem =
-               get_elem(map->extensions, id, map->dsize);
-
-       return elem->filled == MAC_FILLED ? ip_set_timeout_get(timeout) :
-                                           *timeout;
-}
-
 static inline int
 bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
-                    u32 id)
+                    u32 id, size_t dsize)
 {
        const struct bitmap_ipmac_elem *elem =
-               get_elem(map->extensions, id, map->dsize);
+               get_elem(map->extensions, id, dsize);
 
        return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
                               htonl(map->first_ip + id)) ||
@@ -216,7 +204,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct bitmap_ipmac *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_ipmac_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        u32 ip;
 
        /* MAC can be src only */
@@ -245,8 +233,8 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct bitmap_ipmac *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_ipmac_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
-       u32 ip;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0;
        int ret = 0;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -285,43 +273,12 @@ bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
 
        return x->first_ip == y->first_ip &&
               x->last_ip == y->last_ip &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
 /* Plain variant */
 
-/* Timeout variant */
-
-struct bitmap_ipmact_elem {
-       struct {
-               unsigned char ether[ETH_ALEN];
-               unsigned char filled;
-       } __attribute__ ((aligned));
-       unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipmacc_elem {
-       struct {
-               unsigned char ether[ETH_ALEN];
-               unsigned char filled;
-       } __attribute__ ((aligned));
-       struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipmacct_elem {
-       struct {
-               unsigned char ether[ETH_ALEN];
-               unsigned char filled;
-       } __attribute__ ((aligned));
-       unsigned long timeout;
-       struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip,mac type of sets */
@@ -330,11 +287,11 @@ static bool
 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
               u32 first_ip, u32 last_ip, u32 elements)
 {
-       map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+       map->members = ip_set_alloc(map->memsize);
        if (!map->members)
                return false;
-       if (map->dsize) {
-               map->extensions = ip_set_alloc(map->dsize * elements);
+       if (set->dsize) {
+               map->extensions = ip_set_alloc(set->dsize * elements);
                if (!map->extensions) {
                        kfree(map->members);
                        return false;
@@ -343,7 +300,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
        map->first_ip = first_ip;
        map->last_ip = last_ip;
        map->elements = elements;
-       map->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        set->data = map;
        set->family = NFPROTO_IPV4;
@@ -352,10 +309,10 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
 }
 
 static int
-bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
                    u32 flags)
 {
-       u32 first_ip, last_ip, cadt_flags = 0;
+       u32 first_ip = 0, last_ip = 0;
        u64 elements;
        struct bitmap_ipmac *map;
        int ret;
@@ -399,57 +356,15 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
 
        map->memsize = bitmap_bytes(0, elements - 1);
        set->variant = &bitmap_ipmac;
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map->dsize = sizeof(struct bitmap_ipmacct_elem);
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct bitmap_ipmacct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipmacct_elem, counter);
-
-                       if (!init_map_ipmac(set, map, first_ip, last_ip,
-                                           elements)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-                       map->timeout = ip_set_timeout_uget(
-                               tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
-               } else {
-                       map->dsize = sizeof(struct bitmap_ipmacc_elem);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipmacc_elem, counter);
-
-                       if (!init_map_ipmac(set, map, first_ip, last_ip,
-                                           elements)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map->dsize = sizeof(struct bitmap_ipmact_elem);
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct bitmap_ipmact_elem, timeout);
-
-               if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
+       set->dsize = ip_set_elem_len(set, tb,
+                                    sizeof(struct bitmap_ipmac_elem));
+       if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
+               kfree(map);
+               return -ENOMEM;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
-       } else {
-               map->dsize = sizeof(struct bitmap_ipmac_elem);
-
-               if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-               set->variant = &bitmap_ipmac;
        }
        return 0;
 }
@@ -460,8 +375,8 @@ static struct ip_set_type bitmap_ipmac_type = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_MAC,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_IPV4,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = bitmap_ipmac_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
@@ -478,6 +393,7 @@ static struct ip_set_type bitmap_ipmac_type = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
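
Several hunks above now pass dsize into helpers such as bitmap_ipmac_do_test() so they can locate an element without a per-map dsize field. A minimal sketch of that addressing, assuming get_elem() resolves to extensions + id * dsize and using arbitrary example sizes:

/*
 * Minimal sketch, assuming get_elem() resolves to extensions + id * dsize;
 * dsize and the timeout offset below are arbitrary example values.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t dsize = 24;		/* per-element size, i.e. set->dsize     */
	size_t timeout_off = 16;	/* e.g. a set->offset[] entry for timeouts */
	unsigned int elements = 256;	/* capacity of the bitmap                 */
	unsigned int id = 42;		/* slot number handed to get_elem()       */

	/* One contiguous area, dsize bytes reserved per possible element. */
	char *extensions = calloc(elements, dsize);
	if (!extensions)
		return 1;

	/* get_elem(extensions, id, dsize): the element starts here ...     */
	char *elem = extensions + (size_t)id * dsize;
	/* ... and its timeout extension sits at a fixed offset inside it.  */
	unsigned long *timeout = (unsigned long *)(elem + timeout_off);
	*timeout = 600;

	printf("element %u timeout = %lu\n", id, *timeout);
	free(extensions);
	return 0;
}
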
index 8207d1fda5288dcf252a96b9db60ed5fd543144b..e7603c5b53d737b9de6248bfd0b87d0005c242fd 100644 (file)
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 #include <linux/netfilter/ipset/ip_set_getport.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counter support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counter support added */
+#define IPSET_TYPE_REV_MAX     2       /* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:port");
 
 #define MTYPE          bitmap_port
@@ -38,9 +39,6 @@ struct bitmap_port {
        u16 last_port;          /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
        size_t memsize;         /* members size */
-       size_t dsize;           /* extensions struct size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
-       u32 timeout;            /* timeout parameter */
        struct timer_list gc;   /* garbage collection */
 };
 
@@ -59,20 +57,20 @@ port_to_id(const struct bitmap_port *m, u16 port)
 
 static inline int
 bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
-                   const struct bitmap_port *map)
+                   const struct bitmap_port *map, size_t dsize)
 {
        return !!test_bit(e->id, map->members);
 }
 
 static inline int
-bitmap_port_gc_test(u16 id, const struct bitmap_port *map)
+bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
 {
        return !!test_bit(id, map->members);
 }
 
 static inline int
 bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
-                  struct bitmap_port *map, u32 flags)
+                  struct bitmap_port *map, u32 flags, size_t dsize)
 {
        return !!test_and_set_bit(e->id, map->members);
 }
@@ -85,7 +83,8 @@ bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
 }
 
 static inline int
-bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id)
+bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
+                   size_t dsize)
 {
        return nla_put_net16(skb, IPSET_ATTR_PORT,
                             htons(map->first_port + id));
@@ -106,7 +105,7 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct bitmap_port *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_port_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        __be16 __port;
        u16 port = 0;
 
@@ -131,7 +130,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
        struct bitmap_port *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_port_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port;       /* wraparound */
        u16 port_to;
        int ret = 0;
@@ -191,7 +190,7 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
 
        return x->first_port == y->first_port &&
               x->last_port == y->last_port &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
@@ -200,25 +199,6 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
 struct bitmap_port_elem {
 };
 
-/* Timeout variant */
-
-struct bitmap_portt_elem {
-       unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_portc_elem {
-       struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_portct_elem {
-       unsigned long timeout;
-       struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip type of sets */
@@ -230,8 +210,8 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
        map->members = ip_set_alloc(map->memsize);
        if (!map->members)
                return false;
-       if (map->dsize) {
-               map->extensions = ip_set_alloc(map->dsize * map->elements);
+       if (set->dsize) {
+               map->extensions = ip_set_alloc(set->dsize * map->elements);
                if (!map->extensions) {
                        kfree(map->members);
                        return false;
@@ -239,7 +219,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
        }
        map->first_port = first_port;
        map->last_port = last_port;
-       map->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        set->data = map;
        set->family = NFPROTO_UNSPEC;
@@ -248,11 +228,11 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
 }
 
 static int
-bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+                  u32 flags)
 {
        struct bitmap_port *map;
        u16 first_port, last_port;
-       u32 cadt_flags = 0;
 
        if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
                     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -276,53 +256,14 @@ bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
        map->elements = last_port - first_port + 1;
        map->memsize = map->elements * sizeof(unsigned long);
        set->variant = &bitmap_port;
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map->dsize = sizeof(struct bitmap_portct_elem);
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct bitmap_portct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_portct_elem, counter);
-                       if (!init_map_port(set, map, first_port, last_port)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-
-                       map->timeout =
-                               ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       bitmap_port_gc_init(set, bitmap_port_gc);
-               } else {
-                       map->dsize = sizeof(struct bitmap_portc_elem);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_portc_elem, counter);
-                       if (!init_map_port(set, map, first_port, last_port)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map->dsize = sizeof(struct bitmap_portt_elem);
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct bitmap_portt_elem, timeout);
-               if (!init_map_port(set, map, first_port, last_port)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-
-               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
+       set->dsize = ip_set_elem_len(set, tb, 0);
+       if (!init_map_port(set, map, first_port, last_port)) {
+               kfree(map);
+               return -ENOMEM;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                bitmap_port_gc_init(set, bitmap_port_gc);
-       } else {
-               map->dsize = 0;
-               if (!init_map_port(set, map, first_port, last_port)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-
        }
        return 0;
 }
@@ -333,8 +274,8 @@ static struct ip_set_type bitmap_port_type = {
        .features       = IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = bitmap_port_create,
        .create_policy  = {
                [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
@@ -349,6 +290,7 @@ static struct ip_set_type bitmap_port_type = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index f2e30fb31e78efa405156ca99ba9aeaded3ea49c..dc9284bdd2dd134fa4f96c7d96bcb845dc3f0269 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/spinlock.h>
 #include <linux/rculist.h>
 #include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter/x_tables.h>
@@ -27,8 +29,17 @@ static LIST_HEAD(ip_set_type_list);          /* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);                /* protects ip_set_type_list */
 static DEFINE_RWLOCK(ip_set_ref_lock);         /* protects the set refs */
 
-static struct ip_set * __rcu *ip_set_list;     /* all individual sets */
-static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+struct ip_set_net {
+       struct ip_set * __rcu *ip_set_list;     /* all individual sets */
+       ip_set_id_t     ip_set_max;     /* max number of sets */
+       int             is_deleted;     /* deleted by ip_set_net_exit */
+};
+static int ip_set_net_id __read_mostly;
+
+static inline struct ip_set_net *ip_set_pernet(struct net *net)
+{
+       return net_generic(net, ip_set_net_id);
+}
 
 #define IP_SET_INC     64
 #define STREQ(a, b)    (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
@@ -45,8 +56,8 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 /* When the nfnl mutex is held: */
 #define nfnl_dereference(p)            \
        rcu_dereference_protected(p, 1)
-#define nfnl_set(id)                   \
-       nfnl_dereference(ip_set_list)[id]
+#define nfnl_set(inst, id)                     \
+       nfnl_dereference((inst)->ip_set_list)[id]
 
 /*
  * The set types are implemented in modules and registered set types
@@ -315,6 +326,60 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 
+typedef void (*destroyer)(void *);
+/* ipset data extension types, in size order */
+
+const struct ip_set_ext_type ip_set_extensions[] = {
+       [IPSET_EXT_ID_COUNTER] = {
+               .type   = IPSET_EXT_COUNTER,
+               .flag   = IPSET_FLAG_WITH_COUNTERS,
+               .len    = sizeof(struct ip_set_counter),
+               .align  = __alignof__(struct ip_set_counter),
+       },
+       [IPSET_EXT_ID_TIMEOUT] = {
+               .type   = IPSET_EXT_TIMEOUT,
+               .len    = sizeof(unsigned long),
+               .align  = __alignof__(unsigned long),
+       },
+       [IPSET_EXT_ID_COMMENT] = {
+               .type    = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
+               .flag    = IPSET_FLAG_WITH_COMMENT,
+               .len     = sizeof(struct ip_set_comment),
+               .align   = __alignof__(struct ip_set_comment),
+               .destroy = (destroyer) ip_set_comment_free,
+       },
+};
+EXPORT_SYMBOL_GPL(ip_set_extensions);
+
+static inline bool
+add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
+{
+       return ip_set_extensions[id].flag ?
+               (flags & ip_set_extensions[id].flag) :
+               !!tb[IPSET_ATTR_TIMEOUT];
+}
+
+size_t
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+{
+       enum ip_set_ext_id id;
+       size_t offset = 0;
+       u32 cadt_flags = 0;
+
+       if (tb[IPSET_ATTR_CADT_FLAGS])
+               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+       for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
+               if (!add_extension(id, cadt_flags, tb))
+                       continue;
+               offset += ALIGN(len + offset, ip_set_extensions[id].align);
+               set->offset[id] = offset;
+               set->extensions |= ip_set_extensions[id].type;
+               offset += ip_set_extensions[id].len;
+       }
+       return len + offset;
+}
+EXPORT_SYMBOL_GPL(ip_set_elem_len);
+
 int
 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
                      struct ip_set_ext *ext)
@@ -334,6 +399,12 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
                        ext->packets = be64_to_cpu(nla_get_be64(
                                                   tb[IPSET_ATTR_PACKETS]));
        }
+       if (tb[IPSET_ATTR_COMMENT]) {
+               if (!(set->extensions & IPSET_EXT_COMMENT))
+                       return -IPSET_ERR_COMMENT;
+               ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
@@ -374,13 +445,14 @@ __ip_set_put(struct ip_set *set)
  */
 
 static inline struct ip_set *
-ip_set_rcu_get(ip_set_id_t index)
+ip_set_rcu_get(struct net *net, ip_set_id_t index)
 {
        struct ip_set *set;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
        rcu_read_lock();
        /* ip_set_list itself needs to be protected */
-       set = rcu_dereference(ip_set_list)[index];
+       set = rcu_dereference(inst->ip_set_list)[index];
        rcu_read_unlock();
 
        return set;
@@ -390,7 +462,8 @@ int
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
            const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-       struct ip_set *set = ip_set_rcu_get(index);
+       struct ip_set *set = ip_set_rcu_get(
+                       dev_net(par->in ? par->in : par->out), index);
        int ret = 0;
 
        BUG_ON(set == NULL);
@@ -428,7 +501,8 @@ int
 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
           const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-       struct ip_set *set = ip_set_rcu_get(index);
+       struct ip_set *set = ip_set_rcu_get(
+                       dev_net(par->in ? par->in : par->out), index);
        int ret;
 
        BUG_ON(set == NULL);
@@ -450,7 +524,8 @@ int
 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
           const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-       struct ip_set *set = ip_set_rcu_get(index);
+       struct ip_set *set = ip_set_rcu_get(
+                       dev_net(par->in ? par->in : par->out), index);
        int ret = 0;
 
        BUG_ON(set == NULL);
@@ -474,14 +549,15 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  *
  */
 ip_set_id_t
-ip_set_get_byname(const char *name, struct ip_set **set)
+ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
 {
        ip_set_id_t i, index = IPSET_INVALID_ID;
        struct ip_set *s;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
        rcu_read_lock();
-       for (i = 0; i < ip_set_max; i++) {
-               s = rcu_dereference(ip_set_list)[i];
+       for (i = 0; i < inst->ip_set_max; i++) {
+               s = rcu_dereference(inst->ip_set_list)[i];
                if (s != NULL && STREQ(s->name, name)) {
                        __ip_set_get(s);
                        index = i;
@@ -501,17 +577,26 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * to be valid, after calling this function.
  *
  */
-void
-ip_set_put_byindex(ip_set_id_t index)
+
+static inline void
+__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
 {
        struct ip_set *set;
 
        rcu_read_lock();
-       set = rcu_dereference(ip_set_list)[index];
+       set = rcu_dereference(inst->ip_set_list)[index];
        if (set != NULL)
                __ip_set_put(set);
        rcu_read_unlock();
 }
+
+void
+ip_set_put_byindex(struct net *net, ip_set_id_t index)
+{
+       struct ip_set_net *inst = ip_set_pernet(net);
+
+       __ip_set_put_byindex(inst, index);
+}
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
 /*
@@ -522,9 +607,9 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  *
  */
 const char *
-ip_set_name_byindex(ip_set_id_t index)
+ip_set_name_byindex(struct net *net, ip_set_id_t index)
 {
-       const struct ip_set *set = ip_set_rcu_get(index);
+       const struct ip_set *set = ip_set_rcu_get(net, index);
 
        BUG_ON(set == NULL);
        BUG_ON(set->ref == 0);
@@ -546,14 +631,15 @@ EXPORT_SYMBOL_GPL(ip_set_name_byindex);
  * The nfnl mutex is used in the function.
  */
 ip_set_id_t
-ip_set_nfnl_get(const char *name)
+ip_set_nfnl_get(struct net *net, const char *name)
 {
        ip_set_id_t i, index = IPSET_INVALID_ID;
        struct ip_set *s;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       for (i = 0; i < ip_set_max; i++) {
-               s = nfnl_set(i);
+       for (i = 0; i < inst->ip_set_max; i++) {
+               s = nfnl_set(inst, i);
                if (s != NULL && STREQ(s->name, name)) {
                        __ip_set_get(s);
                        index = i;
@@ -573,15 +659,16 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
  * The nfnl mutex is used in the function.
  */
 ip_set_id_t
-ip_set_nfnl_get_byindex(ip_set_id_t index)
+ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
 {
        struct ip_set *set;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
-       if (index > ip_set_max)
+       if (index > inst->ip_set_max)
                return IPSET_INVALID_ID;
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       set = nfnl_set(index);
+       set = nfnl_set(inst, index);
        if (set)
                __ip_set_get(set);
        else
@@ -600,13 +687,17 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
  * The nfnl mutex is used in the function.
  */
 void
-ip_set_nfnl_put(ip_set_id_t index)
+ip_set_nfnl_put(struct net *net, ip_set_id_t index)
 {
        struct ip_set *set;
+       struct ip_set_net *inst = ip_set_pernet(net);
+
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       set = nfnl_set(index);
-       if (set != NULL)
-               __ip_set_put(set);
+       if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
+               set = nfnl_set(inst, index);
+               if (set != NULL)
+                       __ip_set_put(set);
+       }
        nfnl_unlock(NFNL_SUBSYS_IPSET);
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -664,14 +755,14 @@ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static struct ip_set *
-find_set_and_id(const char *name, ip_set_id_t *id)
+find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
 {
        struct ip_set *set = NULL;
        ip_set_id_t i;
 
        *id = IPSET_INVALID_ID;
-       for (i = 0; i < ip_set_max; i++) {
-               set = nfnl_set(i);
+       for (i = 0; i < inst->ip_set_max; i++) {
+               set = nfnl_set(inst, i);
                if (set != NULL && STREQ(set->name, name)) {
                        *id = i;
                        break;
@@ -681,22 +772,23 @@ find_set_and_id(const char *name, ip_set_id_t *id)
 }
 
 static inline struct ip_set *
-find_set(const char *name)
+find_set(struct ip_set_net *inst, const char *name)
 {
        ip_set_id_t id;
 
-       return find_set_and_id(name, &id);
+       return find_set_and_id(inst, name, &id);
 }
 
 static int
-find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
+            struct ip_set **set)
 {
        struct ip_set *s;
        ip_set_id_t i;
 
        *index = IPSET_INVALID_ID;
-       for (i = 0;  i < ip_set_max; i++) {
-               s = nfnl_set(i);
+       for (i = 0;  i < inst->ip_set_max; i++) {
+               s = nfnl_set(inst, i);
                if (s == NULL) {
                        if (*index == IPSET_INVALID_ID)
                                *index = i;
@@ -725,6 +817,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
              const struct nlattr * const attr[])
 {
+       struct net *net = sock_net(ctnl);
+       struct ip_set_net *inst = ip_set_pernet(net);
        struct ip_set *set, *clash = NULL;
        ip_set_id_t index = IPSET_INVALID_ID;
        struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
@@ -783,7 +877,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                goto put_out;
        }
 
-       ret = set->type->create(set, tb, flags);
+       ret = set->type->create(net, set, tb, flags);
        if (ret != 0)
                goto put_out;
 
@@ -794,7 +888,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * by the nfnl mutex. Find the first free index in ip_set_list
         * and check clashing.
         */
-       ret = find_free_id(set->name, &index, &clash);
+       ret = find_free_id(inst, set->name, &index, &clash);
        if (ret == -EEXIST) {
                /* If this is the same set and requested, ignore error */
                if ((flags & IPSET_FLAG_EXIST) &&
@@ -807,9 +901,9 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                goto cleanup;
        } else if (ret == -IPSET_ERR_MAX_SETS) {
                struct ip_set **list, **tmp;
-               ip_set_id_t i = ip_set_max + IP_SET_INC;
+               ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
 
-               if (i < ip_set_max || i == IPSET_INVALID_ID)
+               if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
                        /* Wraparound */
                        goto cleanup;
 
@@ -817,14 +911,14 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                if (!list)
                        goto cleanup;
                /* nfnl mutex is held, both lists are valid */
-               tmp = nfnl_dereference(ip_set_list);
-               memcpy(list, tmp, sizeof(struct ip_set *) * ip_set_max);
-               rcu_assign_pointer(ip_set_list, list);
+               tmp = nfnl_dereference(inst->ip_set_list);
+               memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+               rcu_assign_pointer(inst->ip_set_list, list);
                /* Make sure all current packets have passed through */
                synchronize_net();
                /* Use new list */
-               index = ip_set_max;
-               ip_set_max = i;
+               index = inst->ip_set_max;
+               inst->ip_set_max = i;
                kfree(tmp);
                ret = 0;
        } else if (ret)
@@ -834,7 +928,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * Finally! Add our shiny new set to the list, and be done.
         */
        pr_debug("create: '%s' created with index %u!\n", set->name, index);
-       nfnl_set(index) = set;
+       nfnl_set(inst, index) = set;
 
        return ret;
 
@@ -857,12 +951,12 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static void
-ip_set_destroy_set(ip_set_id_t index)
+ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
 {
-       struct ip_set *set = nfnl_set(index);
+       struct ip_set *set = nfnl_set(inst, index);
 
        pr_debug("set: %s\n",  set->name);
-       nfnl_set(index) = NULL;
+       nfnl_set(inst, index) = NULL;
 
        /* Must call it without holding any lock */
        set->variant->destroy(set);
@@ -875,6 +969,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
               const struct nlmsghdr *nlh,
               const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *s;
        ip_set_id_t i;
        int ret = 0;
@@ -894,21 +989,22 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
         */
        read_lock_bh(&ip_set_ref_lock);
        if (!attr[IPSET_ATTR_SETNAME]) {
-               for (i = 0; i < ip_set_max; i++) {
-                       s = nfnl_set(i);
+               for (i = 0; i < inst->ip_set_max; i++) {
+                       s = nfnl_set(inst, i);
                        if (s != NULL && s->ref) {
                                ret = -IPSET_ERR_BUSY;
                                goto out;
                        }
                }
                read_unlock_bh(&ip_set_ref_lock);
-               for (i = 0; i < ip_set_max; i++) {
-                       s = nfnl_set(i);
+               for (i = 0; i < inst->ip_set_max; i++) {
+                       s = nfnl_set(inst, i);
                        if (s != NULL)
-                               ip_set_destroy_set(i);
+                               ip_set_destroy_set(inst, i);
                }
        } else {
-               s = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &i);
+               s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+                                   &i);
                if (s == NULL) {
                        ret = -ENOENT;
                        goto out;
@@ -918,7 +1014,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
                }
                read_unlock_bh(&ip_set_ref_lock);
 
-               ip_set_destroy_set(i);
+               ip_set_destroy_set(inst, i);
        }
        return 0;
 out:
@@ -943,6 +1039,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh,
             const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *s;
        ip_set_id_t i;
 
@@ -950,13 +1047,13 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
                return -IPSET_ERR_PROTOCOL;
 
        if (!attr[IPSET_ATTR_SETNAME]) {
-               for (i = 0; i < ip_set_max; i++) {
-                       s = nfnl_set(i);
+               for (i = 0; i < inst->ip_set_max; i++) {
+                       s = nfnl_set(inst, i);
                        if (s != NULL)
                                ip_set_flush_set(s);
                }
        } else {
-               s = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+               s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
                if (s == NULL)
                        return -ENOENT;
 
@@ -982,6 +1079,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
              const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set, *s;
        const char *name2;
        ip_set_id_t i;
@@ -992,7 +1090,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
                     attr[IPSET_ATTR_SETNAME2] == NULL))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1003,8 +1101,8 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        }
 
        name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
-       for (i = 0; i < ip_set_max; i++) {
-               s = nfnl_set(i);
+       for (i = 0; i < inst->ip_set_max; i++) {
+               s = nfnl_set(inst, i);
                if (s != NULL && STREQ(s->name, name2)) {
                        ret = -IPSET_ERR_EXIST_SETNAME2;
                        goto out;
@@ -1031,6 +1129,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *from, *to;
        ip_set_id_t from_id, to_id;
        char from_name[IPSET_MAXNAMELEN];
@@ -1040,11 +1139,13 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
                     attr[IPSET_ATTR_SETNAME2] == NULL))
                return -IPSET_ERR_PROTOCOL;
 
-       from = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &from_id);
+       from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+                              &from_id);
        if (from == NULL)
                return -ENOENT;
 
-       to = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id);
+       to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
+                            &to_id);
        if (to == NULL)
                return -IPSET_ERR_EXIST_SETNAME2;
 
@@ -1061,8 +1162,8 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 
        write_lock_bh(&ip_set_ref_lock);
        swap(from->ref, to->ref);
-       nfnl_set(from_id) = to;
-       nfnl_set(to_id) = from;
+       nfnl_set(inst, from_id) = to;
+       nfnl_set(inst, to_id) = from;
        write_unlock_bh(&ip_set_ref_lock);
 
        return 0;
@@ -1081,9 +1182,10 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 static int
 ip_set_dump_done(struct netlink_callback *cb)
 {
+       struct ip_set_net *inst = (struct ip_set_net *)cb->data;
        if (cb->args[2]) {
-               pr_debug("release set %s\n", nfnl_set(cb->args[1])->name);
-               ip_set_put_byindex((ip_set_id_t) cb->args[1]);
+               pr_debug("release set %s\n", nfnl_set(inst, cb->args[1])->name);
+               __ip_set_put_byindex(inst, (ip_set_id_t) cb->args[1]);
        }
        return 0;
 }
@@ -1109,6 +1211,7 @@ dump_init(struct netlink_callback *cb)
        struct nlattr *attr = (void *)nlh + min_len;
        u32 dump_type;
        ip_set_id_t index;
+       struct ip_set_net *inst = (struct ip_set_net *)cb->data;
 
        /* Second pass, so parser can't fail */
        nla_parse(cda, IPSET_ATTR_CMD_MAX,
@@ -1122,7 +1225,7 @@ dump_init(struct netlink_callback *cb)
        if (cda[IPSET_ATTR_SETNAME]) {
                struct ip_set *set;
 
-               set = find_set_and_id(nla_data(cda[IPSET_ATTR_SETNAME]),
+               set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
                                      &index);
                if (set == NULL)
                        return -ENOENT;
@@ -1150,6 +1253,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
        u32 dump_type, dump_flags;
        int ret = 0;
+       struct ip_set_net *inst = (struct ip_set_net *)cb->data;
 
        if (!cb->args[0]) {
                ret = dump_init(cb);
@@ -1163,18 +1267,18 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
                }
        }
 
-       if (cb->args[1] >= ip_set_max)
+       if (cb->args[1] >= inst->ip_set_max)
                goto out;
 
        dump_type = DUMP_TYPE(cb->args[0]);
        dump_flags = DUMP_FLAGS(cb->args[0]);
-       max = dump_type == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+       max = dump_type == DUMP_ONE ? cb->args[1] + 1 : inst->ip_set_max;
 dump_last:
        pr_debug("args[0]: %u %u args[1]: %ld\n",
                 dump_type, dump_flags, cb->args[1]);
        for (; cb->args[1] < max; cb->args[1]++) {
                index = (ip_set_id_t) cb->args[1];
-               set = nfnl_set(index);
+               set = nfnl_set(inst, index);
                if (set == NULL) {
                        if (dump_type == DUMP_ONE) {
                                ret = -ENOENT;
@@ -1252,8 +1356,8 @@ next_set:
 release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[2]) {
-               pr_debug("release set %s\n", nfnl_set(index)->name);
-               ip_set_put_byindex(index);
+               pr_debug("release set %s\n", nfnl_set(inst, index)->name);
+               __ip_set_put_byindex(inst, index);
                cb->args[2] = 0;
        }
 out:
@@ -1271,6 +1375,8 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
+
        if (unlikely(protocol_failed(attr)))
                return -IPSET_ERR_PROTOCOL;
 
@@ -1278,6 +1384,7 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
                struct netlink_dump_control c = {
                        .dump = ip_set_dump_start,
                        .done = ip_set_dump_done,
+                       .data = (void *)inst
                };
                return netlink_dump_start(ctnl, skb, nlh, &c);
        }
@@ -1356,6 +1463,7 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
        const struct nlattr *nla;
@@ -1374,7 +1482,7 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
                       attr[IPSET_ATTR_LINENO] == NULL))))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1410,6 +1518,7 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
        const struct nlattr *nla;
@@ -1428,7 +1537,7 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
                       attr[IPSET_ATTR_LINENO] == NULL))))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1464,6 +1573,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh,
             const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
        int ret = 0;
@@ -1474,7 +1584,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
                     !flag_nested(attr[IPSET_ATTR_DATA])))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1499,6 +1609,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
              const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        const struct ip_set *set;
        struct sk_buff *skb2;
        struct nlmsghdr *nlh2;
@@ -1508,7 +1619,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
                     attr[IPSET_ATTR_SETNAME] == NULL))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1733,8 +1844,10 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
        unsigned int *op;
        void *data;
        int copylen = *len, ret = 0;
+       struct net *net = sock_net(sk);
+       struct ip_set_net *inst = ip_set_pernet(net);
 
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
        if (optval != SO_IP_SET)
                return -EBADF;
@@ -1783,22 +1896,39 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
                }
                req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
                nfnl_lock(NFNL_SUBSYS_IPSET);
-               find_set_and_id(req_get->set.name, &id);
+               find_set_and_id(inst, req_get->set.name, &id);
                req_get->set.index = id;
                nfnl_unlock(NFNL_SUBSYS_IPSET);
                goto copy;
        }
+       case IP_SET_OP_GET_FNAME: {
+               struct ip_set_req_get_set_family *req_get = data;
+               ip_set_id_t id;
+
+               if (*len != sizeof(struct ip_set_req_get_set_family)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+               nfnl_lock(NFNL_SUBSYS_IPSET);
+               find_set_and_id(inst, req_get->set.name, &id);
+               req_get->set.index = id;
+               if (id != IPSET_INVALID_ID)
+                       req_get->family = nfnl_set(inst, id)->family;
+               nfnl_unlock(NFNL_SUBSYS_IPSET);
+               goto copy;
+       }
        case IP_SET_OP_GET_BYINDEX: {
                struct ip_set_req_get_set *req_get = data;
                struct ip_set *set;
 
                if (*len != sizeof(struct ip_set_req_get_set) ||
-                   req_get->set.index >= ip_set_max) {
+                   req_get->set.index >= inst->ip_set_max) {
                        ret = -EINVAL;
                        goto done;
                }
                nfnl_lock(NFNL_SUBSYS_IPSET);
-               set = nfnl_set(req_get->set.index);
+               set = nfnl_set(inst, req_get->set.index);
                strncpy(req_get->set.name, set ? set->name : "",
                        IPSET_MAXNAMELEN);
                nfnl_unlock(NFNL_SUBSYS_IPSET);
@@ -1827,49 +1957,82 @@ static struct nf_sockopt_ops so_set __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
-static int __init
-ip_set_init(void)
+static int __net_init
+ip_set_net_init(struct net *net)
 {
+       struct ip_set_net *inst = ip_set_pernet(net);
+
        struct ip_set **list;
-       int ret;
 
-       if (max_sets)
-               ip_set_max = max_sets;
-       if (ip_set_max >= IPSET_INVALID_ID)
-               ip_set_max = IPSET_INVALID_ID - 1;
+       inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
+       if (inst->ip_set_max >= IPSET_INVALID_ID)
+               inst->ip_set_max = IPSET_INVALID_ID - 1;
 
-       list = kzalloc(sizeof(struct ip_set *) * ip_set_max, GFP_KERNEL);
+       list = kzalloc(sizeof(struct ip_set *) * inst->ip_set_max, GFP_KERNEL);
        if (!list)
                return -ENOMEM;
+       inst->is_deleted = 0;
+       rcu_assign_pointer(inst->ip_set_list, list);
+       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+       return 0;
+}
+
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+       struct ip_set_net *inst = ip_set_pernet(net);
+
+       struct ip_set *set = NULL;
+       ip_set_id_t i;
+
+       inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
+
+       for (i = 0; i < inst->ip_set_max; i++) {
+               set = nfnl_set(inst, i);
+               if (set != NULL)
+                       ip_set_destroy_set(inst, i);
+       }
+       kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+}
+
+static struct pernet_operations ip_set_net_ops = {
+       .init   = ip_set_net_init,
+       .exit   = ip_set_net_exit,
+       .id     = &ip_set_net_id,
+       .size   = sizeof(struct ip_set_net)
+};
+
 
-       rcu_assign_pointer(ip_set_list, list);
-       ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+static int __init
+ip_set_init(void)
+{
+       int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
        if (ret != 0) {
                pr_err("ip_set: cannot register with nfnetlink.\n");
-               kfree(list);
                return ret;
        }
        ret = nf_register_sockopt(&so_set);
        if (ret != 0) {
                pr_err("SO_SET registry failed: %d\n", ret);
                nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-               kfree(list);
                return ret;
        }
-
-       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+       ret = register_pernet_subsys(&ip_set_net_ops);
+       if (ret) {
+               pr_err("ip_set: cannot register pernet_subsys.\n");
+               nf_unregister_sockopt(&so_set);
+               nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+               return ret;
+       }
        return 0;
 }
 
 static void __exit
 ip_set_fini(void)
 {
-       struct ip_set **list = rcu_dereference_protected(ip_set_list, 1);
-
-       /* There can't be any existing set */
+       unregister_pernet_subsys(&ip_set_net_ops);
        nf_unregister_sockopt(&so_set);
        nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-       kfree(list);
        pr_debug("these are the famous last words\n");
 }
 
index dac156f819ac2f2fe25c876d800aadd2c47c0752..29fb01ddff93b0a0da7ad1dc3691a141376f5007 100644 (file)
@@ -102,9 +102,25 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
        int protocol = iph->protocol;
 
        /* See comments at tcp_match in ip_tables.c */
-       if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+       if (protocol <= 0)
                return false;
 
+       if (ntohs(iph->frag_off) & IP_OFFSET)
+               switch (protocol) {
+               case IPPROTO_TCP:
+               case IPPROTO_SCTP:
+               case IPPROTO_UDP:
+               case IPPROTO_UDPLITE:
+               case IPPROTO_ICMP:
+                       /* Port info not available for fragment offset > 0 */
+                       return false;
+               default:
+                       /* Other protocols don't have ports,
+                          so we can match fragments */
+                       *proto = protocol;
+                       return true;
+               }
+
        return get_port(skb, protocol, protooff, src, port, proto);
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
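
The new fragment handling above turns on the (ntohs(iph->frag_off) & IP_OFFSET) test: a non-zero 13-bit fragment offset marks a non-first fragment, which carries no transport header and therefore no port. A tiny user-space check of that mask, using the standard IP_OFFSET/IP_MF values:

/*
 * Illustrative only: why (ntohs(iph->frag_off) & IP_OFFSET) singles out
 * non-first fragments. The constants match the kernel's <net/ip.h>.
 */
#include <stdio.h>
#include <stdint.h>

#define IP_OFFSET	0x1FFF	/* 13-bit fragment offset, in 8-byte units */
#define IP_MF		0x2000	/* "more fragments" flag                   */

static int nonfirst_fragment(uint16_t frag_off_host)
{
	return (frag_off_host & IP_OFFSET) != 0;
}

int main(void)
{
	/* First fragment: offset 0, MF set; the L4 header and ports are there. */
	printf("first fragment: %d\n", nonfirst_fragment(IP_MF | 0));
	/* Later fragment: offset 185; no L4 header, so no port to match on.    */
	printf("later fragment: %d\n", nonfirst_fragment(IP_MF | 185));
	return 0;
}
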
index 707bc520d629f311dd9978d2e0bf222719f71c82..6a80dbd30df7bea89d0279a5752faa668f2f68a7 100644 (file)
@@ -15,8 +15,7 @@
 #define rcu_dereference_bh(p)  rcu_dereference(p)
 #endif
 
-#define CONCAT(a, b)           a##b
-#define TOKEN(a, b)            CONCAT(a, b)
+#define rcu_dereference_bh_nfnl(p)     rcu_dereference_bh_check(p, 1)
 
 /* Hashing which uses arrays to resolve clashing. The hash table is resized
  * (doubled) when searching becomes too long.
@@ -78,10 +77,14 @@ struct htable {
 
 #define hbucket(h, i)          (&((h)->bucket[i]))
 
+#ifndef IPSET_NET_COUNT
+#define IPSET_NET_COUNT                1
+#endif
+
 /* Book-keeping of the prefixes added to the set */
 struct net_prefixes {
-       u8 cidr;                /* the different cidr values in the set */
-       u32 nets;               /* number of elements per cidr */
+       u32 nets[IPSET_NET_COUNT]; /* number of elements per cidr */
+       u8 cidr[IPSET_NET_COUNT];  /* the different cidr values in the set */
 };
 
 /* Compute the hash table size */
@@ -114,23 +117,6 @@ htable_bits(u32 hashsize)
        return bits;
 }
 
-/* Destroy the hashtable part of the set */
-static void
-ahash_destroy(struct htable *t)
-{
-       struct hbucket *n;
-       u32 i;
-
-       for (i = 0; i < jhash_size(t->htable_bits); i++) {
-               n = hbucket(t, i);
-               if (n->size)
-                       /* FIXME: use slab cache */
-                       kfree(n->value);
-       }
-
-       ip_set_free(t);
-}
-
 static int
 hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 {
@@ -156,30 +142,30 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 }
 
 #ifdef IP_SET_HASH_WITH_NETS
+#if IPSET_NET_COUNT > 1
+#define __CIDR(cidr, i)                (cidr[i])
+#else
+#define __CIDR(cidr, i)                (cidr)
+#endif
 #ifdef IP_SET_HASH_WITH_NETS_PACKED
 /* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
-#define CIDR(cidr)             (cidr + 1)
+#define CIDR(cidr, i)          (__CIDR(cidr, i) + 1)
 #else
-#define CIDR(cidr)             (cidr)
+#define CIDR(cidr, i)          (__CIDR(cidr, i))
 #endif
 
 #define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
 
 #ifdef IP_SET_HASH_WITH_MULTI
-#define NETS_LENGTH(family)    (SET_HOST_MASK(family) + 1)
+#define NLEN(family)           (SET_HOST_MASK(family) + 1)
 #else
-#define NETS_LENGTH(family)    SET_HOST_MASK(family)
+#define NLEN(family)           SET_HOST_MASK(family)
 #endif
 
 #else
-#define NETS_LENGTH(family)    0
+#define NLEN(family)           0
 #endif /* IP_SET_HASH_WITH_NETS */
 
-#define ext_timeout(e, h)      \
-(unsigned long *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, h)      \
-(struct ip_set_counter *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_COUNTER])
-
 #endif /* _IP_SET_HASH_GEN_H */
 
 /* Family dependent templates */
@@ -194,6 +180,8 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #undef mtype_data_next
 #undef mtype_elem
 
+#undef mtype_ahash_destroy
+#undef mtype_ext_cleanup
 #undef mtype_add_cidr
 #undef mtype_del_cidr
 #undef mtype_ahash_memsize
@@ -220,41 +208,44 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 
 #undef HKEY
 
-#define mtype_data_equal       TOKEN(MTYPE, _data_equal)
+#define mtype_data_equal       IPSET_TOKEN(MTYPE, _data_equal)
 #ifdef IP_SET_HASH_WITH_NETS
-#define mtype_do_data_match    TOKEN(MTYPE, _do_data_match)
+#define mtype_do_data_match    IPSET_TOKEN(MTYPE, _do_data_match)
 #else
 #define mtype_do_data_match(d) 1
 #endif
-#define mtype_data_set_flags   TOKEN(MTYPE, _data_set_flags)
-#define mtype_data_reset_flags TOKEN(MTYPE, _data_reset_flags)
-#define mtype_data_netmask     TOKEN(MTYPE, _data_netmask)
-#define mtype_data_list                TOKEN(MTYPE, _data_list)
-#define mtype_data_next                TOKEN(MTYPE, _data_next)
-#define mtype_elem             TOKEN(MTYPE, _elem)
-#define mtype_add_cidr         TOKEN(MTYPE, _add_cidr)
-#define mtype_del_cidr         TOKEN(MTYPE, _del_cidr)
-#define mtype_ahash_memsize    TOKEN(MTYPE, _ahash_memsize)
-#define mtype_flush            TOKEN(MTYPE, _flush)
-#define mtype_destroy          TOKEN(MTYPE, _destroy)
-#define mtype_gc_init          TOKEN(MTYPE, _gc_init)
-#define mtype_same_set         TOKEN(MTYPE, _same_set)
-#define mtype_kadt             TOKEN(MTYPE, _kadt)
-#define mtype_uadt             TOKEN(MTYPE, _uadt)
+#define mtype_data_set_flags   IPSET_TOKEN(MTYPE, _data_set_flags)
+#define mtype_data_reset_elem  IPSET_TOKEN(MTYPE, _data_reset_elem)
+#define mtype_data_reset_flags IPSET_TOKEN(MTYPE, _data_reset_flags)
+#define mtype_data_netmask     IPSET_TOKEN(MTYPE, _data_netmask)
+#define mtype_data_list                IPSET_TOKEN(MTYPE, _data_list)
+#define mtype_data_next                IPSET_TOKEN(MTYPE, _data_next)
+#define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+#define mtype_ahash_destroy    IPSET_TOKEN(MTYPE, _ahash_destroy)
+#define mtype_ext_cleanup      IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_add_cidr         IPSET_TOKEN(MTYPE, _add_cidr)
+#define mtype_del_cidr         IPSET_TOKEN(MTYPE, _del_cidr)
+#define mtype_ahash_memsize    IPSET_TOKEN(MTYPE, _ahash_memsize)
+#define mtype_flush            IPSET_TOKEN(MTYPE, _flush)
+#define mtype_destroy          IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_same_set         IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_kadt             IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt             IPSET_TOKEN(MTYPE, _uadt)
 #define mtype                  MTYPE
 
-#define mtype_elem             TOKEN(MTYPE, _elem)
-#define mtype_add              TOKEN(MTYPE, _add)
-#define mtype_del              TOKEN(MTYPE, _del)
-#define mtype_test_cidrs       TOKEN(MTYPE, _test_cidrs)
-#define mtype_test             TOKEN(MTYPE, _test)
-#define mtype_expire           TOKEN(MTYPE, _expire)
-#define mtype_resize           TOKEN(MTYPE, _resize)
-#define mtype_head             TOKEN(MTYPE, _head)
-#define mtype_list             TOKEN(MTYPE, _list)
-#define mtype_gc               TOKEN(MTYPE, _gc)
-#define mtype_variant          TOKEN(MTYPE, _variant)
-#define mtype_data_match       TOKEN(MTYPE, _data_match)
+#define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+#define mtype_add              IPSET_TOKEN(MTYPE, _add)
+#define mtype_del              IPSET_TOKEN(MTYPE, _del)
+#define mtype_test_cidrs       IPSET_TOKEN(MTYPE, _test_cidrs)
+#define mtype_test             IPSET_TOKEN(MTYPE, _test)
+#define mtype_expire           IPSET_TOKEN(MTYPE, _expire)
+#define mtype_resize           IPSET_TOKEN(MTYPE, _resize)
+#define mtype_head             IPSET_TOKEN(MTYPE, _head)
+#define mtype_list             IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
+#define mtype_variant          IPSET_TOKEN(MTYPE, _variant)
+#define mtype_data_match       IPSET_TOKEN(MTYPE, _data_match)
 
 #ifndef HKEY_DATALEN
 #define HKEY_DATALEN           sizeof(struct mtype_elem)
@@ -269,13 +260,10 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 
 /* The generic hash structure */
 struct htype {
-       struct htable *table;   /* the hash table */
+       struct htable __rcu *table; /* the hash table */
        u32 maxelem;            /* max elements in the hash */
        u32 elements;           /* current element (vs timeout) */
        u32 initval;            /* random jhash init value */
-       u32 timeout;            /* timeout value, if enabled */
-       size_t dsize;           /* data struct size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
        struct timer_list gc;   /* garbage collection when timeout enabled */
        struct mtype_elem next; /* temporary storage for uadd */
 #ifdef IP_SET_HASH_WITH_MULTI
@@ -297,49 +285,49 @@ struct htype {
 /* Network cidr size book keeping when the hash stores different
  * sized networks */
 static void
-mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
        int i, j;
 
        /* Add in increasing prefix order, so larger cidr first */
-       for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
+       for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
                if (j != -1)
                        continue;
-               else if (h->nets[i].cidr < cidr)
+               else if (h->nets[i].cidr[n] < cidr)
                        j = i;
-               else if (h->nets[i].cidr == cidr) {
-                       h->nets[i].nets++;
+               else if (h->nets[i].cidr[n] == cidr) {
+                       h->nets[i].nets[n]++;
                        return;
                }
        }
        if (j != -1) {
                for (; i > j; i--) {
-                       h->nets[i].cidr = h->nets[i - 1].cidr;
-                       h->nets[i].nets = h->nets[i - 1].nets;
+                       h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
+                       h->nets[i].nets[n] = h->nets[i - 1].nets[n];
                }
        }
-       h->nets[i].cidr = cidr;
-       h->nets[i].nets = 1;
+       h->nets[i].cidr[n] = cidr;
+       h->nets[i].nets[n] = 1;
 }
 
 static void
-mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
        u8 i, j, net_end = nets_length - 1;
 
        for (i = 0; i < nets_length; i++) {
-               if (h->nets[i].cidr != cidr)
+               if (h->nets[i].cidr[n] != cidr)
                        continue;
-                if (h->nets[i].nets > 1 || i == net_end ||
-                    h->nets[i + 1].nets == 0) {
-                        h->nets[i].nets--;
+                if (h->nets[i].nets[n] > 1 || i == net_end ||
+                    h->nets[i + 1].nets[n] == 0) {
+                        h->nets[i].nets[n]--;
                         return;
                 }
-                for (j = i; j < net_end && h->nets[j].nets; j++) {
-                       h->nets[j].cidr = h->nets[j + 1].cidr;
-                       h->nets[j].nets = h->nets[j + 1].nets;
+                for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
+                       h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
+                       h->nets[j].nets[n] = h->nets[j + 1].nets[n];
                 }
-                h->nets[j].nets = 0;
+                h->nets[j].nets[n] = 0;
                 return;
        }
 }
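mtype_add_cidr()/mtype_del_cidr() keep, per dimension n, a dense array of (cidr, count) slots sorted so larger cidr values come first. A compilable single-dimension model of the insert path shown above (NLEN here is a local constant, not the kernel macro):

#include <stdio.h>

#define NLEN 33                          /* 32-bit family plus the /0 slot */

static unsigned char cidrs[NLEN];
static unsigned int  nets[NLEN];

static void add_cidr(unsigned char cidr)
{
        int i, j = -1;

        for (i = 0; i < NLEN && nets[i]; i++) {
                if (j != -1)
                        continue;
                else if (cidrs[i] < cidr)
                        j = i;                   /* first slot that must shift down */
                else if (cidrs[i] == cidr) {
                        nets[i]++;               /* cidr already present */
                        return;
                }
        }
        if (j != -1)
                for (; i > j; i--) {             /* make room, keep the ordering */
                        cidrs[i] = cidrs[i - 1];
                        nets[i] = nets[i - 1];
                }
        cidrs[i] = cidr;
        nets[i] = 1;
}

int main(void)
{
        add_cidr(24);
        add_cidr(16);
        add_cidr(24);
        for (int i = 0; i < NLEN && nets[i]; i++)
                printf("/%u: %u element(s)\n", (unsigned)cidrs[i], nets[i]);
        /* prints /24: 2, then /16: 1 */
        return 0;
}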
@@ -347,10 +335,10 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
 
 /* Calculate the actual memory size of the set data */
 static size_t
-mtype_ahash_memsize(const struct htype *h, u8 nets_length)
+mtype_ahash_memsize(const struct htype *h, const struct htable *t,
+                   u8 nets_length, size_t dsize)
 {
        u32 i;
-       struct htable *t = h->table;
        size_t memsize = sizeof(*h)
                         + sizeof(*t)
 #ifdef IP_SET_HASH_WITH_NETS
@@ -359,35 +347,70 @@ mtype_ahash_memsize(const struct htype *h, u8 nets_length)
                         + jhash_size(t->htable_bits) * sizeof(struct hbucket);
 
        for (i = 0; i < jhash_size(t->htable_bits); i++)
-               memsize += t->bucket[i].size * h->dsize;
+               memsize += t->bucket[i].size * dsize;
 
        return memsize;
 }
 
+/* Get the ith element from the array block n */
+#define ahash_data(n, i, dsize)        \
+       ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
+
+static void
+mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
+{
+       int i;
+
+       for (i = 0; i < n->pos; i++)
+               ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
+}
+
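Element size is now carried in set->dsize rather than in the hash-type private data, and ahash_data() does the by-hand array indexing into a bucket of dsize-sized slots. A stand-alone model of that pointer arithmetic (the kernel macro relies on GCC's void-pointer arithmetic; plain C needs the char cast):

#include <stdio.h>
#include <stdlib.h>

struct bucket {
        void *value;            /* dsize-sized elements, packed back to back */
        unsigned char pos;      /* number of elements in use */
};

static void *nth_elem(const struct bucket *n, int i, size_t dsize)
{
        return (char *)n->value + (size_t)i * dsize;   /* == ahash_data(n, i, dsize) */
}

int main(void)
{
        size_t dsize = sizeof(int);
        struct bucket b = { .value = calloc(4, dsize), .pos = 4 };

        for (int i = 0; i < b.pos; i++)
                *(int *)nth_elem(&b, i, dsize) = i * 10;
        printf("%d\n", *(int *)nth_elem(&b, 2, dsize)); /* 20 */
        free(b.value);
        return 0;
}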
 /* Flush a hash type of set: destroy all elements */
 static void
 mtype_flush(struct ip_set *set)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t;
        struct hbucket *n;
        u32 i;
 
+       t = rcu_dereference_bh_nfnl(h->table);
        for (i = 0; i < jhash_size(t->htable_bits); i++) {
                n = hbucket(t, i);
                if (n->size) {
+                       if (set->extensions & IPSET_EXT_DESTROY)
+                               mtype_ext_cleanup(set, n);
                        n->size = n->pos = 0;
                        /* FIXME: use slab cache */
                        kfree(n->value);
                }
        }
 #ifdef IP_SET_HASH_WITH_NETS
-       memset(h->nets, 0, sizeof(struct net_prefixes)
-                          * NETS_LENGTH(set->family));
+       memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family));
 #endif
        h->elements = 0;
 }
 
+/* Destroy the hashtable part of the set */
+static void
+mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
+{
+       struct hbucket *n;
+       u32 i;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               if (n->size) {
+                       if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+                               mtype_ext_cleanup(set, n);
+                       /* FIXME: use slab cache */
+                       kfree(n->value);
+               }
+       }
+
+       ip_set_free(t);
+}
+
 /* Destroy a hash type of set */
 static void
 mtype_destroy(struct ip_set *set)
@@ -397,7 +420,7 @@ mtype_destroy(struct ip_set *set)
        if (set->extensions & IPSET_EXT_TIMEOUT)
                del_timer_sync(&h->gc);
 
-       ahash_destroy(h->table);
+       mtype_ahash_destroy(set, rcu_dereference_bh_nfnl(h->table), true);
 #ifdef IP_SET_HASH_WITH_RBTREE
        rbtree_destroy(&h->rbtree);
 #endif
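The table pointer itself becomes RCU-managed: readers fetch it with rcu_dereference_bh()/rcu_dereference_bh_nfnl() and writers publish a new table with rcu_assign_pointer() (see the create path further down). A much-simplified user-space analogue of that publish/read pattern using C11 atomics; real RCU additionally defers freeing the old table until readers have finished, which is not modelled here:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct htable_model { unsigned int htable_bits; };

static _Atomic(struct htable_model *) table;

static void publish(struct htable_model *t)
{
        atomic_store_explicit(&table, t, memory_order_release);    /* ~ rcu_assign_pointer */
}

static struct htable_model *get(void)
{
        return atomic_load_explicit(&table, memory_order_consume); /* ~ rcu_dereference */
}

int main(void)
{
        struct htable_model *t = malloc(sizeof(*t));

        t->htable_bits = 10;
        publish(t);
        printf("bits: %u\n", get()->htable_bits);
        free(t);
        return 0;
}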
@@ -414,10 +437,10 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
        init_timer(&h->gc);
        h->gc.data = (unsigned long) set;
        h->gc.function = gc;
-       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&h->gc);
        pr_debug("gc initialized, run in every %u\n",
-                IPSET_GC_PERIOD(h->timeout));
+                IPSET_GC_PERIOD(set->timeout));
 }
 
 static bool
@@ -428,37 +451,40 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
 
        /* Resizing changes htable_bits, so we ignore it */
        return x->maxelem == y->maxelem &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
 #ifdef IP_SET_HASH_WITH_NETMASK
               x->netmask == y->netmask &&
 #endif
               a->extensions == b->extensions;
 }
 
-/* Get the ith element from the array block n */
-#define ahash_data(n, i, dsize)        \
-       ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
-
 /* Delete expired elements from the hashtable */
 static void
-mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
+mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 {
-       struct htable *t = h->table;
+       struct htable *t;
        struct hbucket *n;
        struct mtype_elem *data;
        u32 i;
        int j;
+#ifdef IP_SET_HASH_WITH_NETS
+       u8 k;
+#endif
 
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
        for (i = 0; i < jhash_size(t->htable_bits); i++) {
                n = hbucket(t, i);
                for (j = 0; j < n->pos; j++) {
                        data = ahash_data(n, j, dsize);
-                       if (ip_set_timeout_expired(ext_timeout(data, h))) {
+                       if (ip_set_timeout_expired(ext_timeout(data, set))) {
                                pr_debug("expired %u/%u\n", i, j);
 #ifdef IP_SET_HASH_WITH_NETS
-                               mtype_del_cidr(h, CIDR(data->cidr),
-                                              nets_length);
+                               for (k = 0; k < IPSET_NET_COUNT; k++)
+                                       mtype_del_cidr(h, CIDR(data->cidr, k),
+                                                      nets_length, k);
 #endif
+                               ip_set_ext_destroy(set, data);
                                if (j != n->pos - 1)
                                        /* Not last one */
                                        memcpy(data,
@@ -481,6 +507,7 @@ mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
                        n->value = tmp;
                }
        }
+       rcu_read_unlock_bh();
 }
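Expiry (and delete below) keep each bucket dense by copying the last element over the freed slot before decrementing pos. A stand-alone model of that compaction step:

#include <stdio.h>
#include <string.h>

static void bucket_del(int *value, unsigned char *pos, int idx)
{
        if (idx != *pos - 1)                     /* not the last one */
                memcpy(&value[idx], &value[*pos - 1], sizeof(value[0]));
        (*pos)--;
}

int main(void)
{
        int v[4] = { 1, 2, 3, 4 };
        unsigned char pos = 4;

        bucket_del(v, &pos, 1);                  /* delete "2" */
        for (int i = 0; i < pos; i++)
                printf("%d ", v[i]);             /* 1 4 3 */
        printf("\n");
        return 0;
}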
 
 static void
@@ -491,10 +518,10 @@ mtype_gc(unsigned long ul_set)
 
        pr_debug("called\n");
        write_lock_bh(&set->lock);
-       mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+       mtype_expire(set, h, NLEN(set->family), set->dsize);
        write_unlock_bh(&set->lock);
 
-       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&h->gc);
 }
 
@@ -505,7 +532,7 @@ static int
 mtype_resize(struct ip_set *set, bool retried)
 {
        struct htype *h = set->data;
-       struct htable *t, *orig = h->table;
+       struct htable *t, *orig = rcu_dereference_bh_nfnl(h->table);
        u8 htable_bits = orig->htable_bits;
 #ifdef IP_SET_HASH_WITH_NETS
        u8 flags;
@@ -520,8 +547,7 @@ mtype_resize(struct ip_set *set, bool retried)
        if (SET_WITH_TIMEOUT(set) && !retried) {
                i = h->elements;
                write_lock_bh(&set->lock);
-               mtype_expire(set->data, NETS_LENGTH(set->family),
-                            h->dsize);
+               mtype_expire(set, set->data, NLEN(set->family), set->dsize);
                write_unlock_bh(&set->lock);
                if (h->elements < i)
                        return 0;
@@ -548,25 +574,25 @@ retry:
        for (i = 0; i < jhash_size(orig->htable_bits); i++) {
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
-                       data = ahash_data(n, j, h->dsize);
+                       data = ahash_data(n, j, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
                        flags = 0;
                        mtype_data_reset_flags(data, &flags);
 #endif
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
-                       ret = hbucket_elem_add(m, AHASH_MAX(h), h->dsize);
+                       ret = hbucket_elem_add(m, AHASH_MAX(h), set->dsize);
                        if (ret < 0) {
 #ifdef IP_SET_HASH_WITH_NETS
                                mtype_data_reset_flags(data, &flags);
 #endif
                                read_unlock_bh(&set->lock);
-                               ahash_destroy(t);
+                               mtype_ahash_destroy(set, t, false);
                                if (ret == -EAGAIN)
                                        goto retry;
                                return ret;
                        }
-                       d = ahash_data(m, m->pos++, h->dsize);
-                       memcpy(d, data, h->dsize);
+                       d = ahash_data(m, m->pos++, set->dsize);
+                       memcpy(d, data, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
                        mtype_data_reset_flags(d, &flags);
 #endif
@@ -581,7 +607,7 @@ retry:
 
        pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
                 orig->htable_bits, orig, t->htable_bits, t);
-       ahash_destroy(orig);
+       mtype_ahash_destroy(set, orig, false);
 
        return 0;
 }
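mtype_resize() copies every element of the old table into a larger one and, on failure, destroys the half-built table with ext_destroy set to false, presumably because the copied elements still share their extensions (comment strings and the like) with the originals. The sketch below only shows how entries redistribute when the bucket count gains a bit; the real code hashes with jhash via HKEY() rather than masking low bits:

#include <stdio.h>

#define BUCKET(key, bits)  ((key) & ((1u << (bits)) - 1))

int main(void)
{
        unsigned int old_bits = 2, new_bits = old_bits + 1;
        unsigned int keys[] = { 1, 5, 9, 14 };

        for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
                printf("key %2u: bucket %u -> %u\n", keys[i],
                       BUCKET(keys[i], old_bits), BUCKET(keys[i], new_bits));
        return 0;
}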
@@ -604,7 +630,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 
        if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
                /* FIXME: when set is full, we slow down here */
-               mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+               mtype_expire(set, h, NLEN(set->family), set->dsize);
 
        if (h->elements >= h->maxelem) {
                if (net_ratelimit())
@@ -618,11 +644,11 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        key = HKEY(value, h->initval, t->htable_bits);
        n = hbucket(t, key);
        for (i = 0; i < n->pos; i++) {
-               data = ahash_data(n, i, h->dsize);
+               data = ahash_data(n, i, set->dsize);
                if (mtype_data_equal(data, d, &multi)) {
                        if (flag_exist ||
                            (SET_WITH_TIMEOUT(set) &&
-                            ip_set_timeout_expired(ext_timeout(data, h)))) {
+                            ip_set_timeout_expired(ext_timeout(data, set)))) {
                                /* Just the extensions could be overwritten */
                                j = i;
                                goto reuse_slot;
@@ -633,30 +659,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
                }
                /* Reuse first timed out entry */
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(data, h)) &&
+                   ip_set_timeout_expired(ext_timeout(data, set)) &&
                    j != AHASH_MAX(h) + 1)
                        j = i;
        }
 reuse_slot:
        if (j != AHASH_MAX(h) + 1) {
                /* Fill out reused slot */
-               data = ahash_data(n, j, h->dsize);
+               data = ahash_data(n, j, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
-               mtype_del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
-               mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+               for (i = 0; i < IPSET_NET_COUNT; i++) {
+                       mtype_del_cidr(h, CIDR(data->cidr, i),
+                                      NLEN(set->family), i);
+                       mtype_add_cidr(h, CIDR(d->cidr, i),
+                                      NLEN(set->family), i);
+               }
 #endif
+               ip_set_ext_destroy(set, data);
        } else {
                /* Use/create a new slot */
                TUNE_AHASH_MAX(h, multi);
-               ret = hbucket_elem_add(n, AHASH_MAX(h), h->dsize);
+               ret = hbucket_elem_add(n, AHASH_MAX(h), set->dsize);
                if (ret != 0) {
                        if (ret == -EAGAIN)
                                mtype_data_next(&h->next, d);
                        goto out;
                }
-               data = ahash_data(n, n->pos++, h->dsize);
+               data = ahash_data(n, n->pos++, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
-               mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+               for (i = 0; i < IPSET_NET_COUNT; i++)
+                       mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
+                                      i);
 #endif
                h->elements++;
        }
@@ -665,9 +698,11 @@ reuse_slot:
        mtype_data_set_flags(data, flags);
 #endif
        if (SET_WITH_TIMEOUT(set))
-               ip_set_timeout_set(ext_timeout(data, h), ext->timeout);
+               ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
        if (SET_WITH_COUNTER(set))
-               ip_set_init_counter(ext_counter(data, h), ext);
+               ip_set_init_counter(ext_counter(data, set), ext);
+       if (SET_WITH_COMMENT(set))
+               ip_set_init_comment(ext_comment(data, set), ext);
 
 out:
        rcu_read_unlock_bh();
@@ -682,47 +717,60 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
          struct ip_set_ext *mext, u32 flags)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t;
        const struct mtype_elem *d = value;
        struct mtype_elem *data;
        struct hbucket *n;
-       int i;
+       int i, ret = -IPSET_ERR_EXIST;
+#ifdef IP_SET_HASH_WITH_NETS
+       u8 j;
+#endif
        u32 key, multi = 0;
 
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
        key = HKEY(value, h->initval, t->htable_bits);
        n = hbucket(t, key);
        for (i = 0; i < n->pos; i++) {
-               data = ahash_data(n, i, h->dsize);
+               data = ahash_data(n, i, set->dsize);
                if (!mtype_data_equal(data, d, &multi))
                        continue;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(data, h)))
-                       return -IPSET_ERR_EXIST;
+                   ip_set_timeout_expired(ext_timeout(data, set)))
+                       goto out;
                if (i != n->pos - 1)
                        /* Not last one */
-                       memcpy(data, ahash_data(n, n->pos - 1, h->dsize),
-                              h->dsize);
+                       memcpy(data, ahash_data(n, n->pos - 1, set->dsize),
+                              set->dsize);
 
                n->pos--;
                h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-               mtype_del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+               for (j = 0; j < IPSET_NET_COUNT; j++)
+                       mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
+                                      j);
 #endif
+               ip_set_ext_destroy(set, data);
                if (n->pos + AHASH_INIT_SIZE < n->size) {
                        void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
-                                           * h->dsize,
+                                           * set->dsize,
                                            GFP_ATOMIC);
-                       if (!tmp)
-                               return 0;
+                       if (!tmp) {
+                               ret = 0;
+                               goto out;
+                       }
                        n->size -= AHASH_INIT_SIZE;
-                       memcpy(tmp, n->value, n->size * h->dsize);
+                       memcpy(tmp, n->value, n->size * set->dsize);
                        kfree(n->value);
                        n->value = tmp;
                }
-               return 0;
+               ret = 0;
+               goto out;
        }
 
-       return -IPSET_ERR_EXIST;
+out:
+       rcu_read_unlock_bh();
+       return ret;
 }
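mtype_del() now pairs rcu_read_lock_bh()/unlock through a single out label and, as before, hands memory back when a bucket has at least AHASH_INIT_SIZE unused slots. A compilable model of that shrink step (int elements instead of dsize-sized ones):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define AHASH_INIT_SIZE 4

struct bucket { int *value; unsigned char size, pos; };

/* After a delete, give memory back when at least AHASH_INIT_SIZE slots
 * are unused (mirrors the kzalloc/memcpy/kfree sequence above). */
static void maybe_shrink(struct bucket *n)
{
        if (n->pos + AHASH_INIT_SIZE < n->size) {
                int *tmp = calloc(n->size - AHASH_INIT_SIZE, sizeof(*tmp));

                if (!tmp)
                        return;              /* keep the larger array */
                n->size -= AHASH_INIT_SIZE;
                memcpy(tmp, n->value, n->size * sizeof(*tmp));
                free(n->value);
                n->value = tmp;
        }
}

int main(void)
{
        struct bucket b = { .value = calloc(12, sizeof(int)), .size = 12, .pos = 3 };

        maybe_shrink(&b);
        printf("size now %u\n", (unsigned)b.size);   /* 8 */
        free(b.value);
        return 0;
}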
 
 static inline int
@@ -730,8 +778,7 @@ mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
                 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
 {
        if (SET_WITH_COUNTER(set))
-               ip_set_update_counter(ext_counter(data,
-                                                 (struct htype *)(set->data)),
+               ip_set_update_counter(ext_counter(data, set),
                                      ext, mext, flags);
        return mtype_do_data_match(data);
 }
@@ -745,25 +792,38 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
                 struct ip_set_ext *mext, u32 flags)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t = rcu_dereference_bh(h->table);
        struct hbucket *n;
        struct mtype_elem *data;
+#if IPSET_NET_COUNT == 2
+       struct mtype_elem orig = *d;
+       int i, j = 0, k;
+#else
        int i, j = 0;
+#endif
        u32 key, multi = 0;
-       u8 nets_length = NETS_LENGTH(set->family);
+       u8 nets_length = NLEN(set->family);
 
        pr_debug("test by nets\n");
-       for (; j < nets_length && h->nets[j].nets && !multi; j++) {
-               mtype_data_netmask(d, h->nets[j].cidr);
+       for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
+#if IPSET_NET_COUNT == 2
+               mtype_data_reset_elem(d, &orig);
+               mtype_data_netmask(d, h->nets[j].cidr[0], false);
+               for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
+                    k++) {
+                       mtype_data_netmask(d, h->nets[k].cidr[1], true);
+#else
+               mtype_data_netmask(d, h->nets[j].cidr[0]);
+#endif
                key = HKEY(d, h->initval, t->htable_bits);
                n = hbucket(t, key);
                for (i = 0; i < n->pos; i++) {
-                       data = ahash_data(n, i, h->dsize);
+                       data = ahash_data(n, i, set->dsize);
                        if (!mtype_data_equal(data, d, &multi))
                                continue;
                        if (SET_WITH_TIMEOUT(set)) {
                                if (!ip_set_timeout_expired(
-                                                       ext_timeout(data, h)))
+                                               ext_timeout(data, set)))
                                        return mtype_data_match(data, ext,
                                                                mext, set,
                                                                flags);
@@ -774,6 +834,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
                                return mtype_data_match(data, ext,
                                                        mext, set, flags);
                }
+#if IPSET_NET_COUNT == 2
+               }
+#endif
        }
        return 0;
 }
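When IPSET_NET_COUNT == 2, testing a host address walks every stored prefix-length pair: the outer loop masks the first network dimension, the inner loop the second, stopping at the first match. A stand-alone sketch of that nested walk with plain 32-bit addresses:

#include <stdio.h>

static unsigned int mask(unsigned int addr, unsigned char cidr)
{
        return cidr ? addr & ~((1u << (32 - cidr)) - 1) : 0;
}

int main(void)
{
        unsigned char cidr0[] = { 24, 16 }, cidr1[] = { 32, 8 };
        unsigned int ip = 0xc0a80101, ip2 = 0x0a000001;  /* 192.168.1.1, 10.0.0.1 */

        for (int j = 0; j < 2; j++)
                for (int k = 0; k < 2; k++)
                        printf("try %08x/%u, %08x/%u\n",
                               mask(ip, cidr0[j]), (unsigned)cidr0[j],
                               mask(ip2, cidr1[k]), (unsigned)cidr1[k]);
        return 0;
}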
@@ -785,30 +848,41 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
           struct ip_set_ext *mext, u32 flags)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t;
        struct mtype_elem *d = value;
        struct hbucket *n;
        struct mtype_elem *data;
-       int i;
+       int i, ret = 0;
        u32 key, multi = 0;
 
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
 #ifdef IP_SET_HASH_WITH_NETS
        /* If we test an IP address and not a network address,
         * try all possible network sizes */
-       if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
-               return mtype_test_cidrs(set, d, ext, mext, flags);
+       for (i = 0; i < IPSET_NET_COUNT; i++)
+               if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+                       break;
+       if (i == IPSET_NET_COUNT) {
+               ret = mtype_test_cidrs(set, d, ext, mext, flags);
+               goto out;
+       }
 #endif
 
        key = HKEY(d, h->initval, t->htable_bits);
        n = hbucket(t, key);
        for (i = 0; i < n->pos; i++) {
-               data = ahash_data(n, i, h->dsize);
+               data = ahash_data(n, i, set->dsize);
                if (mtype_data_equal(data, d, &multi) &&
                    !(SET_WITH_TIMEOUT(set) &&
-                     ip_set_timeout_expired(ext_timeout(data, h))))
-                       return mtype_data_match(data, ext, mext, set, flags);
+                     ip_set_timeout_expired(ext_timeout(data, set)))) {
+                       ret = mtype_data_match(data, ext, mext, set, flags);
+                       goto out;
+               }
        }
-       return 0;
+out:
+       rcu_read_unlock_bh();
+       return ret;
 }
 
 /* Reply a HEADER request: fill out the header part of the set */
@@ -816,18 +890,18 @@ static int
 mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
        const struct htype *h = set->data;
+       const struct htable *t;
        struct nlattr *nested;
        size_t memsize;
 
-       read_lock_bh(&set->lock);
-       memsize = mtype_ahash_memsize(h, NETS_LENGTH(set->family));
-       read_unlock_bh(&set->lock);
+       t = rcu_dereference_bh_nfnl(h->table);
+       memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize);
 
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
        if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
-                         htonl(jhash_size(h->table->htable_bits))) ||
+                         htonl(jhash_size(t->htable_bits))) ||
            nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
                goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
@@ -836,12 +910,9 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
                goto nla_put_failure;
 #endif
        if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
-           nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
-           ((set->extensions & IPSET_EXT_TIMEOUT) &&
-            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))) ||
-           ((set->extensions & IPSET_EXT_COUNTER) &&
-            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-                          htonl(IPSET_FLAG_WITH_COUNTERS))))
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
+               goto nla_put_failure;
+       if (unlikely(ip_set_put_flags(skb, set)))
                goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
@@ -856,7 +927,7 @@ mtype_list(const struct ip_set *set,
           struct sk_buff *skb, struct netlink_callback *cb)
 {
        const struct htype *h = set->data;
-       const struct htable *t = h->table;
+       const struct htable *t = rcu_dereference_bh_nfnl(h->table);
        struct nlattr *atd, *nested;
        const struct hbucket *n;
        const struct mtype_elem *e;
@@ -874,9 +945,9 @@ mtype_list(const struct ip_set *set,
                n = hbucket(t, cb->args[2]);
                pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
                for (i = 0; i < n->pos; i++) {
-                       e = ahash_data(n, i, h->dsize);
+                       e = ahash_data(n, i, set->dsize);
                        if (SET_WITH_TIMEOUT(set) &&
-                           ip_set_timeout_expired(ext_timeout(e, h)))
+                           ip_set_timeout_expired(ext_timeout(e, set)))
                                continue;
                        pr_debug("list hash %lu hbucket %p i %u, data %p\n",
                                 cb->args[2], n, i, e);
@@ -890,13 +961,7 @@ mtype_list(const struct ip_set *set,
                        }
                        if (mtype_data_list(skb, e))
                                goto nla_put_failure;
-                       if (SET_WITH_TIMEOUT(set) &&
-                           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                         htonl(ip_set_timeout_get(
-                                               ext_timeout(e, h)))))
-                               goto nla_put_failure;
-                       if (SET_WITH_COUNTER(set) &&
-                           ip_set_put_counter(skb, ext_counter(e, h)))
+                       if (ip_set_put_extensions(skb, set, e, true))
                                goto nla_put_failure;
                        ipset_nest_end(skb, nested);
                }
@@ -909,24 +974,24 @@ mtype_list(const struct ip_set *set,
 
 nla_put_failure:
        nlmsg_trim(skb, incomplete);
-       ipset_nest_end(skb, atd);
        if (unlikely(first == cb->args[2])) {
                pr_warning("Can't list set %s: one bucket does not fit into "
                           "a message. Please report it!\n", set->name);
                cb->args[2] = 0;
                return -EMSGSIZE;
        }
+       ipset_nest_end(skb, atd);
        return 0;
 }
 
 static int
-TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
-             const struct xt_action_param *par,
-             enum ipset_adt adt, struct ip_set_adt_opt *opt);
+IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
+           const struct xt_action_param *par,
+           enum ipset_adt adt, struct ip_set_adt_opt *opt);
 
 static int
-TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
-             enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
+IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
+           enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
 
 static const struct ip_set_type_variant mtype_variant = {
        .kadt   = mtype_kadt,
@@ -946,16 +1011,17 @@ static const struct ip_set_type_variant mtype_variant = {
 
 #ifdef IP_SET_EMIT_CREATE
 static int
-TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
+IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
+                           struct nlattr *tb[], u32 flags)
 {
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
-       u32 cadt_flags = 0;
        u8 hbits;
 #ifdef IP_SET_HASH_WITH_NETMASK
        u8 netmask;
 #endif
        size_t hsize;
        struct HTYPE *h;
+       struct htable *t;
 
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
@@ -1005,7 +1071,7 @@ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
        h->netmask = netmask;
 #endif
        get_random_bytes(&h->initval, sizeof(h->initval));
-       h->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        hbits = htable_bits(hashsize);
        hsize = htable_size(hbits);
@@ -1013,91 +1079,37 @@ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
                kfree(h);
                return -ENOMEM;
        }
-       h->table = ip_set_alloc(hsize);
-       if (!h->table) {
+       t = ip_set_alloc(hsize);
+       if (!t) {
                kfree(h);
                return -ENOMEM;
        }
-       h->table->htable_bits = hbits;
+       t->htable_bits = hbits;
+       rcu_assign_pointer(h->table, t);
 
        set->data = h;
-       if (set->family ==  NFPROTO_IPV4)
-               set->variant = &TOKEN(HTYPE, 4_variant);
-       else
-               set->variant = &TOKEN(HTYPE, 6_variant);
-
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       h->timeout =
-                               ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       if (set->family == NFPROTO_IPV4) {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 4ct_elem));
-                               h->offset[IPSET_OFFSET_TIMEOUT] =
-                                       offsetof(struct TOKEN(HTYPE, 4ct_elem),
-                                                timeout);
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 4ct_elem),
-                                                counter);
-                               TOKEN(HTYPE, 4_gc_init)(set,
-                                       TOKEN(HTYPE, 4_gc));
-                       } else {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 6ct_elem));
-                               h->offset[IPSET_OFFSET_TIMEOUT] =
-                                       offsetof(struct TOKEN(HTYPE, 6ct_elem),
-                                                timeout);
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 6ct_elem),
-                                                counter);
-                               TOKEN(HTYPE, 6_gc_init)(set,
-                                       TOKEN(HTYPE, 6_gc));
-                       }
-               } else {
-                       if (set->family == NFPROTO_IPV4) {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 4c_elem));
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 4c_elem),
-                                                counter);
-                       } else {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 6c_elem));
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 6c_elem),
-                                                counter);
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
-               if (set->family == NFPROTO_IPV4) {
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 4t_elem));
-                       h->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct TOKEN(HTYPE, 4t_elem),
-                                        timeout);
-                       TOKEN(HTYPE, 4_gc_init)(set, TOKEN(HTYPE, 4_gc));
-               } else {
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 6t_elem));
-                       h->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct TOKEN(HTYPE, 6t_elem),
-                                        timeout);
-                       TOKEN(HTYPE, 6_gc_init)(set, TOKEN(HTYPE, 6_gc));
-               }
+       if (set->family == NFPROTO_IPV4) {
+               set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
+               set->dsize = ip_set_elem_len(set, tb,
+                               sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
        } else {
+               set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
+               set->dsize = ip_set_elem_len(set, tb,
+                               sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                if (set->family == NFPROTO_IPV4)
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 4_elem));
+                       IPSET_TOKEN(HTYPE, 4_gc_init)(set,
+                               IPSET_TOKEN(HTYPE, 4_gc));
                else
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 6_elem));
+                       IPSET_TOKEN(HTYPE, 6_gc_init)(set,
+                               IPSET_TOKEN(HTYPE, 6_gc));
        }
 
        pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
-                set->name, jhash_size(h->table->htable_bits),
-                h->table->htable_bits, h->maxelem, set->data, h->table);
+                set->name, jhash_size(t->htable_bits),
+                t->htable_bits, h->maxelem, set->data, t);
 
        return 0;
 }
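The create path no longer picks one of several pre-built element structs; it stores the variant's base element size via ip_set_elem_len() and keeps timeout and extension handling on struct ip_set itself, which is why the 4t/4c/4ct element variants disappear in the hunks below. ip_set_elem_len()'s body is not part of this diff, so the following is only a guess at the idea, with illustrative names throughout: start from the base size and append each requested extension, remembering its offset:

#include <stddef.h>
#include <stdio.h>

enum ext_id { EXT_TIMEOUT, EXT_COUNTER, EXT_COMMENT, EXT_MAX };

static size_t ext_size[EXT_MAX] = { sizeof(unsigned long), 16, sizeof(char *) };

static size_t elem_len(size_t base, unsigned int want, size_t off[EXT_MAX])
{
        size_t len = base;

        for (int i = 0; i < EXT_MAX; i++)
                if (want & (1u << i)) {
                        len = (len + 7) & ~(size_t)7;   /* illustrative 8-byte alignment */
                        off[i] = len;
                        len += ext_size[i];
                }
        return len;
}

int main(void)
{
        size_t off[EXT_MAX] = { 0 };
        size_t len = elem_len(4 /* e.g. one __be32 ip */,
                              (1u << EXT_TIMEOUT) | (1u << EXT_COMMENT), off);

        printf("dsize=%zu timeout@%zu comment@%zu\n",
               len, off[EXT_TIMEOUT], off[EXT_COMMENT]);
        return 0;
}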
index c74e6e14cd933f4f20bbd5496312fdd0056b7e52..e65fc2423d56dd2b21cee513786eec41ceabefc9 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counters support */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counters support */
+#define IPSET_TYPE_REV_MAX     2       /* Comments support */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip");
 
 /* Type specific function prefix */
 #define HTYPE          hash_ip
 #define IP_SET_HASH_WITH_NETMASK
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ip4_elem {
@@ -43,22 +44,6 @@ struct hash_ip4_elem {
        __be32 ip;
 };
 
-struct hash_ip4t_elem {
-       __be32 ip;
-       unsigned long timeout;
-};
-
-struct hash_ip4c_elem {
-       __be32 ip;
-       struct ip_set_counter counter;
-};
-
-struct hash_ip4ct_elem {
-       __be32 ip;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -99,7 +84,7 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip4_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        __be32 ip;
 
        ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
@@ -118,8 +103,8 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip4_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, hosts;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, hosts;
        int ret = 0;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -178,29 +163,13 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 /* Member elements */
 struct hash_ip6_elem {
        union nf_inet_addr ip;
 };
 
-struct hash_ip6t_elem {
-       union nf_inet_addr ip;
-       unsigned long timeout;
-};
-
-struct hash_ip6c_elem {
-       union nf_inet_addr ip;
-       struct ip_set_counter counter;
-};
-
-struct hash_ip6ct_elem {
-       union nf_inet_addr ip;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -253,7 +222,7 @@ hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip6_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
        hash_ip6_netmask(&e.ip, h->netmask);
@@ -270,7 +239,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip6_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -304,8 +273,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -324,6 +293,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 7a2d2bd98d046f7b9eec8bc10d099c3794a1e0f6..525a595dd1fe4bf0efe6db7ca9cc06d995c66430 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-#define REVISION_MAX   2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Counters support added */
+#define IPSET_TYPE_REV_MAX     3 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port");
 
 /* Type specific function prefix */
 #define HTYPE          hash_ipport
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ipport4_elem {
@@ -46,31 +47,6 @@ struct hash_ipport4_elem {
        u8 padding;
 };
 
-struct hash_ipport4t_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipport4c_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipport4ct_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -116,10 +92,9 @@ hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
                  const struct xt_action_param *par,
                  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -136,8 +111,8 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, p = 0, port, port_to;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        bool with_ports = false;
        int ret;
 
@@ -222,7 +197,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipport6_elem {
        union nf_inet_addr ip;
@@ -231,31 +206,6 @@ struct hash_ipport6_elem {
        u8 padding;
 };
 
-struct hash_ipport6t_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipport6c_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipport6ct_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -306,10 +256,9 @@ hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
                  const struct xt_action_param *par,
                  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -326,7 +275,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        int ret;
@@ -396,8 +345,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ipport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -419,6 +368,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 34e8a1acce42219686c897f4f1db30387b733b43..f5636631466eb3ee98509e8b832e46da4ad9a417 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-#define REVISION_MAX   2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Counters support added */
+#define IPSET_TYPE_REV_MAX     3 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,ip");
 
 /* Type specific function prefix */
 #define HTYPE          hash_ipportip
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements  */
 struct hash_ipportip4_elem {
@@ -47,34 +48,6 @@ struct hash_ipportip4_elem {
        u8 padding;
 };
 
-struct hash_ipportip4t_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipportip4c_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportip4ct_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 static inline bool
 hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
                          const struct hash_ipportip4_elem *ip2,
@@ -120,10 +93,9 @@ hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -141,8 +113,8 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, p = 0, port, port_to;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        bool with_ports = false;
        int ret;
 
@@ -231,7 +203,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipportip6_elem {
        union nf_inet_addr ip;
@@ -241,34 +213,6 @@ struct hash_ipportip6_elem {
        u8 padding;
 };
 
-struct hash_ipportip6t_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipportip6c_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportip6ct_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -319,10 +263,9 @@ hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -340,7 +283,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        int ret;
@@ -414,8 +357,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ipportip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -437,6 +380,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index f15f3e28b9c338c6e72d3aa949d9d241c0e7cabe..5d87fe8a41ffa4888a70b6d91897360e30c253bc 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-/*                     2    Range as input support for IPv4 added */
-/*                     3    nomatch flag support added */
-#define REVISION_MAX   4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Range as input support for IPv4 added */
+/*                             3    nomatch flag support added */
+/*                             4    Counters support added */
+#define IPSET_TYPE_REV_MAX     5 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,net");
 
 /* Type specific function prefix */
@@ -46,7 +47,7 @@ MODULE_ALIAS("ip_set_hash:ip,port,net");
 #define IP_SET_HASH_WITH_PROTO
 #define IP_SET_HASH_WITH_NETS
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ipportnet4_elem {
@@ -58,37 +59,6 @@ struct hash_ipportnet4_elem {
        u8 proto;
 };
 
-struct hash_ipportnet4t_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       unsigned long timeout;
-};
-
-struct hash_ipportnet4c_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportnet4ct_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -170,9 +140,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -195,9 +165,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, p = 0, port, port_to;
-       u32 ip2_from, ip2_to, ip2_last, ip2;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+       u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -272,7 +242,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (ip > ip_to)
                        swap(ip, ip_to);
        } else if (tb[IPSET_ATTR_CIDR]) {
-               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
@@ -306,9 +276,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                                                       : port;
                for (; p <= port_to; p++) {
                        e.port = htons(p);
-                       ip2 = retried
-                             && ip == ntohl(h->next.ip)
-                             && p == ntohs(h->next.port)
+                       ip2 = retried &&
+                             ip == ntohl(h->next.ip) &&
+                             p == ntohs(h->next.port)
                                ? ntohl(h->next.ip2) : ip2_from;
                        while (!after(ip2, ip2_to)) {
                                e.ip2 = htonl(ip2);
@@ -328,7 +298,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipportnet6_elem {
        union nf_inet_addr ip;
@@ -339,37 +309,6 @@ struct hash_ipportnet6_elem {
        u8 proto;
 };
 
-struct hash_ipportnet6t_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       unsigned long timeout;
-};
-
-struct hash_ipportnet6c_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportnet6ct_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -454,9 +393,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -479,7 +418,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        u8 cidr;
@@ -574,8 +513,8 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
                          IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ipportnet_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -600,6 +539,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
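
The struct deletions above (the *4t_elem/*4c_elem/*4ct_elem and *6t_elem/*6c_elem/*6ct_elem variants) and the switch from IP_SET_INIT_KEXT(skb, opt, h) / IP_SET_INIT_UEXT(h) to the set-based forms express one idea: extensions such as timeout, counters and now comments are no longer baked into a separate element layout per combination, but are found through per-set offsets. A rough userspace model of that idea, with every name hypothetical and no claim about the real ip_set_ext layout:

    #include <stddef.h>

    enum toy_ext { TOY_EXT_TIMEOUT, TOY_EXT_COUNTER, TOY_EXT_COMMENT, TOY_EXT_MAX };

    struct toy_set {
            size_t offset[TOY_EXT_MAX]; /* 0 = extension not enabled for this set */
            size_t dsize;               /* base element size plus enabled extensions */
    };

    /* One lookup helper replaces four hand-written element structs. */
    static inline void *toy_ext_ptr(const struct toy_set *set, void *elem, enum toy_ext id)
    {
            return set->offset[id] ? (char *)elem + set->offset[id] : NULL;
    }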
index 223e9f546d0fa3e414b7a53a839b56d6835093bb..8295cf4f9fdcfdb3d3da4b72792add9fc83156ad 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    Range as input support for IPv4 added */
-/*                     2    nomatch flag support added */
-#define REVISION_MAX   3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    Range as input support for IPv4 added */
+/*                             2    nomatch flag support added */
+/*                             3    Counters support added */
+#define IPSET_TYPE_REV_MAX     4 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net");
 
 /* Type specific function prefix */
 #define HTYPE          hash_net
 #define IP_SET_HASH_WITH_NETS
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements  */
 struct hash_net4_elem {
@@ -46,31 +47,6 @@ struct hash_net4_elem {
        u8 cidr;
 };
 
-struct hash_net4t_elem {
-       __be32 ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       unsigned long timeout;
-};
-
-struct hash_net4c_elem {
-       __be32 ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-};
-
-struct hash_net4ct_elem {
-       __be32 ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -143,9 +119,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (e.cidr == 0)
                return -EINVAL;
@@ -165,8 +141,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = { .cidr = HOST_MASK };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip = 0, ip_to, last;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, last;
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -228,7 +204,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_net6_elem {
        union nf_inet_addr ip;
@@ -237,31 +213,6 @@ struct hash_net6_elem {
        u8 cidr;
 };
 
-struct hash_net6t_elem {
-       union nf_inet_addr ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       unsigned long timeout;
-};
-
-struct hash_net6c_elem {
-       union nf_inet_addr ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-};
-
-struct hash_net6ct_elem {
-       union nf_inet_addr ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -338,9 +289,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (e.cidr == 0)
                return -EINVAL;
@@ -357,10 +308,9 @@ static int
 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
               enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net6_elem e = { .cidr = HOST_MASK };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -406,8 +356,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_net_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -425,6 +375,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
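
Both kadt hunks above replace the open-coded fallback h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK with IP_SET_INIT_CIDR(). Judging only from the lines it replaces here, the macro is equivalent to the small helper below (a reference model of the observed behaviour, not the kernel definition):

    #include <stdint.h>

    /* First book-kept prefix length if any, otherwise fall back to a host route. */
    static inline uint8_t toy_init_cidr(uint8_t cidr, uint8_t host_mask)
    {
            return cidr ? cidr : host_mask;
    }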
index 7d798d5d5cd30a66de3d6fade0b4110eeba583f7..3f64a66bf5d9b78bfa551124cbc7bd3d7d8a8857 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    nomatch flag support added */
-/*                     2    /0 support added */
-#define REVISION_MAX   3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    nomatch flag support added */
+/*                             2    /0 support added */
+/*                             3    Counters support added */
+#define IPSET_TYPE_REV_MAX     4 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,iface");
 
 /* Interface name rbtree */
@@ -134,7 +135,7 @@ iface_add(struct rb_root *root, const char **iface)
 
 #define STREQ(a, b)    (strcmp(a, b) == 0)
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 struct hash_netiface4_elem_hashed {
        __be32 ip;
@@ -144,7 +145,7 @@ struct hash_netiface4_elem_hashed {
        u8 elem;
 };
 
-/* Member elements without timeout */
+/* Member elements */
 struct hash_netiface4_elem {
        __be32 ip;
        u8 physdev;
@@ -154,37 +155,6 @@ struct hash_netiface4_elem {
        const char *iface;
 };
 
-struct hash_netiface4t_elem {
-       __be32 ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       unsigned long timeout;
-};
-
-struct hash_netiface4c_elem {
-       __be32 ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-};
-
-struct hash_netiface4ct_elem {
-       __be32 ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -265,10 +235,10 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
                .elem = 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        int ret;
 
        if (e.cidr == 0)
@@ -319,8 +289,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip = 0, ip_to, last;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, last;
        char iface[IFNAMSIZ];
        int ret;
 
@@ -399,7 +369,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_netiface6_elem_hashed {
        union nf_inet_addr ip;
@@ -418,37 +388,6 @@ struct hash_netiface6_elem {
        const char *iface;
 };
 
-struct hash_netiface6t_elem {
-       union nf_inet_addr ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       unsigned long timeout;
-};
-
-struct hash_netiface6c_elem {
-       union nf_inet_addr ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-};
-
-struct hash_netiface6ct_elem {
-       union nf_inet_addr ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -534,10 +473,10 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
                .elem = 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        int ret;
 
        if (e.cidr == 0)
@@ -584,7 +523,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        char iface[IFNAMSIZ];
        int ret;
 
@@ -645,8 +584,8 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
                          IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_netiface_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -668,6 +607,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
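
A second detail visible in the kadt hunks of this and the previous types: h->nets[0].cidr becomes h->nets[0].cidr[0], so the per-set prefix book-keeping now holds an array of prefix lengths rather than a single value. That is the hook the two-dimensional types added below (hash:net,net and hash:net,port,net, both with IPSET_NET_COUNT 2) need in order to track an outer and an inner CIDR per slot. A hypothetical shape of that record, for orientation only:

    #include <stdint.h>

    #define TOY_NET_COUNT 2                 /* mirrors IPSET_NET_COUNT in the new types */

    struct toy_net_prefixes {
            uint32_t nets[TOY_NET_COUNT];   /* elements stored with this prefix, per dimension */
            uint8_t  cidr[TOY_NET_COUNT];   /* prefix length, per dimension */
    };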
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
new file mode 100644 (file)
index 0000000..4260327
--- /dev/null
@@ -0,0 +1,483 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ * Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN     0
+#define IPSET_TYPE_REV_MAX     0
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,net");
+
+/* Type specific function prefix */
+#define HTYPE          hash_netnet
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variants */
+
+/* Member elements  */
+struct hash_netnet4_elem {
+       union {
+               __be32 ip[2];
+               __be64 ipcmp;
+       };
+       u8 nomatch;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
+                    const struct hash_netnet4_elem *ip2,
+                    u32 *multi)
+{
+       return ip1->ipcmp == ip2->ipcmp &&
+              ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet4_do_data_match(const struct hash_netnet4_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet4_data_set_flags(struct hash_netnet4_elem *elem, u32 flags)
+{
+       elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
+                         struct hash_netnet4_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
+{
+       if (inner) {
+               elem->ip[1] &= ip_set_netmask(cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               elem->ip[0] &= ip_set_netmask(cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netnet4_data_list(struct sk_buff *skb,
+                   const struct hash_netnet4_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netnet4_data_next(struct hash_netnet4_elem *next,
+                   const struct hash_netnet4_elem *d)
+{
+       next->ipcmp = d->ipcmp;
+}
+
+#define MTYPE          hash_netnet4
+#define PF             4
+#define HOST_MASK      32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+              const struct xt_action_param *par,
+              enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet4_elem e = {
+               .cidr[0] = h->nets[0].cidr[0] ? h->nets[0].cidr[0] : HOST_MASK,
+               .cidr[1] = h->nets[0].cidr[1] ? h->nets[0].cidr[1] : HOST_MASK,
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+       ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+       ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
+       e.ip[0] &= ip_set_netmask(e.cidr[0]);
+       e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_netnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet4_elem e = { .cidr[0] = HOST_MASK,
+                                      .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, last;
+       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+       u8 cidr, cidr2;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+             ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR]) {
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+               if (!cidr || cidr > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[0] = cidr;
+       }
+
+       if (tb[IPSET_ATTR_CIDR2]) {
+               cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+               if (!cidr2 || cidr2 > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[1] = cidr2;
+       }
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] &&
+                                  tb[IPSET_ATTR_IP2_TO])) {
+               e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+               e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+                      ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip_to = ip;
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip_to < ip)
+                       swap(ip, ip_to);
+               if (ip + UINT_MAX == ip_to)
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       ip2_to = ip2_from;
+       if (tb[IPSET_ATTR_IP2_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+               if (ret)
+                       return ret;
+               if (ip2_to < ip2_from)
+                       swap(ip2_from, ip2_to);
+               if (ip2_from + UINT_MAX == ip2_to)
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       if (retried)
+               ip = ntohl(h->next.ip[0]);
+
+       while (!after(ip, ip_to)) {
+               e.ip[0] = htonl(ip);
+               last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+               e.cidr[0] = cidr;
+               ip2 = (retried &&
+                      ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
+                                                  : ip2_from;
+               while (!after(ip2, ip2_to)) {
+                       e.ip[1] = htonl(ip2);
+                       last2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr2);
+                       e.cidr[1] = cidr2;
+                       ret = adtfn(set, &e, &ext, &ext, flags);
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+                       ip2 = last2 + 1;
+               }
+               ip = last + 1;
+       }
+       return ret;
+}
+
+/* IPv6 variants */
+
+struct hash_netnet6_elem {
+       union nf_inet_addr ip[2];
+       u8 nomatch;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
+                    const struct hash_netnet6_elem *ip2,
+                    u32 *multi)
+{
+       return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+              ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+              ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet6_do_data_match(const struct hash_netnet6_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet6_data_set_flags(struct hash_netnet6_elem *elem, u32 flags)
+{
+       elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
+                         struct hash_netnet6_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
+{
+       if (inner) {
+               ip6_netmask(&elem->ip[1], cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               ip6_netmask(&elem->ip[0], cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netnet6_data_list(struct sk_buff *skb,
+                   const struct hash_netnet6_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netnet6_data_next(struct hash_netnet4_elem *next,
+                   const struct hash_netnet6_elem *d)
+{
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE          hash_netnet6
+#define PF             6
+#define HOST_MASK      128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+              const struct xt_action_param *par,
+              enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet6_elem e = {
+               .cidr[0] = h->nets[0].cidr[0] ? h->nets[0].cidr[0] : HOST_MASK,
+               .cidr[1] = h->nets[0].cidr[1] ? h->nets[0].cidr[1] : HOST_MASK
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
+
+       ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+       ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet6_elem e = { .cidr[0] = HOST_MASK,
+                                      .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+               return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+             ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (tb[IPSET_ATTR_CIDR2])
+               e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+           e.cidr[1] > HOST_MASK)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       ret = adtfn(set, &e, &ext, &ext, flags);
+
+       return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+              ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static struct ip_set_type hash_netnet_type __read_mostly = {
+       .name           = "hash:net,net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = NFPROTO_UNSPEC,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
+       .create         = hash_netnet_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2_TO]     = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+               [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
+               [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_netnet_init(void)
+{
+       return ip_set_type_register(&hash_netnet_type);
+}
+
+static void __exit
+hash_netnet_fini(void)
+{
+       ip_set_type_unregister(&hash_netnet_type);
+}
+
+module_init(hash_netnet_init);
+module_exit(hash_netnet_fini);
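+
The new hash_netnet4_elem overlays its two IPv4 addresses with a single __be64 (ipcmp) and its two prefix lengths with a u16 (ccmp), so the equality test compares each half of the key with one integer comparison. A small standalone sketch of the same trick, using plain uint32_t/uint64_t in place of the kernel's __be32/__be64:

    #include <stdint.h>
    #include <stdio.h>

    struct pair {
            union {
                    uint32_t ip[2];
                    uint64_t ipcmp;   /* both addresses viewed as one word */
            };
            union {
                    uint8_t  cidr[2];
                    uint16_t ccmp;    /* both prefix lengths viewed as one word */
            };
    };

    int main(void)
    {
            struct pair a = { .ip = { 0x0a000001, 0xc0a80001 }, .cidr = { 24, 16 } };
            struct pair b = a;

            /* Two comparisons instead of four field-by-field compares. */
            printf("equal: %d\n", a.ipcmp == b.ipcmp && a.ccmp == b.ccmp);
            return 0;
    }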
index 09d6690bee6fd33891c4a00bbbb909f2bbf7731b..7097fb0141bf6e1363ca0b0342451e66c34773b4 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-/*                     2    Range as input support for IPv4 added */
-/*                     3    nomatch flag support added */
-#define REVISION_MAX   4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Range as input support for IPv4 added */
+/*                             3    nomatch flag support added */
+/*                             4    Counters support added */
+#define IPSET_TYPE_REV_MAX     5 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,port");
 
 /* Type specific function prefix */
@@ -45,7 +46,7 @@ MODULE_ALIAS("ip_set_hash:net,port");
  */
 #define IP_SET_HASH_WITH_NETS_PACKED
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_netport4_elem {
@@ -56,34 +57,6 @@ struct hash_netport4_elem {
        u8 nomatch:1;
 };
 
-struct hash_netport4t_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       unsigned long timeout;
-};
-
-struct hash_netport4c_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-};
-
-struct hash_netport4ct_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -162,9 +135,9 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -186,8 +159,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 port, port_to, p = 0, ip = 0, ip_to, last;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -287,7 +260,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_netport6_elem {
        union nf_inet_addr ip;
@@ -297,34 +270,6 @@ struct hash_netport6_elem {
        u8 nomatch:1;
 };
 
-struct hash_netport6t_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       unsigned long timeout;
-};
-
-struct hash_netport6c_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-};
-
-struct hash_netport6ct_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -407,9 +352,9 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -431,7 +376,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport6_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        u8 cidr;
@@ -518,8 +463,8 @@ static struct ip_set_type hash_netport_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_netport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -542,6 +487,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
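
hash:net,port keeps its prefix length in a 7-bit field next to the nomatch bit (u8 cidr:7), which is why IP_SET_HASH_WITH_NETS_PACKED is defined for it and why the code above consistently initialises and compares with HOST_MASK - 1: the stored value is the real prefix length minus one, so a full /32 (or /128) still fits in 7 bits. A tiny self-contained model of that encoding, names invented for illustration:

    #include <assert.h>
    #include <stdint.h>

    struct toy_packed {
            uint8_t cidr:7;      /* stored as real prefix length - 1: /32 kept as 31 */
            uint8_t nomatch:1;
    };

    static inline void toy_set_cidr(struct toy_packed *e, uint8_t real) { e->cidr = real - 1; }
    static inline uint8_t toy_get_cidr(const struct toy_packed *e)      { return e->cidr + 1; }

    int main(void)
    {
            struct toy_packed e = { 0 };

            toy_set_cidr(&e, 32);            /* a host route still fits in 7 bits */
            assert(toy_get_cidr(&e) == 32);
            return 0;
    }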
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
new file mode 100644 (file)
index 0000000..363fab9
--- /dev/null
@@ -0,0 +1,588 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN     0
+#define IPSET_TYPE_REV_MAX     0 /* Comments support added */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,port,net");
+
+/* Type specific function prefix */
+#define HTYPE          hash_netportnet
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variant */
+
+/* Member elements */
+struct hash_netportnet4_elem {
+       union {
+               __be32 ip[2];
+               __be64 ipcmp;
+       };
+       __be16 port;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+       u8 nomatch:1;
+       u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
+                          const struct hash_netportnet4_elem *ip2,
+                          u32 *multi)
+{
+       return ip1->ipcmp == ip2->ipcmp &&
+              ip1->ccmp == ip2->ccmp &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
+{
+       elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
+                               struct hash_netportnet4_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
+                             u8 cidr, bool inner)
+{
+       if (inner) {
+               elem->ip[1] &= ip_set_netmask(cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               elem->ip[0] &= ip_set_netmask(cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netportnet4_data_list(struct sk_buff *skb,
+                         const struct hash_netportnet4_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
+                         const struct hash_netportnet4_elem *d)
+{
+       next->ipcmp = d->ipcmp;
+       next->port = d->port;
+}
+
+#define MTYPE          hash_netportnet4
+#define PF             4
+#define HOST_MASK      32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    const struct xt_action_param *par,
+                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet4_elem e = {
+               .cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+               .cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK),
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+       if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+                                &e.port, &e.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+       ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
+       e.ip[0] &= ip_set_netmask(e.cidr[0]);
+       e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet4_elem e = { .cidr[0] = HOST_MASK,
+                                          .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
+       u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+       bool with_ports = false;
+       u8 cidr, cidr2;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+             ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR]) {
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+               if (!cidr || cidr > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[0] = cidr;
+       }
+
+       if (tb[IPSET_ATTR_CIDR2]) {
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+               if (!cidr || cidr > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[1] = cidr;
+       }
+
+       if (tb[IPSET_ATTR_PORT])
+               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+               with_ports = ip_set_proto_with_ports(e.proto);
+
+               if (e.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       if (!(with_ports || e.proto == IPPROTO_ICMP))
+               e.port = 0;
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+       if (adt == IPSET_TEST ||
+           !(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
+               e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+               e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+                      ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip_to = ip;
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+               if (unlikely(ip + UINT_MAX == ip_to))
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       port_to = port = ntohs(e.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       }
+
+       ip2_to = ip2_from;
+       if (tb[IPSET_ATTR_IP2_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+               if (ret)
+                       return ret;
+               if (ip2_from > ip2_to)
+                       swap(ip2_from, ip2_to);
+               if (unlikely(ip2_from + UINT_MAX == ip2_to))
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       if (retried)
+               ip = ntohl(h->next.ip[0]);
+
+       while (!after(ip, ip_to)) {
+               e.ip[0] = htonl(ip);
+               ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+               e.cidr[0] = cidr;
+               p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
+                                                         : port;
+               for (; p <= port_to; p++) {
+                       e.port = htons(p);
+                       ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
+                              p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
+                                                        : ip2_from;
+                       while (!after(ip2, ip2_to)) {
+                               e.ip[1] = htonl(ip2);
+                               ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+                                                               &cidr2);
+                               e.cidr[1] = cidr2;
+                               ret = adtfn(set, &e, &ext, &ext, flags);
+                               if (ret && !ip_set_eexist(ret, flags))
+                                       return ret;
+                               else
+                                       ret = 0;
+                               ip2 = ip2_last + 1;
+                       }
+               }
+               ip = ip_last + 1;
+       }
+       return ret;
+}
+
+/* IPv6 variant */
+
+struct hash_netportnet6_elem {
+       union nf_inet_addr ip[2];
+       __be16 port;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+       u8 nomatch:1;
+       u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
+                          const struct hash_netportnet6_elem *ip2,
+                          u32 *multi)
+{
+       return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+              ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+              ip1->ccmp == ip2->ccmp &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
+{
+       elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
+                               struct hash_netportnet6_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
+                             u8 cidr, bool inner)
+{
+       if (inner) {
+               ip6_netmask(&elem->ip[1], cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               ip6_netmask(&elem->ip[0], cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netportnet6_data_list(struct sk_buff *skb,
+                         const struct hash_netportnet6_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
+                         const struct hash_netportnet6_elem *d)
+{
+       next->port = d->port;
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE          hash_netportnet6
+#define PF             6
+#define HOST_MASK      128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    const struct xt_action_param *par,
+                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet6_elem e = {
+               .cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+               .cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK),
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
+
+       if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+                                &e.port, &e.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+       ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet6_elem e = { .cidr[0] = HOST_MASK,
+                                          .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 port, port_to;
+       bool with_ports = false;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+               return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+             ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (tb[IPSET_ATTR_CIDR2])
+               e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+                    e.cidr[1] > HOST_MASK))
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       if (tb[IPSET_ATTR_PORT])
+               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+               with_ports = ip_set_proto_with_ports(e.proto);
+
+               if (e.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       if (!(with_ports || e.proto == IPPROTO_ICMPV6))
+               e.port = 0;
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+                      ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(e.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       if (retried)
+               port = ntohs(h->next.port);
+       for (; port <= port_to; port++) {
+               e.port = htons(port);
+               ret = adtfn(set, &e, &ext, &ext, flags);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
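
[Editor's note] The closing part of the uadt handler above walks an inclusive port range and, when the operation is retried, resumes from the port recorded in h->next.port by the _data_next helper. A user-space sketch of that resume-and-swap loop (names and the add callback are hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-port add callback standing in for adtfn(). */
typedef int (*add_port_fn)(uint16_t port);

int add_port_range(uint16_t from, uint16_t to, bool retried,
		   uint16_t resume, add_port_fn add)
{
	uint32_t port;	/* wider than u16 so a range ending at 65535 terminates */
	int ret = 0;

	if (from > to) {		/* accept the bounds in either order */
		uint16_t tmp = from;
		from = to;
		to = tmp;
	}
	if (retried)			/* pick up where the previous attempt stopped */
		from = resume;

	for (port = from; port <= to; port++) {
		ret = add((uint16_t)port);
		if (ret)
			break;
	}
	return ret;
}
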
+
+static struct ip_set_type hash_netportnet_type __read_mostly = {
+       .name           = "hash:net,port,net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
+                         IPSET_TYPE_NOMATCH,
+       .dimension      = IPSET_DIM_THREE,
+       .family         = NFPROTO_UNSPEC,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
+       .create         = hash_netportnet_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2_TO]     = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+               [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
+               [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_netportnet_init(void)
+{
+       return ip_set_type_register(&hash_netportnet_type);
+}
+
+static void __exit
+hash_netportnet_fini(void)
+{
+       ip_set_type_unregister(&hash_netportnet_type);
+}
+
+module_init(hash_netportnet_init);
+module_exit(hash_netportnet_fini);
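
[Editor's note] The create_policy and adt_policy tables registered above are ordinary netlink attribute policies: attributes coming from user space are type- and length-checked against them before the create/add/del/test handlers ever see them. A rough kernel-style sketch of applying such a policy (the attribute enum and helper are hypothetical, and the four-argument nla_parse_nested() of this kernel era is assumed):

#include <net/netlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_SIZE, DEMO_ATTR_TIMEOUT, __DEMO_ATTR_MAX };
#define DEMO_ATTR_MAX	(__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_SIZE]	= { .type = NLA_U32 },
	[DEMO_ATTR_TIMEOUT]	= { .type = NLA_U32 },
};

static int demo_parse(const struct nlattr *nested, u32 *size)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];
	int err;

	/* Rejects attributes whose type or length disagrees with the policy. */
	err = nla_parse_nested(tb, DEMO_ATTR_MAX, nested, demo_policy);
	if (err)
		return err;

	if (tb[DEMO_ATTR_SIZE])
		*size = nla_get_u32(tb[DEMO_ATTR_SIZE]);
	return 0;
}
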
index 979b8c90e42201c656faf914e38b4b30bac2d298..ec6f6d15dded36429ee235196b48e828f7842fbc 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_list.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    Counters support added */
+#define IPSET_TYPE_REV_MAX     2 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_list:set");
 
 /* Member elements  */
@@ -28,28 +29,6 @@ struct set_elem {
        ip_set_id_t id;
 };
 
-struct sett_elem {
-       struct {
-               ip_set_id_t id;
-       } __attribute__ ((aligned));
-       unsigned long timeout;
-};
-
-struct setc_elem {
-       struct {
-               ip_set_id_t id;
-       } __attribute__ ((aligned));
-       struct ip_set_counter counter;
-};
-
-struct setct_elem {
-       struct {
-               ip_set_id_t id;
-       } __attribute__ ((aligned));
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 struct set_adt_elem {
        ip_set_id_t id;
        ip_set_id_t refid;
@@ -58,24 +37,14 @@ struct set_adt_elem {
 
 /* Type structure */
 struct list_set {
-       size_t dsize;           /* element size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
        u32 size;               /* size of set list array */
-       u32 timeout;            /* timeout value */
        struct timer_list gc;   /* garbage collection */
+       struct net *net;        /* namespace */
        struct set_elem members[0]; /* the set members */
 };
 
-static inline struct set_elem *
-list_set_elem(const struct list_set *map, u32 id)
-{
-       return (struct set_elem *)((void *)map->members + id * map->dsize);
-}
-
-#define ext_timeout(e, m)      \
-(unsigned long *)((void *)(e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m)      \
-(struct ip_set_counter *)((void *)(e) + (m)->offset[IPSET_OFFSET_COUNTER])
+#define list_set_elem(set, map, id)    \
+       (struct set_elem *)((void *)(map)->members + (id) * (set)->dsize)
 
 static int
 list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
@@ -92,16 +61,16 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
        if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
                opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                ret = ip_set_test(e->id, skb, par, opt);
                if (ret > 0) {
                        if (SET_WITH_COUNTER(set))
-                               ip_set_update_counter(ext_counter(e, map),
+                               ip_set_update_counter(ext_counter(e, set),
                                                      ext, &opt->ext,
                                                      cmdflags);
                        return ret;
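
[Editor's note] list_set_elem(set, map, i), used throughout this file after the change, computes each member's address from the run-time element length now kept in set->dsize rather than a per-map copy. The underlying flat-array arithmetic, as a small user-space model with hypothetical names:

#include <stdint.h>
#include <stdlib.h>

struct demo_elem {
	uint16_t id;	/* fixed part; enabled extensions would follow it */
};

/* Element i starts i * dsize bytes into the flat members array. */
static inline struct demo_elem *demo_elem_at(void *members, size_t dsize,
					     unsigned int i)
{
	return (struct demo_elem *)((char *)members + (size_t)i * dsize);
}

int main(void)
{
	size_t dsize = sizeof(struct demo_elem) + 8;	/* pretend 8 bytes of extensions */
	void *members = calloc(4, dsize);

	if (!members)
		return 1;
	demo_elem_at(members, dsize, 2)->id = 42;
	free(members);
	return 0;
}
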
@@ -121,11 +90,11 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
        int ret;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                ret = ip_set_add(e->id, skb, par, opt);
                if (ret == 0)
@@ -145,11 +114,11 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
        int ret;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                ret = ip_set_del(e->id, skb, par, opt);
                if (ret == 0)
@@ -163,8 +132,7 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
              const struct xt_action_param *par,
              enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       struct list_set *map = set->data;
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        switch (adt) {
        case IPSET_TEST:
@@ -188,10 +156,10 @@ id_eq(const struct ip_set *set, u32 i, ip_set_id_t id)
        if (i >= map->size)
                return 0;
 
-       e = list_set_elem(map, i);
+       e = list_set_elem(set, map, i);
        return !!(e->id == id &&
                 !(SET_WITH_TIMEOUT(set) &&
-                  ip_set_timeout_expired(ext_timeout(e, map))));
+                  ip_set_timeout_expired(ext_timeout(e, set))));
 }
 
 static int
@@ -199,28 +167,36 @@ list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
             const struct ip_set_ext *ext)
 {
        struct list_set *map = set->data;
-       struct set_elem *e = list_set_elem(map, i);
+       struct set_elem *e = list_set_elem(set, map, i);
 
        if (e->id != IPSET_INVALID_ID) {
-               if (i == map->size - 1)
+               if (i == map->size - 1) {
                        /* Last element replaced: e.g. add new,before,last */
-                       ip_set_put_byindex(e->id);
-               else {
-                       struct set_elem *x = list_set_elem(map, map->size - 1);
+                       ip_set_put_byindex(map->net, e->id);
+                       ip_set_ext_destroy(set, e);
+               } else {
+                       struct set_elem *x = list_set_elem(set, map,
+                                                          map->size - 1);
 
                        /* Last element pushed off */
-                       if (x->id != IPSET_INVALID_ID)
-                               ip_set_put_byindex(x->id);
-                       memmove(list_set_elem(map, i + 1), e,
-                               map->dsize * (map->size - (i + 1)));
+                       if (x->id != IPSET_INVALID_ID) {
+                               ip_set_put_byindex(map->net, x->id);
+                               ip_set_ext_destroy(set, x);
+                       }
+                       memmove(list_set_elem(set, map, i + 1), e,
+                               set->dsize * (map->size - (i + 1)));
+                       /* Extensions must be initialized to zero */
+                       memset(e, 0, set->dsize);
                }
        }
 
        e->id = d->id;
        if (SET_WITH_TIMEOUT(set))
-               ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+               ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
        if (SET_WITH_COUNTER(set))
-               ip_set_init_counter(ext_counter(e, map), ext);
+               ip_set_init_counter(ext_counter(e, set), ext);
+       if (SET_WITH_COMMENT(set))
+               ip_set_init_comment(ext_comment(e, set), ext);
        return 0;
 }
 
@@ -228,16 +204,17 @@ static int
 list_set_del(struct ip_set *set, u32 i)
 {
        struct list_set *map = set->data;
-       struct set_elem *e = list_set_elem(map, i);
+       struct set_elem *e = list_set_elem(set, map, i);
 
-       ip_set_put_byindex(e->id);
+       ip_set_put_byindex(map->net, e->id);
+       ip_set_ext_destroy(set, e);
 
        if (i < map->size - 1)
-               memmove(e, list_set_elem(map, i + 1),
-                       map->dsize * (map->size - (i + 1)));
+               memmove(e, list_set_elem(set, map, i + 1),
+                       set->dsize * (map->size - (i + 1)));
 
        /* Last element */
-       e = list_set_elem(map, map->size - 1);
+       e = list_set_elem(set, map, map->size - 1);
        e->id = IPSET_INVALID_ID;
        return 0;
 }
@@ -247,13 +224,16 @@ set_cleanup_entries(struct ip_set *set)
 {
        struct list_set *map = set->data;
        struct set_elem *e;
-       u32 i;
+       u32 i = 0;
 
-       for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+       while (i < map->size) {
+               e = list_set_elem(set, map, i);
                if (e->id != IPSET_INVALID_ID &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        list_set_del(set, i);
+                       /* Check element moved to position i in next loop */
+               else
+                       i++;
        }
 }
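
[Editor's note] The loop above switches from a for to a while because list_set_del() shifts the remaining members down into slot i, so the index must only advance when nothing was removed. The same pattern on a plain array, as a user-space sketch:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Drop entries for which expired() is true from a packed array and return
 * the new length.  Deleting slot i shifts everything after it down, so i
 * only advances when slot i was kept. */
size_t cleanup(int *a, size_t n, bool (*expired)(int))
{
	size_t i = 0;

	while (i < n) {
		if (expired(a[i])) {
			memmove(&a[i], &a[i + 1], (n - i - 1) * sizeof(a[0]));
			n--;		/* slot i now holds the next element */
		} else {
			i++;
		}
	}
	return n;
}
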
 
@@ -268,11 +248,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        int ret;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                else if (SET_WITH_TIMEOUT(set) &&
-                        ip_set_timeout_expired(ext_timeout(e, map)))
+                        ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                else if (e->id != d->id)
                        continue;
@@ -299,14 +279,14 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        bool flag_exist = flags & IPSET_FLAG_EXIST;
        u32 i, ret = 0;
 
+       if (SET_WITH_TIMEOUT(set))
+               set_cleanup_entries(set);
+
        /* Check already added element */
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        goto insert;
-               else if (SET_WITH_TIMEOUT(set) &&
-                        ip_set_timeout_expired(ext_timeout(e, map)))
-                       continue;
                else if (e->id != d->id)
                        continue;
 
@@ -319,18 +299,22 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
                        /* Can't re-add */
                        return -IPSET_ERR_EXIST;
                /* Update extensions */
+               ip_set_ext_destroy(set, e);
+
                if (SET_WITH_TIMEOUT(set))
-                       ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+                       ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
                if (SET_WITH_COUNTER(set))
-                       ip_set_init_counter(ext_counter(e, map), ext);
+                       ip_set_init_counter(ext_counter(e, set), ext);
+               if (SET_WITH_COMMENT(set))
+                       ip_set_init_comment(ext_comment(e, set), ext);
                /* Set is already added to the list */
-               ip_set_put_byindex(d->id);
+               ip_set_put_byindex(map->net, d->id);
                return 0;
        }
 insert:
        ret = -IPSET_ERR_LIST_FULL;
        for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
                                : list_set_add(set, i, d, ext);
@@ -355,12 +339,12 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        u32 i;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return d->before != 0 ? -IPSET_ERR_REF_EXIST
                                              : -IPSET_ERR_EXIST;
                else if (SET_WITH_TIMEOUT(set) &&
-                        ip_set_timeout_expired(ext_timeout(e, map)))
+                        ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                else if (e->id != d->id)
                        continue;
@@ -386,7 +370,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
        struct list_set *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        struct ip_set *s;
        int ret = 0;
 
@@ -403,7 +387,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
        ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
-       e.id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+       e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
        if (e.id == IPSET_INVALID_ID)
                return -IPSET_ERR_NAME;
        /* "Loop detection" */
@@ -423,7 +407,8 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (tb[IPSET_ATTR_NAMEREF]) {
-               e.refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+               e.refid = ip_set_get_byname(map->net,
+                                           nla_data(tb[IPSET_ATTR_NAMEREF]),
                                            &s);
                if (e.refid == IPSET_INVALID_ID) {
                        ret = -IPSET_ERR_NAMEREF;
@@ -439,9 +424,9 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 
 finish:
        if (e.refid != IPSET_INVALID_ID)
-               ip_set_put_byindex(e.refid);
+               ip_set_put_byindex(map->net, e.refid);
        if (adt != IPSET_ADD || ret)
-               ip_set_put_byindex(e.id);
+               ip_set_put_byindex(map->net, e.id);
 
        return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -454,9 +439,10 @@ list_set_flush(struct ip_set *set)
        u32 i;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id != IPSET_INVALID_ID) {
-                       ip_set_put_byindex(e->id);
+                       ip_set_put_byindex(map->net, e->id);
+                       ip_set_ext_destroy(set, e);
                        e->id = IPSET_INVALID_ID;
                }
        }
@@ -485,14 +471,11 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
        if (!nested)
                goto nla_put_failure;
        if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
-           (SET_WITH_TIMEOUT(set) &&
-            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
-           (SET_WITH_COUNTER(set) &&
-            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-                          htonl(IPSET_FLAG_WITH_COUNTERS))) ||
            nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-                         htonl(sizeof(*map) + map->size * map->dsize)))
+                         htonl(sizeof(*map) + map->size * set->dsize)))
+               goto nla_put_failure;
+       if (unlikely(ip_set_put_flags(skb, set)))
                goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
@@ -515,11 +498,11 @@ list_set_list(const struct ip_set *set,
                return -EMSGSIZE;
        for (; cb->args[2] < map->size; cb->args[2]++) {
                i = cb->args[2];
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        goto finish;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
                if (!nested) {
@@ -530,15 +513,9 @@ list_set_list(const struct ip_set *set,
                                goto nla_put_failure;
                }
                if (nla_put_string(skb, IPSET_ATTR_NAME,
-                                  ip_set_name_byindex(e->id)))
-                       goto nla_put_failure;
-               if (SET_WITH_TIMEOUT(set) &&
-                   nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                 htonl(ip_set_timeout_get(
-                                               ext_timeout(e, map)))))
+                                  ip_set_name_byindex(map->net, e->id)))
                        goto nla_put_failure;
-               if (SET_WITH_COUNTER(set) &&
-                   ip_set_put_counter(skb, ext_counter(e, map)))
+               if (ip_set_put_extensions(skb, set, e, true))
                        goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
@@ -550,11 +527,11 @@ finish:
 
 nla_put_failure:
        nla_nest_cancel(skb, nested);
-       ipset_nest_end(skb, atd);
        if (unlikely(i == first)) {
                cb->args[2] = 0;
                return -EMSGSIZE;
        }
+       ipset_nest_end(skb, atd);
        return 0;
 }
 
@@ -565,7 +542,7 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
        const struct list_set *y = b->data;
 
        return x->size == y->size &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
@@ -594,7 +571,7 @@ list_set_gc(unsigned long ul_set)
        set_cleanup_entries(set);
        write_unlock_bh(&set->lock);
 
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
@@ -606,43 +583,40 @@ list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
        init_timer(&map->gc);
        map->gc.data = (unsigned long) set;
        map->gc.function = gc;
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
 /* Create list:set type of sets */
 
-static struct list_set *
-init_list_set(struct ip_set *set, u32 size, size_t dsize,
-             unsigned long timeout)
+static bool
+init_list_set(struct net *net, struct ip_set *set, u32 size)
 {
        struct list_set *map;
        struct set_elem *e;
        u32 i;
 
-       map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+       map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL);
        if (!map)
-               return NULL;
+               return false;
 
        map->size = size;
-       map->dsize = dsize;
-       map->timeout = timeout;
+       map->net = net;
        set->data = map;
 
        for (i = 0; i < size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                e->id = IPSET_INVALID_ID;
        }
 
-       return map;
+       return true;
 }
 
 static int
-list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+               u32 flags)
 {
-       struct list_set *map;
-       u32 size = IP_SET_LIST_DEFAULT_SIZE, cadt_flags = 0;
-       unsigned long timeout = 0;
+       u32 size = IP_SET_LIST_DEFAULT_SIZE;
 
        if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
@@ -654,45 +628,13 @@ list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
        if (size < IP_SET_LIST_MIN_SIZE)
                size = IP_SET_LIST_MIN_SIZE;
 
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (tb[IPSET_ATTR_TIMEOUT])
-               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
        set->variant = &set_variant;
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map = init_list_set(set, size,
-                                       sizeof(struct setct_elem), timeout);
-                       if (!map)
-                               return -ENOMEM;
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct setct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct setct_elem, counter);
-                       list_set_gc_init(set, list_set_gc);
-               } else {
-                       map = init_list_set(set, size,
-                                           sizeof(struct setc_elem), 0);
-                       if (!map)
-                               return -ENOMEM;
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct setc_elem, counter);
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map = init_list_set(set, size,
-                                   sizeof(struct sett_elem), timeout);
-               if (!map)
-                       return -ENOMEM;
-               set->extensions |= IPSET_EXT_TIMEOUT;
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct sett_elem, timeout);
+       set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+       if (!init_list_set(net, set, size))
+               return -ENOMEM;
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                list_set_gc_init(set, list_set_gc);
-       } else {
-               map = init_list_set(set, size, sizeof(struct set_elem), 0);
-               if (!map)
-                       return -ENOMEM;
        }
        return 0;
 }
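
[Editor's note] The create path above now lets ip_set_elem_len() size each member as the fixed struct set_elem plus whatever extensions (such as timeout, counter, comment) the request enabled, storing the result in set->dsize. A simplified user-space model of that calculation (names hypothetical, alignment handling omitted):

#include <stddef.h>

enum { DEMO_EXT_TIMEOUT, DEMO_EXT_COUNTER, DEMO_EXT_COMMENT, DEMO_EXT_MAX };

struct demo_set {
	size_t dsize;			/* total per-element length */
	size_t offset[DEMO_EXT_MAX];	/* where each enabled extension lives */
};

size_t demo_elem_len(struct demo_set *s, size_t base,
		     const size_t ext_size[DEMO_EXT_MAX],
		     const int enabled[DEMO_EXT_MAX])
{
	size_t len = base;
	int i;

	for (i = 0; i < DEMO_EXT_MAX; i++) {
		if (!enabled[i])
			continue;
		s->offset[i] = len;	/* extension appended after what came before */
		len += ext_size[i];
	}
	s->dsize = len;
	return len;
}
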
@@ -703,8 +645,8 @@ static struct ip_set_type list_set_type __read_mostly = {
        .features       = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = list_set_create,
        .create_policy  = {
                [IPSET_ATTR_SIZE]       = { .type = NLA_U32 },
@@ -721,6 +663,7 @@ static struct ip_set_type list_set_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 4f69e83ff836b0ec415342772055a58a52c25fc4..34fda62f40f61bd210b93a89a570d1da07f2ede2 100644 (file)
@@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                struct ip_vs_cpu_stats *s;
+               struct ip_vs_service *svc;
 
                s = this_cpu_ptr(dest->stats.cpustats);
                s->ustats.inpkts++;
@@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
                s->ustats.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
 
-               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               rcu_read_lock();
+               svc = rcu_dereference(dest->svc);
+               s = this_cpu_ptr(svc->stats.cpustats);
                s->ustats.inpkts++;
                u64_stats_update_begin(&s->syncp);
                s->ustats.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
+               rcu_read_unlock();
 
                s = this_cpu_ptr(ipvs->tot_stats.cpustats);
                s->ustats.inpkts++;
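
[Editor's note] The stats paths above now snapshot dest->svc under rcu_read_lock()/rcu_dereference(), pairing with the rcu_assign_pointer() writer in a later hunk of this series. The reader side in isolation, as a kernel-style sketch with hypothetical types:

#include <linux/rcupdate.h>

struct demo_svc;
void demo_svc_add_bytes(struct demo_svc *svc, unsigned int bytes);

struct demo_dest {
	struct demo_svc __rcu *svc;	/* may be replaced by a writer at any time */
};

static void demo_account(struct demo_dest *dest, unsigned int bytes)
{
	struct demo_svc *svc;

	rcu_read_lock();
	/* The snapshot is only safe to use inside the read-side section. */
	svc = rcu_dereference(dest->svc);
	demo_svc_add_bytes(svc, bytes);
	rcu_read_unlock();
}
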
@@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                struct ip_vs_cpu_stats *s;
+               struct ip_vs_service *svc;
 
                s = this_cpu_ptr(dest->stats.cpustats);
                s->ustats.outpkts++;
@@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
                s->ustats.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
 
-               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               rcu_read_lock();
+               svc = rcu_dereference(dest->svc);
+               s = this_cpu_ptr(svc->stats.cpustats);
                s->ustats.outpkts++;
                u64_stats_update_begin(&s->syncp);
                s->ustats.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
+               rcu_read_unlock();
 
                s = this_cpu_ptr(ipvs->tot_stats.cpustats);
                s->ustats.outpkts++;
@@ -1231,11 +1239,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
             const struct net_device *in, const struct net_device *out,
             int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET);
+       return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
 
 /*
@@ -1243,11 +1251,11 @@ ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
                   const struct net_device *in, const struct net_device *out,
                   int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET);
+       return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1258,11 +1266,11 @@ ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
             const struct net_device *in, const struct net_device *out,
             int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET6);
+       return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
 
 /*
@@ -1270,11 +1278,11 @@ ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                   const struct net_device *in, const struct net_device *out,
                   int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET6);
+       return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
 
 #endif
@@ -1725,12 +1733,12 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
  *     Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in,
                      const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET);
+       return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
 
 /*
@@ -1738,11 +1746,11 @@ ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
  *     Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET);
+       return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1752,7 +1760,7 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
  * Copy info from first fragment, to the rest of them.
  */
 static unsigned int
-ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_preroute_frag6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
@@ -1784,12 +1792,12 @@ ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
  *     Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in,
                      const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET6);
+       return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
 
 /*
@@ -1797,11 +1805,11 @@ ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
  *     Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET6);
+       return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
 
 #endif
@@ -1817,7 +1825,7 @@ ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
  *      and send them to ip_vs_in_icmp.
  */
 static unsigned int
-ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
                   const struct net_device *in, const struct net_device *out,
                   int (*okfn)(struct sk_buff *))
 {
@@ -1834,12 +1842,12 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
        if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
-       return ip_vs_in_icmp(skb, &r, hooknum);
+       return ip_vs_in_icmp(skb, &r, ops->hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
 static unsigned int
-ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in, const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
@@ -1858,7 +1866,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
        if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
-       return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
+       return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
 }
 #endif
 
index c8148e48738657d63810a1dade924267dc032d01..a3df9bddc4f76251a8722792d8e9f15478546ac1 100644 (file)
@@ -460,7 +460,7 @@ static inline void
 __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
 {
        atomic_inc(&svc->refcnt);
-       dest->svc = svc;
+       rcu_assign_pointer(dest->svc, svc);
 }
 
 static void ip_vs_service_free(struct ip_vs_service *svc)
@@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc)
        kfree(svc);
 }
 
-static void
-__ip_vs_unbind_svc(struct ip_vs_dest *dest)
+static void ip_vs_service_rcu_free(struct rcu_head *head)
 {
-       struct ip_vs_service *svc = dest->svc;
+       struct ip_vs_service *svc;
+
+       svc = container_of(head, struct ip_vs_service, rcu_head);
+       ip_vs_service_free(svc);
+}
 
-       dest->svc = NULL;
+static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay)
+{
        if (atomic_dec_and_test(&svc->refcnt)) {
                IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
                              svc->fwmark,
                              IP_VS_DBG_ADDR(svc->af, &svc->addr),
                              ntohs(svc->port));
-               ip_vs_service_free(svc);
+               if (do_delay)
+                       call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
+               else
+                       ip_vs_service_free(svc);
        }
 }
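
[Editor's note] __ip_vs_svc_put() above either frees the service immediately or, when RCU readers may still hold the pointer, defers the free with call_rcu(). The deferred-free idiom on its own, as a kernel-style sketch (object and helpers hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_obj {
	int payload;
	struct rcu_head rcu_head;	/* storage used by the deferred callback */
};

static void demo_obj_rcu_free(struct rcu_head *head)
{
	/* Runs only after all pre-existing read-side sections have finished. */
	kfree(container_of(head, struct demo_obj, rcu_head));
}

static void demo_obj_put(struct demo_obj *obj, bool readers_possible)
{
	if (readers_possible)
		call_rcu(&obj->rcu_head, demo_obj_rcu_free);
	else
		kfree(obj);
}
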
 
@@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
                              IP_VS_DBG_ADDR(svc->af, &dest->addr),
                              ntohs(dest->port),
                              atomic_read(&dest->refcnt));
-               /* We can not reuse dest while in grace period
-                * because conns still can use dest->svc
-                */
-               if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
-                       continue;
                if (dest->af == svc->af &&
                    ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
                    dest->port == dport &&
@@ -697,8 +699,10 @@ out:
 
 static void ip_vs_dest_free(struct ip_vs_dest *dest)
 {
+       struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
+
        __ip_vs_dst_cache_reset(dest);
-       __ip_vs_unbind_svc(dest);
+       __ip_vs_svc_put(svc, false);
        free_percpu(dest->stats.cpustats);
        kfree(dest);
 }
@@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                    struct ip_vs_dest_user_kern *udest, int add)
 {
        struct netns_ipvs *ipvs = net_ipvs(svc->net);
+       struct ip_vs_service *old_svc;
        struct ip_vs_scheduler *sched;
        int conn_flags;
 
@@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        atomic_set(&dest->conn_flags, conn_flags);
 
        /* bind the service */
-       if (!dest->svc) {
+       old_svc = rcu_dereference_protected(dest->svc, 1);
+       if (!old_svc) {
                __ip_vs_bind_svc(dest, svc);
        } else {
-               if (dest->svc != svc) {
-                       __ip_vs_unbind_svc(dest);
+               if (old_svc != svc) {
                        ip_vs_zero_stats(&dest->stats);
                        __ip_vs_bind_svc(dest, svc);
+                       __ip_vs_svc_put(old_svc, true);
                }
        }
 
@@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
        return 0;
 }
 
-static void ip_vs_dest_wait_readers(struct rcu_head *head)
-{
-       struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
-                                              rcu_head);
-
-       /* End of grace period after unlinking */
-       clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
-}
-
-
 /*
  *     Delete a destination (must be already unlinked from the service)
  */
@@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
         */
        ip_vs_rs_unhash(dest);
 
-       if (!cleanup) {
-               set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
-               call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
-       }
-
        spin_lock_bh(&ipvs->dest_trash_lock);
        IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
                      atomic_read(&dest->refcnt));
        if (list_empty(&ipvs->dest_trash) && !cleanup)
                mod_timer(&ipvs->dest_trash_timer,
-                         jiffies + IP_VS_DEST_TRASH_PERIOD);
+                         jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
        /* dest lives in trash without reference */
        list_add(&dest->t_list, &ipvs->dest_trash);
+       dest->idle_start = 0;
        spin_unlock_bh(&ipvs->dest_trash_lock);
        ip_vs_dest_put(dest);
 }
@@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data)
        struct net *net = (struct net *) data;
        struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_dest *dest, *next;
+       unsigned long now = jiffies;
 
        spin_lock(&ipvs->dest_trash_lock);
        list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
-               /* Skip if dest is in grace period */
-               if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
-                       continue;
                if (atomic_read(&dest->refcnt) > 0)
                        continue;
+               if (dest->idle_start) {
+                       if (time_before(now, dest->idle_start +
+                                            IP_VS_DEST_TRASH_PERIOD))
+                               continue;
+               } else {
+                       dest->idle_start = max(1UL, now);
+                       continue;
+               }
                IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
                              dest->vfwmark,
-                             IP_VS_DBG_ADDR(dest->svc->af, &dest->addr),
+                             IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port));
                list_del(&dest->t_list);
                ip_vs_dest_free(dest);
        }
        if (!list_empty(&ipvs->dest_trash))
                mod_timer(&ipvs->dest_trash_timer,
-                         jiffies + IP_VS_DEST_TRASH_PERIOD);
+                         jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
        spin_unlock(&ipvs->dest_trash_lock);
 }
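
[Editor's note] Instead of the removed per-dest grace-period bit, the trash walk above stamps dest->idle_start the first time it finds an unreferenced entry and only frees it once a full IP_VS_DEST_TRASH_PERIOD has passed, with the timer now firing at half that period. The two-pass timing, as a kernel-style sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_TRASH_PERIOD	(120 * HZ)	/* assumed period, for illustration */

struct demo_entry {
	unsigned long idle_start;	/* 0: the timer has not yet seen it idle */
};

/* True once the entry has stayed idle for a full period. */
static bool demo_entry_expired(struct demo_entry *e, unsigned long now)
{
	if (!e->idle_start) {
		e->idle_start = max(1UL, now);	/* first pass: remember, never store 0 */
		return false;
	}
	return time_after_eq(now, e->idle_start + DEMO_TRASH_PERIOD);
}
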
 
@@ -1320,14 +1318,6 @@ out:
        return ret;
 }
 
-static void ip_vs_service_rcu_free(struct rcu_head *head)
-{
-       struct ip_vs_service *svc;
-
-       svc = container_of(head, struct ip_vs_service, rcu_head);
-       ip_vs_service_free(svc);
-}
-
 /*
  *     Delete a service from the service list
  *     - The service must be unlinked, unlocked and not referenced!
@@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
        /*
         *    Free the service if nobody refers to it
         */
-       if (atomic_dec_and_test(&svc->refcnt)) {
-               IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
-                             svc->fwmark,
-                             IP_VS_DBG_ADDR(svc->af, &svc->addr),
-                             ntohs(svc->port));
-               call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
-       }
+       __ip_vs_svc_put(svc, true);
 
        /* decrease the module use count */
        ip_vs_use_count_dec();
index 6bee6d0c73a52e93e1413162b4db971340fa3312..1425e9a924c4f64429637bc49cbde204b0bb1921 100644 (file)
@@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
                                 struct ip_vs_cpu_stats __percpu *stats)
 {
        int i;
+       bool add = false;
 
        for_each_possible_cpu(i) {
                struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
                unsigned int start;
                __u64 inbytes, outbytes;
-               if (i) {
+               if (add) {
                        sum->conns += s->ustats.conns;
                        sum->inpkts += s->ustats.inpkts;
                        sum->outpkts += s->ustats.outpkts;
@@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
                        sum->inbytes += inbytes;
                        sum->outbytes += outbytes;
                } else {
+                       add = true;
                        sum->conns = s->ustats.conns;
                        sum->inpkts = s->ustats.inpkts;
                        sum->outpkts = s->ustats.outpkts;
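
[Editor's note] The add flag above replaces the old `if (i)` test: the first CPU returned by for_each_possible_cpu() need not be CPU 0, so it is the first CPU actually visited, whatever its number, that must seed the (caller-provided, unzeroed) sums. The shape of that accumulation, as a kernel-style sketch over a hypothetical per-cpu counter:

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_counter {
	u64 packets;
};

static u64 demo_sum(struct demo_counter __percpu *counters)
{
	u64 total = 0;
	bool add = false;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_counter *c = per_cpu_ptr(counters, cpu);

		if (add) {
			total += c->packets;
		} else {
			add = true;		/* first possible CPU seeds the total */
			total = c->packets;
		}
	}
	return total;
}
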
index 1383b0eadc0e777d5ff6dbce2a2ded451a3ae296..eff13c94498e068173c66b6a481a6264d038ffea 100644 (file)
@@ -93,7 +93,7 @@ struct ip_vs_lblc_entry {
        struct hlist_node       list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
-       struct ip_vs_dest __rcu *dest;          /* real server (cache) */
+       struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
        struct rcu_head         rcu_head;
 };
@@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = {
 };
 #endif
 
-static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
+static void ip_vs_lblc_rcu_free(struct rcu_head *head)
 {
-       struct ip_vs_dest *dest;
+       struct ip_vs_lblc_entry *en = container_of(head,
+                                                  struct ip_vs_lblc_entry,
+                                                  rcu_head);
 
-       hlist_del_rcu(&en->list);
-       /*
-        * We don't kfree dest because it is referred either by its service
-        * or the trash dest list.
-        */
-       dest = rcu_dereference_protected(en->dest, 1);
-       ip_vs_dest_put(dest);
-       kfree_rcu(en, rcu_head);
+       ip_vs_dest_put(en->dest);
+       kfree(en);
 }
 
+static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
+{
+       hlist_del_rcu(&en->list);
+       call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
+}
 
 /*
  *     Returns hash value for IPVS LBLC entry
@@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
        struct ip_vs_lblc_entry *en;
 
        en = ip_vs_lblc_get(dest->af, tbl, daddr);
-       if (!en) {
-               en = kmalloc(sizeof(*en), GFP_ATOMIC);
-               if (!en)
-                       return NULL;
-
-               en->af = dest->af;
-               ip_vs_addr_copy(dest->af, &en->addr, daddr);
-               en->lastuse = jiffies;
+       if (en) {
+               if (en->dest == dest)
+                       return en;
+               ip_vs_lblc_del(en);
+       }
+       en = kmalloc(sizeof(*en), GFP_ATOMIC);
+       if (!en)
+               return NULL;
 
-               ip_vs_dest_hold(dest);
-               RCU_INIT_POINTER(en->dest, dest);
+       en->af = dest->af;
+       ip_vs_addr_copy(dest->af, &en->addr, daddr);
+       en->lastuse = jiffies;
 
-               ip_vs_lblc_hash(tbl, en);
-       } else {
-               struct ip_vs_dest *old_dest;
+       ip_vs_dest_hold(dest);
+       en->dest = dest;
 
-               old_dest = rcu_dereference_protected(en->dest, 1);
-               if (old_dest != dest) {
-                       ip_vs_dest_put(old_dest);
-                       ip_vs_dest_hold(dest);
-                       /* No ordering constraints for refcnt */
-                       RCU_INIT_POINTER(en->dest, dest);
-               }
-       }
+       ip_vs_lblc_hash(tbl, en);
 
        return en;
 }
@@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
        tbl->dead = 1;
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
-                       ip_vs_lblc_free(en);
+                       ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                }
        }
@@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
                                        sysctl_lblc_expiration(svc)))
                                continue;
 
-                       ip_vs_lblc_free(en);
+                       ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock(&svc->sched_lock);
@@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;
 
-                       ip_vs_lblc_free(en);
+                       ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
@@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                        continue;
 
                doh = ip_vs_dest_conn_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
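
[Editor's note] The (__s64) casts above protect the weighted least-load test, which decides loh/lweight against doh/dweight by cross-multiplying; with large enough connection overheads and weights the 32-bit products overflow and can flip the comparison. A small user-space illustration with assumed magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t loh = 70000, dweight = 50000;	/* assumed overhead and weight */

	/* 70000 * 50000 = 3,500,000,000, which is above INT32_MAX, so a 32-bit
	 * product wraps; widening one operand first keeps the true value. */
	int64_t wide = (int64_t)loh * dweight;
	int32_t wrapped = (int32_t)((uint32_t)loh * (uint32_t)dweight);

	printf("64-bit product: %lld, wrapped 32-bit product: %d\n",
	       (long long)wide, wrapped);
	return 0;
}
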
@@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                 * free up entries from the trash at any time.
                 */
 
-               dest = rcu_dereference(en->dest);
+               dest = en->dest;
                if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
                    atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
                        goto out;
@@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void)
 {
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        unregister_pernet_subsys(&ip_vs_lblc_ops);
-       synchronize_rcu();
+       rcu_barrier();
 }
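
[Editor's note] synchronize_rcu() is replaced by rcu_barrier() here because the scheduler now frees its entries through call_rcu(): synchronize_rcu() only waits for readers, while rcu_barrier() also waits for every already-queued callback to run, so the callback cannot fire after the module text is gone. A minimal kernel-style sketch of that unload ordering (module and helper hypothetical):

#include <linux/module.h>
#include <linux/rcupdate.h>

/* Assumed to stop anything that might still queue call_rcu() callbacks. */
extern void demo_unregister(void);

static void __exit demo_exit(void)
{
	demo_unregister();
	/* Wait for callbacks already queued with call_rcu() to finish;
	 * synchronize_rcu() alone would not guarantee that. */
	rcu_barrier();
}
module_exit(demo_exit);
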
 
 
index 5199448697f64fcf0da0e35dbc38903477a92861..0b8550089a2e580e7feba0723117f1c849048a39 100644 (file)
@@ -89,7 +89,7 @@
  */
 struct ip_vs_dest_set_elem {
        struct list_head        list;          /* list link */
-       struct ip_vs_dest __rcu *dest;         /* destination server */
+       struct ip_vs_dest       *dest;          /* destination server */
        struct rcu_head         rcu_head;
 };
 
@@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
 
        if (check) {
                list_for_each_entry(e, &set->list, list) {
-                       struct ip_vs_dest *d;
-
-                       d = rcu_dereference_protected(e->dest, 1);
-                       if (d == dest)
-                               /* already existed */
+                       if (e->dest == dest)
                                return;
                }
        }
@@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
                return;
 
        ip_vs_dest_hold(dest);
-       RCU_INIT_POINTER(e->dest, dest);
+       e->dest = dest;
 
        list_add_rcu(&e->list, &set->list);
        atomic_inc(&set->size);
@@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
        set->lastmod = jiffies;
 }
 
+static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
+{
+       struct ip_vs_dest_set_elem *e;
+
+       e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
+       ip_vs_dest_put(e->dest);
+       kfree(e);
+}
+
 static void
 ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
 {
        struct ip_vs_dest_set_elem *e;
 
        list_for_each_entry(e, &set->list, list) {
-               struct ip_vs_dest *d;
-
-               d = rcu_dereference_protected(e->dest, 1);
-               if (d == dest) {
+               if (e->dest == dest) {
                        /* HIT */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
-                       ip_vs_dest_put(dest);
                        list_del_rcu(&e->list);
-                       kfree_rcu(e, rcu_head);
+                       call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
                        break;
                }
        }
@@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
        struct ip_vs_dest_set_elem *e, *ep;
 
        list_for_each_entry_safe(e, ep, &set->list, list) {
-               struct ip_vs_dest *d;
-
-               d = rcu_dereference_protected(e->dest, 1);
-               /*
-                * We don't kfree dest because it is referred either
-                * by its service or by the trash dest list.
-                */
-               ip_vs_dest_put(d);
                list_del_rcu(&e->list);
-               kfree_rcu(e, rcu_head);
+               call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
        }
 }
 
@@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
        struct ip_vs_dest *dest, *least;
        int loh, doh;
 
-       if (set == NULL)
-               return NULL;
-
        /* select the first destination server, whose weight > 0 */
        list_for_each_entry_rcu(e, &set->list, list) {
-               least = rcu_dereference(e->dest);
+               least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
@@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
        /* find the destination with the weighted least load */
   nextstage:
        list_for_each_entry_continue_rcu(e, &set->list, list) {
-               dest = rcu_dereference(e->dest);
+               dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
                doh = ip_vs_dest_conn_overhead(dest);
-               if ((loh * atomic_read(&dest->weight) >
-                    doh * atomic_read(&least->weight))
+               if (((__s64)loh * atomic_read(&dest->weight) >
+                    (__s64)doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
@@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
 
        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
-               most = rcu_dereference_protected(e->dest, 1);
+               most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = ip_vs_dest_conn_overhead(most);
                        goto nextstage;
@@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
        /* find the destination with the weighted most load */
   nextstage:
        list_for_each_entry_continue(e, &set->list, list) {
-               dest = rcu_dereference_protected(e->dest, 1);
+               dest = e->dest;
                doh = ip_vs_dest_conn_overhead(dest);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
-               if ((moh * atomic_read(&dest->weight) <
-                    doh * atomic_read(&most->weight))
+               if (((__s64)moh * atomic_read(&dest->weight) <
+                    (__s64)doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
@@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
                        continue;
 
                doh = ip_vs_dest_conn_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
@@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
 {
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        unregister_pernet_subsys(&ip_vs_lblcr_ops);
-       synchronize_rcu();
+       rcu_barrier();
 }
 
 
index d8d9860934fee1e7f59505eeefa094307b2d58c3..961a6de9bb29035458945185f488a5fc1209ba00 100644 (file)
@@ -40,7 +40,7 @@
 #include <net/ip_vs.h>
 
 
-static inline unsigned int
+static inline int
 ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
 {
        /*
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                  struct ip_vs_iphdr *iph)
 {
        struct ip_vs_dest *dest, *least = NULL;
-       unsigned int loh = 0, doh;
+       int loh = 0, doh;
 
        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                }
 
                if (!least ||
-                   (loh * atomic_read(&dest->weight) >
-                    doh * atomic_read(&least->weight))) {
+                   ((__s64)loh * atomic_read(&dest->weight) >
+                    (__s64)doh * atomic_read(&least->weight))) {
                        least = dest;
                        loh = doh;
                }
index a5284cc3d88279923b6a28b1f8a8f87bfc2a6a0f..e446b9fa7424c6382cb65433447f3febe2b17eeb 100644 (file)
@@ -44,7 +44,7 @@
 #include <net/ip_vs.h>
 
 
-static inline unsigned int
+static inline int
 ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
 {
        /*
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                   struct ip_vs_iphdr *iph)
 {
        struct ip_vs_dest *dest, *least;
-       unsigned int loh, doh;
+       int loh, doh;
 
        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                doh = ip_vs_sed_dest_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
index 6dc1fa1288409067de8a19f53a32e174caa9adc9..b5b4650d50a9180f211e6cce82e3393ece2fc11c 100644 (file)
@@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                   struct ip_vs_iphdr *iph)
 {
        struct ip_vs_dest *dest, *least;
-       unsigned int loh, doh;
+       int loh, doh;
 
        IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
 
@@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                doh = ip_vs_dest_conn_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
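
Each scheduler hunk above applies the same fix: the weighted-overhead products are widened to 64 bits (and loh/doh become signed int) so that overhead * weight cannot wrap in 32-bit arithmetic and invert the "which server is less loaded?" comparison. A standalone sketch with hypothetical numbers (none of these values come from the patch):

#include <stdio.h>

/* Hypothetical illustration: with ~40000 connections (overhead roughly
 * 50 per connection) and a weight of 2000, the product no longer fits
 * in 32 bits and the truncated comparison picks the wrong server. */
int main(void)
{
        int loh = 2000000;      /* overhead of the current "least" server */
        int doh = 100000;       /* overhead of the candidate server       */
        int dw = 2000, lw = 1;  /* their weights                          */

        long long wide_l = (long long)loh * dw; /* 4,000,000,000 */
        long long wide_r = (long long)doh * lw; /* 100,000       */

        /* truncating to 32 bits (what the old int math effectively did)
         * flips the result on two's-complement machines */
        printf("32-bit view: %d\n", (int)wide_l > (int)wide_r);
        printf("64-bit view: %d\n", wide_l > wide_r);
        return 0;
}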
index bdebd03bc8cd448a75ca94db1c88ea461f72093a..70866d192efc9351f2fcafe3ef23ce0c131fc0ba 100644 (file)
@@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                                   flowi6_to_flowi(&fl1), false)) {
                        if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
                                           flowi6_to_flowi(&fl2), false)) {
-                               if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
-                                           sizeof(rt1->rt6i_gateway)) &&
+                               if (ipv6_addr_equal(rt6_nexthop(rt1),
+                                                   rt6_nexthop(rt2)) &&
                                    rt1->dst.dev == rt2->dst.dev)
                                        ret = 1;
                                dst_release(&rt2->dst);
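
The H.323 call-forwarding filter now compares the routes' effective next hops with ipv6_addr_equal()/rt6_nexthop() rather than raw rt6i_gateway bytes, which also covers on-link routes whose next hop is the destination itself. A hedged sketch of that comparison in isolation (example_same_nexthop() is an illustrative name):

static bool example_same_nexthop(struct rt6_info *rt1, struct rt6_info *rt2)
{
        /* same next hop and same outgoing device, as the filter requires */
        return ipv6_addr_equal(rt6_nexthop(rt1), rt6_nexthop(rt2)) &&
               rt1->dst.dev == rt2->dst.dev;
}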
index e0c4373b47478d4d72899d95166566db34a8bcfa..466410eaa482c2a3940d3768351e92be17a644ef 100644 (file)
@@ -52,66 +52,8 @@ module_param(sip_direct_media, int, 0600);
 MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
                                   "endpoints only (default 1)");
 
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
-                               unsigned int dataoff, const char **dptr,
-                               unsigned int *datalen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
-
-void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
-                                  s16 off) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
-
-unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
-                                      unsigned int protoff,
-                                      unsigned int dataoff,
-                                      const char **dptr,
-                                      unsigned int *datalen,
-                                      struct nf_conntrack_expect *exp,
-                                      unsigned int matchoff,
-                                      unsigned int matchlen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
-
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
-                                    unsigned int dataoff,
-                                    const char **dptr,
-                                    unsigned int *datalen,
-                                    unsigned int sdpoff,
-                                    enum sdp_header_types type,
-                                    enum sdp_header_types term,
-                                    const union nf_inet_addr *addr)
-                                    __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
-
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
-                                    unsigned int dataoff,
-                                    const char **dptr,
-                                    unsigned int *datalen,
-                                    unsigned int matchoff,
-                                    unsigned int matchlen,
-                                    u_int16_t port) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
-
-unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
-                                       unsigned int protoff,
-                                       unsigned int dataoff,
-                                       const char **dptr,
-                                       unsigned int *datalen,
-                                       unsigned int sdpoff,
-                                       const union nf_inet_addr *addr)
-                                       __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
-
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
-                                     unsigned int dataoff,
-                                     const char **dptr,
-                                     unsigned int *datalen,
-                                     struct nf_conntrack_expect *rtp_exp,
-                                     struct nf_conntrack_expect *rtcp_exp,
-                                     unsigned int mediaoff,
-                                     unsigned int medialen,
-                                     union nf_inet_addr *rtp_addr)
-                                     __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook);
+const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
 
 static int string_len(const struct nf_conn *ct, const char *dptr,
                      const char *limit, int *shift)
@@ -914,8 +856,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
        u_int16_t base_port;
        __be16 rtp_port, rtcp_port;
-       typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port;
-       typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media;
+       const struct nf_nat_sip_hooks *hooks;
 
        saddr = NULL;
        if (sip_direct_media) {
@@ -966,22 +907,23 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 #endif
                        skip_expect = 1;
        } while (!skip_expect);
-       rcu_read_unlock();
 
        base_port = ntohs(tuple.dst.u.udp.port) & ~1;
        rtp_port = htons(base_port);
        rtcp_port = htons(base_port + 1);
 
        if (direct_rtp) {
-               nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
-               if (nf_nat_sdp_port &&
-                   !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
+               hooks = rcu_dereference(nf_nat_sip_hooks);
+               if (hooks &&
+                   !hooks->sdp_port(skb, protoff, dataoff, dptr, datalen,
                                     mediaoff, medialen, ntohs(rtp_port)))
                        goto err1;
        }
 
-       if (skip_expect)
+       if (skip_expect) {
+               rcu_read_unlock();
                return NF_ACCEPT;
+       }
 
        rtp_exp = nf_ct_expect_alloc(ct);
        if (rtp_exp == NULL)
@@ -995,10 +937,10 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
                          IPPROTO_UDP, NULL, &rtcp_port);
 
-       nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
-       if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
-               ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
-                                      rtp_exp, rtcp_exp,
+       hooks = rcu_dereference(nf_nat_sip_hooks);
+       if (hooks && ct->status & IPS_NAT_MASK && !direct_rtp)
+               ret = hooks->sdp_media(skb, protoff, dataoff, dptr,
+                                      datalen, rtp_exp, rtcp_exp,
                                       mediaoff, medialen, daddr);
        else {
                if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -1012,6 +954,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 err2:
        nf_ct_expect_put(rtp_exp);
 err1:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -1051,13 +994,12 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
        unsigned int caddr_len, maddr_len;
        unsigned int i;
        union nf_inet_addr caddr, maddr, rtp_addr;
+       const struct nf_nat_sip_hooks *hooks;
        unsigned int port;
        const struct sdp_media_type *t;
        int ret = NF_ACCEPT;
-       typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
-       typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
 
-       nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
+       hooks = rcu_dereference(nf_nat_sip_hooks);
 
        /* Find beginning of session description */
        if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1125,10 +1067,11 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
                }
 
                /* Update media connection address if present */
-               if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
-                       ret = nf_nat_sdp_addr(skb, protoff, dataoff,
+               if (maddr_len && hooks && ct->status & IPS_NAT_MASK) {
+                       ret = hooks->sdp_addr(skb, protoff, dataoff,
                                              dptr, datalen, mediaoff,
-                                             SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
+                                             SDP_HDR_CONNECTION,
+                                             SDP_HDR_MEDIA,
                                              &rtp_addr);
                        if (ret != NF_ACCEPT) {
                                nf_ct_helper_log(skb, ct, "cannot mangle SDP");
@@ -1139,10 +1082,11 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
        }
 
        /* Update session connection and owner addresses */
-       nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
-       if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sdp_session(skb, protoff, dataoff,
-                                        dptr, datalen, sdpoff, &rtp_addr);
+       hooks = rcu_dereference(nf_nat_sip_hooks);
+       if (hooks && ct->status & IPS_NAT_MASK)
+               ret = hooks->sdp_session(skb, protoff, dataoff,
+                                        dptr, datalen, sdpoff,
+                                        &rtp_addr);
 
        return ret;
 }
@@ -1242,11 +1186,11 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
        unsigned int matchoff, matchlen;
        struct nf_conntrack_expect *exp;
        union nf_inet_addr *saddr, daddr;
+       const struct nf_nat_sip_hooks *hooks;
        __be16 port;
        u8 proto;
        unsigned int expires = 0;
        int ret;
-       typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
 
        /* Expected connections can not register again. */
        if (ct->status & IPS_EXPECTED)
@@ -1309,10 +1253,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
        exp->helper = nfct_help(ct)->helper;
        exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
 
-       nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
-       if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
-                                       exp, matchoff, matchlen);
+       hooks = rcu_dereference(nf_nat_sip_hooks);
+       if (hooks && ct->status & IPS_NAT_MASK)
+               ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
+                                   exp, matchoff, matchlen);
        else {
                if (nf_ct_expect_related(exp) != 0) {
                        nf_ct_helper_log(skb, ct, "cannot add expectation");
@@ -1515,7 +1459,7 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
                           unsigned int protoff, unsigned int dataoff,
                           const char **dptr, unsigned int *datalen)
 {
-       typeof(nf_nat_sip_hook) nf_nat_sip;
+       const struct nf_nat_sip_hooks *hooks;
        int ret;
 
        if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
@@ -1524,9 +1468,9 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
                ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
 
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
-               nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
-               if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
-                                             dptr, datalen)) {
+               hooks = rcu_dereference(nf_nat_sip_hooks);
+               if (hooks && !hooks->msg(skb, protoff, dataoff,
+                                        dptr, datalen)) {
                        nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
                        ret = NF_DROP;
                }
@@ -1546,7 +1490,6 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        s16 diff, tdiff = 0;
        int ret = NF_ACCEPT;
        bool term;
-       typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 
        if (ctinfo != IP_CT_ESTABLISHED &&
            ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -1610,9 +1553,11 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        }
 
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
-               nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
-               if (nf_nat_sip_seq_adjust)
-                       nf_nat_sip_seq_adjust(skb, protoff, tdiff);
+               const struct nf_nat_sip_hooks *hooks;
+
+               hooks = rcu_dereference(nf_nat_sip_hooks);
+               if (hooks)
+                       hooks->seq_adjust(skb, protoff, tdiff);
        }
 
        return ret;
index 3deec997be89e32770750abc504922761d096935..61a3c927e63cf1c9f0f9b596155e5c8a74bcd71a 100644 (file)
 
 
 /* core.c */
-extern unsigned int nf_iterate(struct list_head *head,
-                               struct sk_buff *skb,
-                               unsigned int hook,
-                               const struct net_device *indev,
-                               const struct net_device *outdev,
-                               struct nf_hook_ops **elemp,
-                               int (*okfn)(struct sk_buff *),
-                               int hook_thresh);
+unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
+                       unsigned int hook, const struct net_device *indev,
+                       const struct net_device *outdev,
+                       struct nf_hook_ops **elemp,
+                       int (*okfn)(struct sk_buff *), int hook_thresh);
 
 /* nf_queue.c */
-extern int nf_queue(struct sk_buff *skb,
-                   struct nf_hook_ops *elem,
-                   u_int8_t pf, unsigned int hook,
-                   struct net_device *indev,
-                   struct net_device *outdev,
-                   int (*okfn)(struct sk_buff *),
-                   unsigned int queuenum);
-extern int __init netfilter_queue_init(void);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
+            unsigned int hook, struct net_device *indev,
+            struct net_device *outdev, int (*okfn)(struct sk_buff *),
+            unsigned int queuenum);
+int __init netfilter_queue_init(void);
 
 /* nf_log.c */
-extern int __init netfilter_log_init(void);
+int __init netfilter_log_init(void);
 
 #endif
index 6f0f4f7f68a5f4a7f85c03b09a35b456bedc33a7..63a81540221169fcc6115e8c36cc84500beb809d 100644 (file)
@@ -432,6 +432,26 @@ nf_nat_setup_info(struct nf_conn *ct,
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
 
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+       /* Force range to this IP; let proto decide mapping for
+        * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+        * Use reply in case it's already been mangled (eg local packet).
+        */
+       union nf_inet_addr ip =
+               (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+               ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
+               ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
+       struct nf_nat_range range = {
+               .flags          = NF_NAT_RANGE_MAP_IPS,
+               .min_addr       = ip,
+               .max_addr       = ip,
+       };
+       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
+
 /* Do packet manipulations according to nf_nat_setup_info. */
 unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
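
nf_nat_alloc_null_binding() gives NAT backends a one-call way to pin a connection to its current reply-direction address when no explicit NAT rule applies, so later packets of the flow are translated consistently. A hedged usage sketch (the example_nat_fallback() wrapper is illustrative, and the assumption that the declaration sits next to the other nf_nat setup helpers is mine, not the patch's):

static unsigned int example_nat_fallback(struct nf_conn *ct,
                                         unsigned int hooknum)
{
        /* no matching NAT rule: map the tuple onto itself so a manip
         * for this hook is still recorded on the conntrack entry */
        return nf_nat_alloc_null_binding(ct, hooknum);
}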
index f9790405b7fff5ead6004bf4266fe4e96abbc6cb..b4d691db955ed451954381e01baeed57ca588d11 100644 (file)
@@ -625,33 +625,26 @@ static struct nf_ct_helper_expectfn sip_nat = {
 
 static void __exit nf_nat_sip_fini(void)
 {
-       RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sip_hooks, NULL);
+
        nf_ct_helper_expectfn_unregister(&sip_nat);
        synchronize_rcu();
 }
 
+static const struct nf_nat_sip_hooks sip_hooks = {
+       .msg            = nf_nat_sip,
+       .seq_adjust     = nf_nat_sip_seq_adjust,
+       .expect         = nf_nat_sip_expect,
+       .sdp_addr       = nf_nat_sdp_addr,
+       .sdp_port       = nf_nat_sdp_port,
+       .sdp_session    = nf_nat_sdp_session,
+       .sdp_media      = nf_nat_sdp_media,
+};
+
 static int __init nf_nat_sip_init(void)
 {
-       BUG_ON(nf_nat_sip_hook != NULL);
-       BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
-       BUG_ON(nf_nat_sip_expect_hook != NULL);
-       BUG_ON(nf_nat_sdp_addr_hook != NULL);
-       BUG_ON(nf_nat_sdp_port_hook != NULL);
-       BUG_ON(nf_nat_sdp_session_hook != NULL);
-       BUG_ON(nf_nat_sdp_media_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
+       BUG_ON(nf_nat_sip_hooks != NULL);
+       RCU_INIT_POINTER(nf_nat_sip_hooks, &sip_hooks);
        nf_ct_helper_expectfn_register(&sip_nat);
        return 0;
 }
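
Collapsing the seven individual SIP NAT hook pointers into one nf_nat_sip_hooks structure means a caller needs a single rcu_dereference() to reach every callback, and registration becomes one pointer assignment. A hedged sketch of the resulting call pattern (example_call_sip_nat() is an illustrative name; the real call sites are in the conntrack helper hunks above):

static unsigned int example_call_sip_nat(struct sk_buff *skb,
                                         unsigned int protoff,
                                         unsigned int dataoff,
                                         const char **dptr,
                                         unsigned int *datalen)
{
        const struct nf_nat_sip_hooks *hooks;
        unsigned int verdict = NF_ACCEPT;

        rcu_read_lock();
        hooks = rcu_dereference(nf_nat_sip_hooks);
        /* a zero return from the NAT callback means the message could
         * not be mangled, which the helper maps to NF_DROP */
        if (hooks && !hooks->msg(skb, protoff, dataoff, dptr, datalen))
                verdict = NF_DROP;
        rcu_read_unlock();
        return verdict;
}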
index 6fd967c6278c86fbc81ad232035891656750330a..cdf4567ba9b330929aec53eb1c75d57a7047106e 100644 (file)
@@ -24,7 +24,7 @@
 int synproxy_net_id;
 EXPORT_SYMBOL_GPL(synproxy_net_id);
 
-void
+bool
 synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                       const struct tcphdr *th, struct synproxy_options *opts)
 {
@@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
        u8 buf[40], *ptr;
 
        ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
-       BUG_ON(ptr == NULL);
+       if (ptr == NULL)
+               return false;
 
        opts->options = 0;
        while (length > 0) {
@@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
 
                switch (opcode) {
                case TCPOPT_EOL:
-                       return;
+                       return true;
                case TCPOPT_NOP:
                        length--;
                        continue;
                default:
                        opsize = *ptr++;
                        if (opsize < 2)
-                               return;
+                               return true;
                        if (opsize > length)
-                               return;
+                               return true;
 
                        switch (opcode) {
                        case TCPOPT_MSS:
@@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                        length -= opsize;
                }
        }
+       return true;
 }
 EXPORT_SYMBOL_GPL(synproxy_parse_options);
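
synproxy_parse_options() now reports failure instead of hitting BUG() when the TCP option block cannot be pulled from the packet, so callers can simply drop such packets. A hedged sketch of a caller adapted to the bool return (example_handle_syn() is an illustrative name, not a function from the patch):

static unsigned int example_handle_syn(const struct sk_buff *skb,
                                       unsigned int doff,
                                       const struct tcphdr *th,
                                       struct synproxy_options *opts)
{
        /* malformed or truncated options: refuse to proxy this SYN */
        if (!synproxy_parse_options(skb, doff, th, opts))
                return NF_DROP;

        /* ... continue SYN proxying using the parsed options ... */
        return NF_ACCEPT;
}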
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
new file mode 100644 (file)
index 0000000..dcddc49
--- /dev/null
@@ -0,0 +1,3275 @@
+/*
+ * Copyright (c) 2007-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+static LIST_HEAD(nf_tables_expressions);
+
+/**
+ *     nft_register_afinfo - register nf_tables address family info
+ *
+ *     @afi: address family info to register
+ *
+ *     Register the address family for use with nf_tables. Returns zero on
+ *     success or a negative errno code otherwise.
+ */
+int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
+{
+       INIT_LIST_HEAD(&afi->tables);
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_add_tail(&afi->list, &net->nft.af_info);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_afinfo);
+
+/**
+ *     nft_unregister_afinfo - unregister nf_tables address family info
+ *
+ *     @afi: address family info to unregister
+ *
+ *     Unregister the address family for use with nf_tables.
+ */
+void nft_unregister_afinfo(struct nft_af_info *afi)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_del(&afi->list);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
+
+static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
+{
+       struct nft_af_info *afi;
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (afi->family == family)
+                       return afi;
+       }
+       return NULL;
+}
+
+static struct nft_af_info *
+nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
+{
+       struct nft_af_info *afi;
+
+       afi = nft_afinfo_lookup(net, family);
+       if (afi != NULL)
+               return afi;
+#ifdef CONFIG_MODULES
+       if (autoload) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-afinfo-%u", family);
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               afi = nft_afinfo_lookup(net, family);
+               if (afi != NULL)
+                       return ERR_PTR(-EAGAIN);
+       }
+#endif
+       return ERR_PTR(-EAFNOSUPPORT);
+}
+
+/*
+ * Tables
+ */
+
+static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
+                                         const struct nlattr *nla)
+{
+       struct nft_table *table;
+
+       list_for_each_entry(table, &afi->tables, list) {
+               if (!nla_strcmp(nla, table->name))
+                       return table;
+       }
+       return NULL;
+}
+
+static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
+                                               const struct nlattr *nla)
+{
+       struct nft_table *table;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       table = nft_table_lookup(afi, nla);
+       if (table != NULL)
+               return table;
+
+       return ERR_PTR(-ENOENT);
+}
+
+static inline u64 nf_tables_alloc_handle(struct nft_table *table)
+{
+       return ++table->hgenerator;
+}
+
+static struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
+
+static int __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+{
+       int i;
+
+       for (i=0; i<NFT_CHAIN_T_MAX; i++) {
+               if (chain_type[family][i] != NULL &&
+                   !nla_strcmp(nla, chain_type[family][i]->name))
+                       return i;
+       }
+       return -1;
+}
+
+static int nf_tables_chain_type_lookup(const struct nft_af_info *afi,
+                                      const struct nlattr *nla,
+                                      bool autoload)
+{
+       int type;
+
+       type = __nf_tables_chain_type_lookup(afi->family, nla);
+#ifdef CONFIG_MODULES
+       if (type < 0 && autoload) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-chain-%u-%.*s", afi->family,
+                              nla_len(nla)-1, (const char *)nla_data(nla));
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               type = __nf_tables_chain_type_lookup(afi->family, nla);
+       }
+#endif
+       return type;
+}
+
+static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+       [NFTA_TABLE_NAME]       = { .type = NLA_STRING },
+       [NFTA_TABLE_FLAGS]      = { .type = NLA_U32 },
+};
+
+static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
+                                    int event, u32 flags, int family,
+                                    const struct nft_table *table)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
+           nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_table_notify(const struct sk_buff *oskb,
+                                 const struct nlmsghdr *nlh,
+                                 const struct nft_table *table,
+                                 int event, int family)
+{
+       struct sk_buff *skb;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+       u32 seq = nlh ? nlh->nlmsg_seq : 0;
+       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+       bool report;
+       int err;
+
+       report = nlh ? nlmsg_report(nlh) : false;
+       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
+                                       family, table);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static int nf_tables_dump_tables(struct sk_buff *skb,
+                                struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (family != NFPROTO_UNSPEC && family != afi->family)
+                       continue;
+
+               list_for_each_entry(table, &afi->tables, list) {
+                       if (idx < s_idx)
+                               goto cont;
+                       if (idx > s_idx)
+                               memset(&cb->args[1], 0,
+                                      sizeof(cb->args) - sizeof(cb->args[0]));
+                       if (nf_tables_fill_table_info(skb,
+                                                     NETLINK_CB(cb->skb).portid,
+                                                     cb->nlh->nlmsg_seq,
+                                                     NFT_MSG_NEWTABLE,
+                                                     NLM_F_MULTI,
+                                                     afi->family, table) < 0)
+                               goto done;
+cont:
+                       idx++;
+               }
+       }
+done:
+       cb->args[0] = idx;
+       return skb->len;
+}
+
+static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       struct sk_buff *skb2;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       int err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_tables,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb2)
+               return -ENOMEM;
+
+       err = nf_tables_fill_table_info(skb2, NETLINK_CB(skb).portid,
+                                       nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
+                                       family, table);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static int nf_tables_table_enable(struct nft_table *table)
+{
+       struct nft_chain *chain;
+       int err, i = 0;
+
+       list_for_each_entry(chain, &table->chains, list) {
+               err = nf_register_hook(&nft_base_chain(chain)->ops);
+               if (err < 0)
+                       goto err;
+
+               i++;
+       }
+       return 0;
+err:
+       list_for_each_entry(chain, &table->chains, list) {
+               if (i-- <= 0)
+                       break;
+
+               nf_unregister_hook(&nft_base_chain(chain)->ops);
+       }
+       return err;
+}
+
+static int nf_tables_table_disable(struct nft_table *table)
+{
+       struct nft_chain *chain;
+
+       list_for_each_entry(chain, &table->chains, list)
+               nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+       return 0;
+}
+
+static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[],
+                             struct nft_af_info *afi, struct nft_table *table)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       int family = nfmsg->nfgen_family, ret = 0;
+
+       if (nla[NFTA_TABLE_FLAGS]) {
+               __be32 flags;
+
+               flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+               if (flags & ~NFT_TABLE_F_DORMANT)
+                       return -EINVAL;
+
+               if ((flags & NFT_TABLE_F_DORMANT) &&
+                   !(table->flags & NFT_TABLE_F_DORMANT)) {
+                       ret = nf_tables_table_disable(table);
+                       if (ret >= 0)
+                               table->flags |= NFT_TABLE_F_DORMANT;
+               } else if (!(flags & NFT_TABLE_F_DORMANT) &&
+                          table->flags & NFT_TABLE_F_DORMANT) {
+                       ret = nf_tables_table_enable(table);
+                       if (ret >= 0)
+                               table->flags &= ~NFT_TABLE_F_DORMANT;
+               }
+               if (ret < 0)
+                       goto err;
+       }
+
+       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+err:
+       return ret;
+}
+
+static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nlattr *name;
+       struct nft_af_info *afi;
+       struct nft_table *table;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       afi = nf_tables_afinfo_lookup(net, family, true);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       name = nla[NFTA_TABLE_NAME];
+       table = nf_tables_table_lookup(afi, name);
+       if (IS_ERR(table)) {
+               if (PTR_ERR(table) != -ENOENT)
+                       return PTR_ERR(table);
+               table = NULL;
+       }
+
+       if (table != NULL) {
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+               return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+       }
+
+       table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
+       if (table == NULL)
+               return -ENOMEM;
+
+       nla_strlcpy(table->name, name, nla_len(name));
+       INIT_LIST_HEAD(&table->chains);
+       INIT_LIST_HEAD(&table->sets);
+
+       if (nla[NFTA_TABLE_FLAGS]) {
+               __be32 flags;
+
+               flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+               if (flags & ~NFT_TABLE_F_DORMANT) {
+                       kfree(table);
+                       return -EINVAL;
+               }
+
+               table->flags |= flags;
+       }
+
+       list_add_tail(&table->list, &afi->tables);
+       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+       return 0;
+}
+
+static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       struct nft_af_info *afi;
+       struct nft_table *table;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       if (table->use)
+               return -EBUSY;
+
+       list_del(&table->list);
+       nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
+       kfree(table);
+       return 0;
+}
+
+int nft_register_chain_type(struct nf_chain_type *ctype)
+{
+       int err = 0;
+
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       if (chain_type[ctype->family][ctype->type] != NULL) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (!try_module_get(ctype->me))
+               goto out;
+
+       chain_type[ctype->family][ctype->type] = ctype;
+out:
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return err;
+}
+EXPORT_SYMBOL_GPL(nft_register_chain_type);
+
+void nft_unregister_chain_type(struct nf_chain_type *ctype)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       chain_type[ctype->family][ctype->type] = NULL;
+       module_put(ctype->me);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
+
+/*
+ * Chains
+ */
+
+static struct nft_chain *
+nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
+{
+       struct nft_chain *chain;
+
+       list_for_each_entry(chain, &table->chains, list) {
+               if (chain->handle == handle)
+                       return chain;
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
+                                               const struct nlattr *nla)
+{
+       struct nft_chain *chain;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       list_for_each_entry(chain, &table->chains, list) {
+               if (!nla_strcmp(nla, chain->name))
+                       return chain;
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
+       [NFTA_CHAIN_TABLE]      = { .type = NLA_STRING },
+       [NFTA_CHAIN_HANDLE]     = { .type = NLA_U64 },
+       [NFTA_CHAIN_NAME]       = { .type = NLA_STRING,
+                                   .len = NFT_CHAIN_MAXNAMELEN - 1 },
+       [NFTA_CHAIN_HOOK]       = { .type = NLA_NESTED },
+       [NFTA_CHAIN_POLICY]     = { .type = NLA_U32 },
+       [NFTA_CHAIN_TYPE]       = { .type = NLA_NUL_STRING },
+       [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
+       [NFTA_HOOK_HOOKNUM]     = { .type = NLA_U32 },
+       [NFTA_HOOK_PRIORITY]    = { .type = NLA_U32 },
+};
+
+static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
+{
+       struct nft_stats *cpu_stats, total;
+       struct nlattr *nest;
+       int cpu;
+
+       memset(&total, 0, sizeof(total));
+       for_each_possible_cpu(cpu) {
+               cpu_stats = per_cpu_ptr(stats, cpu);
+               total.pkts += cpu_stats->pkts;
+               total.bytes += cpu_stats->bytes;
+       }
+       nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
+           nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       return -ENOSPC;
+}
+
+static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
+                                    int event, u32 flags, int family,
+                                    const struct nft_table *table,
+                                    const struct nft_chain *chain)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
+               goto nla_put_failure;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain = nft_base_chain(chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+               struct nlattr *nest;
+
+               nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
+               if (nest == NULL)
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
+                       goto nla_put_failure;
+               nla_nest_end(skb, nest);
+
+               if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
+                                htonl(basechain->policy)))
+                       goto nla_put_failure;
+
+               if (nla_put_string(skb, NFTA_CHAIN_TYPE,
+                       chain_type[ops->pf][nft_base_chain(chain)->type]->name))
+                               goto nla_put_failure;
+
+               if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+                       goto nla_put_failure;
+       }
+
+       if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_chain_notify(const struct sk_buff *oskb,
+                                 const struct nlmsghdr *nlh,
+                                 const struct nft_table *table,
+                                 const struct nft_chain *chain,
+                                 int event, int family)
+{
+       struct sk_buff *skb;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+       u32 seq = nlh ? nlh->nlmsg_seq : 0;
+       bool report;
+       int err;
+
+       report = nlh ? nlmsg_report(nlh) : false;
+       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
+                                       table, chain);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static int nf_tables_dump_chains(struct sk_buff *skb,
+                                struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (family != NFPROTO_UNSPEC && family != afi->family)
+                       continue;
+
+               list_for_each_entry(table, &afi->tables, list) {
+                       list_for_each_entry(chain, &table->chains, list) {
+                               if (idx < s_idx)
+                                       goto cont;
+                               if (idx > s_idx)
+                                       memset(&cb->args[1], 0,
+                                              sizeof(cb->args) - sizeof(cb->args[0]));
+                               if (nf_tables_fill_chain_info(skb, NETLINK_CB(cb->skb).portid,
+                                                             cb->nlh->nlmsg_seq,
+                                                             NFT_MSG_NEWCHAIN,
+                                                             NLM_F_MULTI,
+                                                             afi->family, table, chain) < 0)
+                                       goto done;
+cont:
+                               idx++;
+                       }
+               }
+       }
+done:
+       cb->args[0] = idx;
+       return skb->len;
+}
+
+
+static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       struct sk_buff *skb2;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       int err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_chains,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb2)
+               return -ENOMEM;
+
+       err = nf_tables_fill_chain_info(skb2, NETLINK_CB(skb).portid,
+                                       nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
+                                       family, table, chain);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static int
+nf_tables_chain_policy(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+       switch (ntohl(nla_get_be32(attr))) {
+       case NF_DROP:
+               chain->policy = NF_DROP;
+               break;
+       case NF_ACCEPT:
+               chain->policy = NF_ACCEPT;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+       [NFTA_COUNTER_PACKETS]  = { .type = NLA_U64 },
+       [NFTA_COUNTER_BYTES]    = { .type = NLA_U64 },
+};
+
+static int
+nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+       struct nlattr *tb[NFTA_COUNTER_MAX+1];
+       struct nft_stats __percpu *newstats;
+       struct nft_stats *stats;
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
+               return -EINVAL;
+
+       newstats = alloc_percpu(struct nft_stats);
+       if (newstats == NULL)
+               return -ENOMEM;
+
+       /* Restore old counters on this cpu, no problem. Per-cpu statistics
+        * are not exposed to userspace.
+        */
+       stats = this_cpu_ptr(newstats);
+       stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+       stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+
+       if (chain->stats) {
+               /* nfnl_lock is held, add some nfnl function for this, later */
+               struct nft_stats __percpu *oldstats =
+                       rcu_dereference_protected(chain->stats, 1);
+
+               rcu_assign_pointer(chain->stats, newstats);
+               synchronize_rcu();
+               free_percpu(oldstats);
+       } else
+               rcu_assign_pointer(chain->stats, newstats);
+
+       return 0;
+}
+
+static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nlattr * uninitialized_var(name);
+       const struct nft_af_info *afi;
+       struct nft_table *table;
+       struct nft_chain *chain;
+       struct nft_base_chain *basechain = NULL;
+       struct nlattr *ha[NFTA_HOOK_MAX + 1];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       u64 handle = 0;
+       int err;
+       bool create;
+
+       create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+       afi = nf_tables_afinfo_lookup(net, family, true);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       if (table->use == UINT_MAX)
+               return -EOVERFLOW;
+
+       chain = NULL;
+       name = nla[NFTA_CHAIN_NAME];
+
+       if (nla[NFTA_CHAIN_HANDLE]) {
+               handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
+               chain = nf_tables_chain_lookup_byhandle(table, handle);
+               if (IS_ERR(chain))
+                       return PTR_ERR(chain);
+       } else {
+               chain = nf_tables_chain_lookup(table, name);
+               if (IS_ERR(chain)) {
+                       if (PTR_ERR(chain) != -ENOENT)
+                               return PTR_ERR(chain);
+                       chain = NULL;
+               }
+       }
+
+       if (chain != NULL) {
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+
+               if (nla[NFTA_CHAIN_HANDLE] && name &&
+                   !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
+                       return -EEXIST;
+
+               if (nla[NFTA_CHAIN_POLICY]) {
+                       if (!(chain->flags & NFT_BASE_CHAIN))
+                               return -EOPNOTSUPP;
+
+                       err = nf_tables_chain_policy(nft_base_chain(chain),
+                                                    nla[NFTA_CHAIN_POLICY]);
+                       if (err < 0)
+                               return err;
+               }
+
+               if (nla[NFTA_CHAIN_COUNTERS]) {
+                       if (!(chain->flags & NFT_BASE_CHAIN))
+                               return -EOPNOTSUPP;
+
+                       err = nf_tables_counters(nft_base_chain(chain),
+                                                nla[NFTA_CHAIN_COUNTERS]);
+                       if (err < 0)
+                               return err;
+               }
+
+               if (nla[NFTA_CHAIN_HANDLE] && name)
+                       nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+               goto notify;
+       }
+
+       if (nla[NFTA_CHAIN_HOOK]) {
+               struct nf_hook_ops *ops;
+               nf_hookfn *hookfn;
+               u32 hooknum;
+               int type = NFT_CHAIN_T_DEFAULT;
+
+               if (nla[NFTA_CHAIN_TYPE]) {
+                       type = nf_tables_chain_type_lookup(afi,
+                                                          nla[NFTA_CHAIN_TYPE],
+                                                          create);
+                       if (type < 0)
+                               return -ENOENT;
+               }
+
+               err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
+                                      nft_hook_policy);
+               if (err < 0)
+                       return err;
+               if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+                   ha[NFTA_HOOK_PRIORITY] == NULL)
+                       return -EINVAL;
+
+               hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+               if (hooknum >= afi->nhooks)
+                       return -EINVAL;
+
+               hookfn = chain_type[family][type]->fn[hooknum];
+               if (hookfn == NULL)
+                       return -EOPNOTSUPP;
+
+               basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
+               if (basechain == NULL)
+                       return -ENOMEM;
+
+               basechain->type = type;
+               chain = &basechain->chain;
+
+               ops = &basechain->ops;
+               ops->pf         = family;
+               ops->owner      = afi->owner;
+               ops->hooknum    = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+               ops->priority   = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+               ops->priv       = chain;
+               ops->hook       = hookfn;
+               if (afi->hooks[ops->hooknum])
+                       ops->hook = afi->hooks[ops->hooknum];
+
+               chain->flags |= NFT_BASE_CHAIN;
+
+               if (nla[NFTA_CHAIN_POLICY]) {
+                       err = nf_tables_chain_policy(basechain,
+                                                    nla[NFTA_CHAIN_POLICY]);
+                       if (err < 0) {
+                               free_percpu(basechain->stats);
+                               kfree(basechain);
+                               return err;
+                       }
+               } else
+                       basechain->policy = NF_ACCEPT;
+
+               if (nla[NFTA_CHAIN_COUNTERS]) {
+                       err = nf_tables_counters(basechain,
+                                                nla[NFTA_CHAIN_COUNTERS]);
+                       if (err < 0) {
+                               free_percpu(basechain->stats);
+                               kfree(basechain);
+                               return err;
+                       }
+               } else {
+                       struct nft_stats __percpu *newstats;
+
+                       newstats = alloc_percpu(struct nft_stats);
+                       if (newstats == NULL)
+                               return -ENOMEM;
+
+                       rcu_assign_pointer(nft_base_chain(chain)->stats,
+                                          newstats);
+               }
+       } else {
+               chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+               if (chain == NULL)
+                       return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&chain->rules);
+       chain->handle = nf_tables_alloc_handle(table);
+       chain->net = net;
+       chain->table = table;
+       nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+           chain->flags & NFT_BASE_CHAIN) {
+               err = nf_register_hook(&nft_base_chain(chain)->ops);
+               if (err < 0) {
+                       free_percpu(basechain->stats);
+                       kfree(basechain);
+                       return err;
+               }
+       }
+       list_add_tail(&chain->list, &table->chains);
+       table->use++;
+notify:
+       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
+                              family);
+       return 0;
+}
+
+static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+{
+       struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
+
+       BUG_ON(chain->use > 0);
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               free_percpu(nft_base_chain(chain)->stats);
+               kfree(nft_base_chain(chain));
+       } else
+               kfree(chain);
+}
+
+static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       struct nft_table *table;
+       struct nft_chain *chain;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       if (!list_empty(&chain->rules))
+               return -EBUSY;
+
+       list_del(&chain->list);
+       table->use--;
+
+       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+           chain->flags & NFT_BASE_CHAIN)
+               nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
+                              family);
+
+       /* Make sure all rule references are gone before this is released */
+       call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+       return 0;
+}
+
+static void nft_ctx_init(struct nft_ctx *ctx,
+                        const struct sk_buff *skb,
+                        const struct nlmsghdr *nlh,
+                        const struct nft_af_info *afi,
+                        const struct nft_table *table,
+                        const struct nft_chain *chain,
+                        const struct nlattr * const *nla)
+{
+       ctx->net   = sock_net(skb->sk);
+       ctx->skb   = skb;
+       ctx->nlh   = nlh;
+       ctx->afi   = afi;
+       ctx->table = table;
+       ctx->chain = chain;
+       ctx->nla   = nla;
+}
+
+/*
+ * Expressions
+ */
+
+/**
+ *     nft_register_expr - register nf_tables expr type
+ *     @type: expr type
+ *
+ *     Registers the expr type for use with nf_tables. Returns zero on
+ *     success or a negative errno code otherwise.
+ */
+int nft_register_expr(struct nft_expr_type *type)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_add_tail(&type->list, &nf_tables_expressions);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_expr);
+
+/**
+ *     nft_unregister_expr - unregister nf_tables expr type
+ *     @type: expr type
+ *
+ *     Unregisters the expr type for use with nf_tables.
+ */
+void nft_unregister_expr(struct nft_expr_type *type)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_del(&type->list);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_expr);
+
+static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+{
+       const struct nft_expr_type *type;
+
+       list_for_each_entry(type, &nf_tables_expressions, list) {
+               if (!nla_strcmp(nla, type->name))
+                       return type;
+       }
+       return NULL;
+}
+
+static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+{
+       const struct nft_expr_type *type;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       type = __nft_expr_type_get(nla);
+       if (type != NULL && try_module_get(type->owner))
+               return type;
+
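+       /*
+        * The type is not registered (yet): drop the nfnl mutex around
+        * request_module() and, if the lookup succeeds afterwards, return
+        * -EAGAIN so the caller can retry with the freshly loaded module.
+        */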
+#ifdef CONFIG_MODULES
+       if (type == NULL) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-expr-%.*s",
+                              nla_len(nla), (char *)nla_data(nla));
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               if (__nft_expr_type_get(nla))
+                       return ERR_PTR(-EAGAIN);
+       }
+#endif
+       return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
+       [NFTA_EXPR_NAME]        = { .type = NLA_STRING },
+       [NFTA_EXPR_DATA]        = { .type = NLA_NESTED },
+};
+
+static int nf_tables_fill_expr_info(struct sk_buff *skb,
+                                   const struct nft_expr *expr)
+{
+       if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
+               goto nla_put_failure;
+
+       if (expr->ops->dump) {
+               struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA);
+               if (data == NULL)
+                       goto nla_put_failure;
+               if (expr->ops->dump(skb, expr) < 0)
+                       goto nla_put_failure;
+               nla_nest_end(skb, data);
+       }
+
+       return skb->len;
+
+nla_put_failure:
+       return -1;
+}
+
+struct nft_expr_info {
+       const struct nft_expr_ops       *ops;
+       struct nlattr                   *tb[NFT_EXPR_MAXATTR + 1];
+};
+
+static int nf_tables_expr_parse(const struct nft_ctx *ctx,
+                               const struct nlattr *nla,
+                               struct nft_expr_info *info)
+{
+       const struct nft_expr_type *type;
+       const struct nft_expr_ops *ops;
+       struct nlattr *tb[NFTA_EXPR_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy);
+       if (err < 0)
+               return err;
+
+       type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+       if (IS_ERR(type))
+               return PTR_ERR(type);
+
+       if (tb[NFTA_EXPR_DATA]) {
+               err = nla_parse_nested(info->tb, type->maxattr,
+                                      tb[NFTA_EXPR_DATA], type->policy);
+               if (err < 0)
+                       goto err1;
+       } else
+               memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));
+
+       if (type->select_ops != NULL) {
+               ops = type->select_ops(ctx,
+                                      (const struct nlattr * const *)info->tb);
+               if (IS_ERR(ops)) {
+                       err = PTR_ERR(ops);
+                       goto err1;
+               }
+       } else
+               ops = type->ops;
+
+       info->ops = ops;
+       return 0;
+
+err1:
+       module_put(type->owner);
+       return err;
+}
+
+static int nf_tables_newexpr(const struct nft_ctx *ctx,
+                            const struct nft_expr_info *info,
+                            struct nft_expr *expr)
+{
+       const struct nft_expr_ops *ops = info->ops;
+       int err;
+
+       expr->ops = ops;
+       if (ops->init) {
+               err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
+               if (err < 0)
+                       goto err1;
+       }
+
+       return 0;
+
+err1:
+       expr->ops = NULL;
+       return err;
+}
+
+static void nf_tables_expr_destroy(struct nft_expr *expr)
+{
+       if (expr->ops->destroy)
+               expr->ops->destroy(expr);
+       module_put(expr->ops->type->owner);
+}
+
+/*
+ * Rules
+ */
+
+static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
+                                               u64 handle)
+{
+       struct nft_rule *rule;
+
+       // FIXME: this sucks
+       list_for_each_entry(rule, &chain->rules, list) {
+               if (handle == rule->handle)
+                       return rule;
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
+                                             const struct nlattr *nla)
+{
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
+}
+
+static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
+       [NFTA_RULE_TABLE]       = { .type = NLA_STRING },
+       [NFTA_RULE_CHAIN]       = { .type = NLA_STRING,
+                                   .len = NFT_CHAIN_MAXNAMELEN - 1 },
+       [NFTA_RULE_HANDLE]      = { .type = NLA_U64 },
+       [NFTA_RULE_EXPRESSIONS] = { .type = NLA_NESTED },
+       [NFTA_RULE_COMPAT]      = { .type = NLA_NESTED },
+       [NFTA_RULE_POSITION]    = { .type = NLA_U64 },
+};
+
+static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
+                                   int event, u32 flags, int family,
+                                   const struct nft_table *table,
+                                   const struct nft_chain *chain,
+                                   const struct nft_rule *rule)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       const struct nft_expr *expr, *next;
+       struct nlattr *list;
+       const struct nft_rule *prule;
+       int type = event | NFNL_SUBSYS_NFTABLES << 8;
+
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg),
+                       flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
+               goto nla_put_failure;
+
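+       /*
+        * Report the handle of the preceding rule as NFTA_RULE_POSITION;
+        * omitted for delete events and for the first rule in the chain.
+        */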
+       if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
+               prule = list_entry(rule->list.prev, struct nft_rule, list);
+               if (nla_put_be64(skb, NFTA_RULE_POSITION,
+                                cpu_to_be64(prule->handle)))
+                       goto nla_put_failure;
+       }
+
+       list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS);
+       if (list == NULL)
+               goto nla_put_failure;
+       nft_rule_for_each_expr(expr, next, rule) {
+               struct nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
+               if (elem == NULL)
+                       goto nla_put_failure;
+               if (nf_tables_fill_expr_info(skb, expr) < 0)
+                       goto nla_put_failure;
+               nla_nest_end(skb, elem);
+       }
+       nla_nest_end(skb, list);
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_rule_notify(const struct sk_buff *oskb,
+                                const struct nlmsghdr *nlh,
+                                const struct nft_table *table,
+                                const struct nft_chain *chain,
+                                const struct nft_rule *rule,
+                                int event, u32 flags, int family)
+{
+       struct sk_buff *skb;
+       u32 portid = NETLINK_CB(oskb).portid;
+       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+       u32 seq = nlh->nlmsg_seq;
+       bool report;
+       int err;
+
+       report = nlmsg_report(nlh);
+       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
+                                      family, table, chain, rule);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
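+/*
+ * Rule updates are staged through a two-bit generation mask: the bit
+ * selected by net->nft.gencursor covers the current generation, the other
+ * bit the next one.  A cleared bit means the rule is active in that
+ * generation, so new rules become visible and deleted rules disappear
+ * only once the commit path flips the cursor.
+ */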
+static inline bool
+nft_rule_is_active(struct net *net, const struct nft_rule *rule)
+{
+       return (rule->genmask & (1 << net->nft.gencursor)) == 0;
+}
+
+static inline int gencursor_next(struct net *net)
+{
+       return net->nft.gencursor+1 == 1 ? 1 : 0;
+}
+
+static inline int
+nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
+{
+       return (rule->genmask & (1 << gencursor_next(net))) == 0;
+}
+
+static inline void
+nft_rule_activate_next(struct net *net, struct nft_rule *rule)
+{
+       /* Now inactive, will be active in the future */
+       rule->genmask = (1 << net->nft.gencursor);
+}
+
+static inline void
+nft_rule_disactivate_next(struct net *net, struct nft_rule *rule)
+{
+       rule->genmask = (1 << gencursor_next(net));
+}
+
+static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
+{
+       rule->genmask = 0;
+}
+
+static int nf_tables_dump_rules(struct sk_buff *skb,
+                               struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       const struct nft_rule *rule;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       u8 genctr = ACCESS_ONCE(net->nft.genctr);
+       u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (family != NFPROTO_UNSPEC && family != afi->family)
+                       continue;
+
+               list_for_each_entry(table, &afi->tables, list) {
+                       list_for_each_entry(chain, &table->chains, list) {
+                               list_for_each_entry(rule, &chain->rules, list) {
+                                       if (!nft_rule_is_active(net, rule))
+                                               goto cont;
+                                       if (idx < s_idx)
+                                               goto cont;
+                                       if (idx > s_idx)
+                                               memset(&cb->args[1], 0,
+                                                      sizeof(cb->args) - sizeof(cb->args[0]));
+                                       if (nf_tables_fill_rule_info(skb, NETLINK_CB(cb->skb).portid,
+                                                                     cb->nlh->nlmsg_seq,
+                                                                     NFT_MSG_NEWRULE,
+                                                                     NLM_F_MULTI | NLM_F_APPEND,
+                                                                     afi->family, table, chain, rule) < 0)
+                                               goto done;
+cont:
+                                       idx++;
+                               }
+                       }
+               }
+       }
+done:
+       /* Invalidate this dump if a transition to a new generation happened */
+       if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
+               return -EBUSY;
+
+       cb->args[0] = idx;
+       return skb->len;
+}
+
+static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
+                            const struct nlmsghdr *nlh,
+                            const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       const struct nft_rule *rule;
+       struct sk_buff *skb2;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       int err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_rules,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+       if (IS_ERR(rule))
+               return PTR_ERR(rule);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb2)
+               return -ENOMEM;
+
+       err = nf_tables_fill_rule_info(skb2, NETLINK_CB(skb).portid,
+                                      nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
+                                      family, table, chain, rule);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+{
+       struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
+       struct nft_expr *expr;
+
+       /*
+        * Careful: some expressions might not be initialized in case this
+        * is called on error from nf_tables_newrule().
+        */
+       expr = nft_expr_first(rule);
+       while (expr->ops && expr != nft_expr_last(rule)) {
+               nf_tables_expr_destroy(expr);
+               expr = nft_expr_next(expr);
+       }
+       kfree(rule);
+}
+
+static void nf_tables_rule_destroy(struct nft_rule *rule)
+{
+       call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
+}
+
+#define NFT_RULE_MAXEXPRS      128
+
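+/* Scratch area used while parsing the expressions of a single new rule. */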
+static struct nft_expr_info *info;
+
+static struct nft_rule_trans *
+nf_tables_trans_add(struct nft_rule *rule, const struct nft_ctx *ctx)
+{
+       struct nft_rule_trans *rupd;
+
+       rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
+       if (rupd == NULL)
+               return NULL;
+
+       rupd->chain = ctx->chain;
+       rupd->table = ctx->table;
+       rupd->rule = rule;
+       rupd->family = ctx->afi->family;
+       rupd->nlh = ctx->nlh;
+       list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+
+       return rupd;
+}
+
+static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
+                            const struct nlmsghdr *nlh,
+                            const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       struct net *net = sock_net(skb->sk);
+       struct nft_table *table;
+       struct nft_chain *chain;
+       struct nft_rule *rule, *old_rule = NULL;
+       struct nft_rule_trans *repl = NULL;
+       struct nft_expr *expr;
+       struct nft_ctx ctx;
+       struct nlattr *tmp;
+       unsigned int size, i, n;
+       int err, rem;
+       bool create;
+       u64 handle, pos_handle;
+
+       create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       if (nla[NFTA_RULE_HANDLE]) {
+               handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+               rule = __nf_tables_rule_lookup(chain, handle);
+               if (IS_ERR(rule))
+                       return PTR_ERR(rule);
+
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       old_rule = rule;
+               else
+                       return -EOPNOTSUPP;
+       } else {
+               if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EINVAL;
+               handle = nf_tables_alloc_handle(table);
+       }
+
+       if (nla[NFTA_RULE_POSITION]) {
+               if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+                       return -EOPNOTSUPP;
+
+               pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+               old_rule = __nf_tables_rule_lookup(chain, pos_handle);
+               if (IS_ERR(old_rule))
+                       return PTR_ERR(old_rule);
+       }
+
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+       n = 0;
+       size = 0;
+       if (nla[NFTA_RULE_EXPRESSIONS]) {
+               nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
+                       err = -EINVAL;
+                       if (nla_type(tmp) != NFTA_LIST_ELEM)
+                               goto err1;
+                       if (n == NFT_RULE_MAXEXPRS)
+                               goto err1;
+                       err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
+                       if (err < 0)
+                               goto err1;
+                       size += info[n].ops->size;
+                       n++;
+               }
+       }
+
+       err = -ENOMEM;
+       rule = kzalloc(sizeof(*rule) + size, GFP_KERNEL);
+       if (rule == NULL)
+               goto err1;
+
+       nft_rule_activate_next(net, rule);
+
+       rule->handle = handle;
+       rule->dlen   = size;
+
+       expr = nft_expr_first(rule);
+       for (i = 0; i < n; i++) {
+               err = nf_tables_newexpr(&ctx, &info[i], expr);
+               if (err < 0)
+                       goto err2;
+               info[i].ops = NULL;
+               expr = nft_expr_next(expr);
+       }
+
+       if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+               if (nft_rule_is_active_next(net, old_rule)) {
+                       repl = nf_tables_trans_add(old_rule, &ctx);
+                       if (repl == NULL) {
+                               err = -ENOMEM;
+                               goto err2;
+                       }
+                       nft_rule_disactivate_next(net, old_rule);
+                       list_add_tail(&rule->list, &old_rule->list);
+               } else {
+                       err = -ENOENT;
+                       goto err2;
+               }
+       } else if (nlh->nlmsg_flags & NLM_F_APPEND) {
+               if (old_rule)
+                       list_add_rcu(&rule->list, &old_rule->list);
+               else
+                       list_add_tail_rcu(&rule->list, &chain->rules);
+       } else {
+               if (old_rule)
+                       list_add_tail_rcu(&rule->list, &old_rule->list);
+               else
+                       list_add_rcu(&rule->list, &chain->rules);
+       }
+
+       if (nf_tables_trans_add(rule, &ctx) == NULL) {
+               err = -ENOMEM;
+               goto err3;
+       }
+       return 0;
+
+err3:
+       list_del_rcu(&rule->list);
+       if (repl) {
+               list_del_rcu(&repl->rule->list);
+               list_del(&repl->list);
+               nft_rule_clear(net, repl->rule);
+               kfree(repl);
+       }
+err2:
+       nf_tables_rule_destroy(rule);
+err1:
+       for (i = 0; i < n; i++) {
+               if (info[i].ops != NULL)
+                       module_put(info[i].ops->type->owner);
+       }
+       return err;
+}
+
+static int
+nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
+{
+       /* You cannot delete the same rule twice */
+       if (nft_rule_is_active_next(ctx->net, rule)) {
+               if (nf_tables_trans_add(rule, ctx) == NULL)
+                       return -ENOMEM;
+               nft_rule_disactivate_next(ctx->net, rule);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
+                            const struct nlmsghdr *nlh,
+                            const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       struct net *net = sock_net(skb->sk);
+       const struct nft_table *table;
+       struct nft_chain *chain;
+       struct nft_rule *rule, *tmp;
+       int family = nfmsg->nfgen_family, err = 0;
+       struct nft_ctx ctx;
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+       if (nla[NFTA_RULE_HANDLE]) {
+               rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+               if (IS_ERR(rule))
+                       return PTR_ERR(rule);
+
+               err = nf_tables_delrule_one(&ctx, rule);
+       } else {
+               /* Remove all rules in this chain */
+               list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
+                       err = nf_tables_delrule_one(&ctx, rule);
+                       if (err < 0)
+                               break;
+               }
+       }
+
+       return err;
+}
+
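+/* Apply the rule updates queued on the commit list for this batch. */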
+static int nf_tables_commit(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_rule_trans *rupd, *tmp;
+
+       /* Bump generation counter, invalidate any dump in progress */
+       net->nft.genctr++;
+
+       /* A new generation has just started */
+       net->nft.gencursor = gencursor_next(net);
+
+       /* Make sure all packets have left the previous generation before
+        * purging old rules.
+        */
+       synchronize_rcu();
+
+       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+               /* Delete this rule from the dirty list */
+               list_del(&rupd->list);
+
+               /* This rule was inactive in the past and just became active.
+                * Clear the next bit of the genmask since its meaning has
+                * changed: after the cursor flip it refers to the future
+                * generation.
+                */
+               if (nft_rule_is_active(net, rupd->rule)) {
+                       nft_rule_clear(net, rupd->rule);
+                       nf_tables_rule_notify(skb, rupd->nlh, rupd->table,
+                                             rupd->chain, rupd->rule,
+                                             NFT_MSG_NEWRULE, 0,
+                                             rupd->family);
+                       kfree(rupd);
+                       continue;
+               }
+
+               /* This rule is in the past, get rid of it */
+               list_del_rcu(&rupd->rule->list);
+               nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
+                                     rupd->rule, NFT_MSG_DELRULE, 0,
+                                     rupd->family);
+               nf_tables_rule_destroy(rupd->rule);
+               kfree(rupd);
+       }
+
+       return 0;
+}
+
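+/* Undo the rule updates queued on the commit list for this batch. */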
+static int nf_tables_abort(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_rule_trans *rupd, *tmp;
+
+       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+               /* Delete all rules from the dirty list */
+               list_del(&rupd->list);
+
+               if (!nft_rule_is_active_next(net, rupd->rule)) {
+                       nft_rule_clear(net, rupd->rule);
+                       kfree(rupd);
+                       continue;
+               }
+
+               /* This rule is inactive, get rid of it */
+               list_del_rcu(&rupd->rule->list);
+               nf_tables_rule_destroy(rupd->rule);
+               kfree(rupd);
+       }
+       return 0;
+}
+
+/*
+ * Sets
+ */
+
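+/* Registered set backends; additions and removals are serialized by the
+ * NFNL_SUBSYS_NFTABLES mutex (see nft_register_set/nft_unregister_set).
+ */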
+static LIST_HEAD(nf_tables_set_ops);
+
+int nft_register_set(struct nft_set_ops *ops)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_add_tail(&ops->list, &nf_tables_set_ops);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_set);
+
+void nft_unregister_set(struct nft_set_ops *ops)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_del(&ops->list);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_set);
+
+static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+{
+       const struct nft_set_ops *ops;
+       u32 features;
+
+#ifdef CONFIG_MODULES
+       if (list_empty(&nf_tables_set_ops)) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-set");
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               if (!list_empty(&nf_tables_set_ops))
+                       return ERR_PTR(-EAGAIN);
+       }
+#endif
+       features = 0;
+       if (nla[NFTA_SET_FLAGS] != NULL) {
+               features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+               features &= NFT_SET_INTERVAL | NFT_SET_MAP;
+       }
+
+       // FIXME: implement selection properly
+       list_for_each_entry(ops, &nf_tables_set_ops, list) {
+               if ((ops->features & features) != features)
+                       continue;
+               if (!try_module_get(ops->owner))
+                       continue;
+               return ops;
+       }
+
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
+       [NFTA_SET_TABLE]                = { .type = NLA_STRING },
+       [NFTA_SET_NAME]                 = { .type = NLA_STRING },
+       [NFTA_SET_FLAGS]                = { .type = NLA_U32 },
+       [NFTA_SET_KEY_TYPE]             = { .type = NLA_U32 },
+       [NFTA_SET_KEY_LEN]              = { .type = NLA_U32 },
+       [NFTA_SET_DATA_TYPE]            = { .type = NLA_U32 },
+       [NFTA_SET_DATA_LEN]             = { .type = NLA_U32 },
+};
+
+static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
+                                    const struct sk_buff *skb,
+                                    const struct nlmsghdr *nlh,
+                                    const struct nlattr * const nla[])
+{
+       struct net *net = sock_net(skb->sk);
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table = NULL;
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       if (nla[NFTA_SET_TABLE] != NULL) {
+               table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+               if (IS_ERR(table))
+                       return PTR_ERR(table);
+       }
+
+       nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+       return 0;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+                                    const struct nlattr *nla)
+{
+       struct nft_set *set;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       list_for_each_entry(set, &table->sets, list) {
+               if (!nla_strcmp(nla, set->name))
+                       return set;
+       }
+       return ERR_PTR(-ENOENT);
+}
+
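+/*
+ * Set names may contain a single printf-style "%d" template; the lowest
+ * index not yet used by another set in the table is substituted for it,
+ * tracked via a one-page bitmap.
+ */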
+static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+                                   const char *name)
+{
+       const struct nft_set *i;
+       const char *p;
+       unsigned long *inuse;
+       unsigned int n = 0;
+
+       p = strnchr(name, IFNAMSIZ, '%');
+       if (p != NULL) {
+               if (p[1] != 'd' || strchr(p + 2, '%'))
+                       return -EINVAL;
+
+               inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+               if (inuse == NULL)
+                       return -ENOMEM;
+
+               list_for_each_entry(i, &ctx->table->sets, list) {
+                       if (!sscanf(i->name, name, &n))
+                               continue;
+                       if (n < 0 || n > BITS_PER_LONG * PAGE_SIZE)
+                               continue;
+                       set_bit(n, inuse);
+               }
+
+               n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE);
+               free_page((unsigned long)inuse);
+       }
+
+       snprintf(set->name, sizeof(set->name), name, n);
+       list_for_each_entry(i, &ctx->table->sets, list) {
+               if (!strcmp(set->name, i->name))
+                       return -ENFILE;
+       }
+       return 0;
+}
+
+static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+                             const struct nft_set *set, u16 event, u16 flags)
+{
+       struct nfgenmsg *nfmsg;
+       struct nlmsghdr *nlh;
+       u32 portid = NETLINK_CB(ctx->skb).portid;
+       u32 seq = ctx->nlh->nlmsg_seq;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+                       flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = ctx->afi->family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_SET_NAME, set->name))
+               goto nla_put_failure;
+       if (set->flags != 0)
+               if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+                       goto nla_put_failure;
+
+       if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
+               goto nla_put_failure;
+       if (set->flags & NFT_SET_MAP) {
+               if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
+                       goto nla_put_failure;
+       }
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_set_notify(const struct nft_ctx *ctx,
+                               const struct nft_set *set,
+                               int event)
+{
+       struct sk_buff *skb;
+       u32 portid = NETLINK_CB(ctx->skb).portid;
+       bool report;
+       int err;
+
+       report = nlmsg_report(ctx->nlh);
+       if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_set(skb, ctx, set, event, 0);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
+                                    struct netlink_callback *cb)
+{
+       const struct nft_set *set;
+       unsigned int idx = 0, s_idx = cb->args[0];
+
+       if (cb->args[1])
+               return skb->len;
+
+       list_for_each_entry(set, &ctx->table->sets, list) {
+               if (idx < s_idx)
+                       goto cont;
+               if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+                                      NLM_F_MULTI) < 0) {
+                       cb->args[0] = idx;
+                       goto done;
+               }
+cont:
+               idx++;
+       }
+       cb->args[1] = 1;
+done:
+       return skb->len;
+}
+
+static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
+                                  struct netlink_callback *cb)
+{
+       const struct nft_set *set;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
+
+       if (cb->args[1])
+               return skb->len;
+
+       list_for_each_entry(table, &ctx->afi->tables, list) {
+               if (cur_table && cur_table != table)
+                       continue;
+
+               ctx->table = table;
+               list_for_each_entry(set, &ctx->table->sets, list) {
+                       if (idx < s_idx)
+                               goto cont;
+                       if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+                                              NLM_F_MULTI) < 0) {
+                               cb->args[0] = idx;
+                               cb->args[2] = (unsigned long) table;
+                               goto done;
+                       }
+cont:
+                       idx++;
+               }
+       }
+       cb->args[1] = 1;
+done:
+       return skb->len;
+}
+
+static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       struct nlattr *nla[NFTA_SET_MAX + 1];
+       struct nft_ctx ctx;
+       int err, ret;
+
+       err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
+                         nft_set_policy);
+       if (err < 0)
+               return err;
+
+       err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+       if (err < 0)
+               return err;
+
+       if (ctx.table == NULL)
+               ret = nf_tables_dump_sets_all(&ctx, skb, cb);
+       else
+               ret = nf_tables_dump_sets_table(&ctx, skb, cb);
+
+       return ret;
+}
+
+static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
+                           const struct nlmsghdr *nlh,
+                           const struct nlattr * const nla[])
+{
+       const struct nft_set *set;
+       struct nft_ctx ctx;
+       struct sk_buff *skb2;
+       int err;
+
+       /* Verify existence before starting dump */
+       err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_sets,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
+                           const struct nlmsghdr *nlh,
+                           const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_set_ops *ops;
+       const struct nft_af_info *afi;
+       struct net *net = sock_net(skb->sk);
+       struct nft_table *table;
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       char name[IFNAMSIZ];
+       unsigned int size;
+       bool create;
+       u32 ktype, klen, dlen, dtype, flags;
+       int err;
+
+       if (nla[NFTA_SET_TABLE] == NULL ||
+           nla[NFTA_SET_NAME] == NULL ||
+           nla[NFTA_SET_KEY_LEN] == NULL)
+               return -EINVAL;
+
+       ktype = NFT_DATA_VALUE;
+       if (nla[NFTA_SET_KEY_TYPE] != NULL) {
+               ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+               if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+                       return -EINVAL;
+       }
+
+       klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+       if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data))
+               return -EINVAL;
+
+       flags = 0;
+       if (nla[NFTA_SET_FLAGS] != NULL) {
+               flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+               if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
+                             NFT_SET_INTERVAL | NFT_SET_MAP))
+                       return -EINVAL;
+       }
+
+       dtype = 0;
+       dlen  = 0;
+       if (nla[NFTA_SET_DATA_TYPE] != NULL) {
+               if (!(flags & NFT_SET_MAP))
+                       return -EINVAL;
+
+               dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+               if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+                   dtype != NFT_DATA_VERDICT)
+                       return -EINVAL;
+
+               if (dtype != NFT_DATA_VERDICT) {
+                       if (nla[NFTA_SET_DATA_LEN] == NULL)
+                               return -EINVAL;
+                       dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+                       if (dlen == 0 ||
+                           dlen > FIELD_SIZEOF(struct nft_data, data))
+                               return -EINVAL;
+               } else
+                       dlen = sizeof(struct nft_data);
+       } else if (flags & NFT_SET_MAP)
+               return -EINVAL;
+
+       create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+
+       set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
+       if (IS_ERR(set)) {
+               if (PTR_ERR(set) != -ENOENT)
+                       return PTR_ERR(set);
+               set = NULL;
+       }
+
+       if (set != NULL) {
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+               return 0;
+       }
+
+       if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+               return -ENOENT;
+
+       ops = nft_select_set_ops(nla);
+       if (IS_ERR(ops))
+               return PTR_ERR(ops);
+
+       size = 0;
+       if (ops->privsize != NULL)
+               size = ops->privsize(nla);
+
+       err = -ENOMEM;
+       set = kzalloc(sizeof(*set) + size, GFP_KERNEL);
+       if (set == NULL)
+               goto err1;
+
+       nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
+       err = nf_tables_set_alloc_name(&ctx, set, name);
+       if (err < 0)
+               goto err2;
+
+       INIT_LIST_HEAD(&set->bindings);
+       set->ops   = ops;
+       set->ktype = ktype;
+       set->klen  = klen;
+       set->dtype = dtype;
+       set->dlen  = dlen;
+       set->flags = flags;
+
+       err = ops->init(set, nla);
+       if (err < 0)
+               goto err2;
+
+       list_add_tail(&set->list, &table->sets);
+       nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET);
+       return 0;
+
+err2:
+       kfree(set);
+err1:
+       module_put(ops->owner);
+       return err;
+}
+
+static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       list_del(&set->list);
+       if (!(set->flags & NFT_SET_ANONYMOUS))
+               nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+
+       set->ops->destroy(set);
+       module_put(set->ops->owner);
+       kfree(set);
+}
+
+static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
+                           const struct nlmsghdr *nlh,
+                           const struct nlattr * const nla[])
+{
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       int err;
+
+       if (nla[NFTA_SET_TABLE] == NULL)
+               return -EINVAL;
+
+       err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+       if (!list_empty(&set->bindings))
+               return -EBUSY;
+
+       nf_tables_set_destroy(&ctx, set);
+       return 0;
+}
+
+static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+                                       const struct nft_set *set,
+                                       const struct nft_set_iter *iter,
+                                       const struct nft_set_elem *elem)
+{
+       enum nft_registers dreg;
+
+       dreg = nft_type_to_reg(set->dtype);
+       return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+}
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                      struct nft_set_binding *binding)
+{
+       struct nft_set_binding *i;
+       struct nft_set_iter iter;
+
+       if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+               return -EBUSY;
+
+       if (set->flags & NFT_SET_MAP) {
+               /* If the set is already bound to the same chain, all
+                * jumps are already validated for that chain.
+                */
+               list_for_each_entry(i, &set->bindings, list) {
+                       if (i->chain == binding->chain)
+                               goto bind;
+               }
+
+               iter.skip       = 0;
+               iter.count      = 0;
+               iter.err        = 0;
+               iter.fn         = nf_tables_bind_check_setelem;
+
+               set->ops->walk(ctx, set, &iter);
+               if (iter.err < 0) {
+                       /* Destroy anonymous sets if binding fails */
+                       if (set->flags & NFT_SET_ANONYMOUS)
+                               nf_tables_set_destroy(ctx, set);
+
+                       return iter.err;
+               }
+       }
+bind:
+       binding->chain = ctx->chain;
+       list_add_tail(&binding->list, &set->bindings);
+       return 0;
+}
+
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                         struct nft_set_binding *binding)
+{
+       list_del(&binding->list);
+
+       if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+               nf_tables_set_destroy(ctx, set);
+}
+
+/*
+ * Set elements
+ */
+
+static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
+       [NFTA_SET_ELEM_KEY]             = { .type = NLA_NESTED },
+       [NFTA_SET_ELEM_DATA]            = { .type = NLA_NESTED },
+       [NFTA_SET_ELEM_FLAGS]           = { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
+       [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_LIST_ELEMENTS]   = { .type = NLA_NESTED },
+};
+
+static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
+                                     const struct sk_buff *skb,
+                                     const struct nlmsghdr *nlh,
+                                     const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       struct net *net = sock_net(skb->sk);
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+       return 0;
+}
+
+static int nf_tables_fill_setelem(struct sk_buff *skb,
+                                 const struct nft_set *set,
+                                 const struct nft_set_elem *elem)
+{
+       unsigned char *b = skb_tail_pointer(skb);
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, NFTA_LIST_ELEM);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
+                         set->klen) < 0)
+               goto nla_put_failure;
+
+       if (set->flags & NFT_SET_MAP &&
+           !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
+           nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+                         set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+                         set->dlen) < 0)
+               goto nla_put_failure;
+
+       if (elem->flags != 0)
+               if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
+                       goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -EMSGSIZE;
+}
+
+struct nft_set_dump_args {
+       const struct netlink_callback   *cb;
+       struct nft_set_iter             iter;
+       struct sk_buff                  *skb;
+};
+
+static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+                                 const struct nft_set *set,
+                                 const struct nft_set_iter *iter,
+                                 const struct nft_set_elem *elem)
+{
+       struct nft_set_dump_args *args;
+
+       args = container_of(iter, struct nft_set_dump_args, iter);
+       return nf_tables_fill_setelem(args->skb, set, elem);
+}
+
+static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct nft_set *set;
+       struct nft_set_dump_args args;
+       struct nft_ctx ctx;
+       struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
+       struct nfgenmsg *nfmsg;
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+       u32 portid, seq;
+       int event, err;
+
+       nfmsg = nlmsg_data(cb->nlh);
+       err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_ELEM_LIST_MAX,
+                         nft_set_elem_list_policy);
+       if (err < 0)
+               return err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       event  = NFT_MSG_NEWSETELEM;
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       portid = NETLINK_CB(cb->skb).portid;
+       seq    = cb->nlh->nlmsg_seq;
+
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+                       NLM_F_MULTI);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = NFPROTO_UNSPEC;
+       nfmsg->version      = NFNETLINK_V0;
+       nfmsg->res_id       = 0;
+
+       if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
+               goto nla_put_failure;
+
+       nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       args.cb         = cb;
+       args.skb        = skb;
+       args.iter.skip  = cb->args[0];
+       args.iter.count = 0;
+       args.iter.err   = 0;
+       args.iter.fn    = nf_tables_dump_setelem;
+       set->ops->walk(&ctx, set, &args.iter);
+
+       nla_nest_end(skb, nest);
+       nlmsg_end(skb, nlh);
+
+       if (args.iter.err && args.iter.err != -EMSGSIZE)
+               return args.iter.err;
+       if (args.iter.count == cb->args[0])
+               return 0;
+
+       cb->args[0] = args.iter.count;
+       return skb->len;
+
+nla_put_failure:
+       return -ENOSPC;
+}
+
+static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               const struct nlattr * const nla[])
+{
+       const struct nft_set *set;
+       struct nft_ctx ctx;
+       int err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_set,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+       return -EOPNOTSUPP;
+}
+
+static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
+                           const struct nlattr *attr)
+{
+       struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+       struct nft_data_desc d1, d2;
+       struct nft_set_elem elem;
+       struct nft_set_binding *binding;
+       enum nft_registers dreg;
+       int err;
+
+       err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+                              nft_set_elem_policy);
+       if (err < 0)
+               return err;
+
+       if (nla[NFTA_SET_ELEM_KEY] == NULL)
+               return -EINVAL;
+
+       elem.flags = 0;
+       if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
+               elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+               if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+                       return -EINVAL;
+       }
+
+       if (set->flags & NFT_SET_MAP) {
+               if (nla[NFTA_SET_ELEM_DATA] == NULL &&
+                   !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+                       return -EINVAL;
+       } else {
+               if (nla[NFTA_SET_ELEM_DATA] != NULL)
+                       return -EINVAL;
+       }
+
+       err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
+       if (err < 0)
+               goto err1;
+       err = -EINVAL;
+       if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
+               goto err2;
+
+       err = -EEXIST;
+       if (set->ops->get(set, &elem) == 0)
+               goto err2;
+
+       if (nla[NFTA_SET_ELEM_DATA] != NULL) {
+               err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+               if (err < 0)
+                       goto err2;
+
+               err = -EINVAL;
+               if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
+                       goto err3;
+
+               dreg = nft_type_to_reg(set->dtype);
+               list_for_each_entry(binding, &set->bindings, list) {
+                       struct nft_ctx bind_ctx = {
+                               .afi    = ctx->afi,
+                               .table  = ctx->table,
+                               .chain  = binding->chain,
+                       };
+
+                       err = nft_validate_data_load(&bind_ctx, dreg,
+                                                    &elem.data, d2.type);
+                       if (err < 0)
+                               goto err3;
+               }
+       }
+
+       err = set->ops->insert(set, &elem);
+       if (err < 0)
+               goto err3;
+
+       return 0;
+
+err3:
+       if (nla[NFTA_SET_ELEM_DATA] != NULL)
+               nft_data_uninit(&elem.data, d2.type);
+err2:
+       nft_data_uninit(&elem.key, d1.type);
+err1:
+       return err;
+}
+
+static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               const struct nlattr * const nla[])
+{
+       const struct nlattr *attr;
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       int rem, err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+       if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+               return -EBUSY;
+
+       nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+               err = nft_add_set_elem(&ctx, set, attr);
+               if (err < 0)
+                       return err;
+       }
+       return 0;
+}
+
+static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
+                          const struct nlattr *attr)
+{
+       struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+       struct nft_data_desc desc;
+       struct nft_set_elem elem;
+       int err;
+
+       err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+                              nft_set_elem_policy);
+       if (err < 0)
+               goto err1;
+
+       err = -EINVAL;
+       if (nla[NFTA_SET_ELEM_KEY] == NULL)
+               goto err1;
+
+       err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]);
+       if (err < 0)
+               goto err1;
+
+       err = -EINVAL;
+       if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+               goto err2;
+
+       err = set->ops->get(set, &elem);
+       if (err < 0)
+               goto err2;
+
+       set->ops->remove(set, &elem);
+
+       nft_data_uninit(&elem.key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(&elem.data, set->dtype);
+
+err2:
+       nft_data_uninit(&elem.key, desc.type);
+err1:
+       return err;
+}
+
+static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               const struct nlattr * const nla[])
+{
+       const struct nlattr *attr;
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       int rem, err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+       if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+               return -EBUSY;
+
+       nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+               err = nft_del_setelem(&ctx, set, attr);
+               if (err < 0)
+                       return err;
+       }
+       return 0;
+}
+
+static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
+       [NFT_MSG_NEWTABLE] = {
+               .call           = nf_tables_newtable,
+               .attr_count     = NFTA_TABLE_MAX,
+               .policy         = nft_table_policy,
+       },
+       [NFT_MSG_GETTABLE] = {
+               .call           = nf_tables_gettable,
+               .attr_count     = NFTA_TABLE_MAX,
+               .policy         = nft_table_policy,
+       },
+       [NFT_MSG_DELTABLE] = {
+               .call           = nf_tables_deltable,
+               .attr_count     = NFTA_TABLE_MAX,
+               .policy         = nft_table_policy,
+       },
+       [NFT_MSG_NEWCHAIN] = {
+               .call           = nf_tables_newchain,
+               .attr_count     = NFTA_CHAIN_MAX,
+               .policy         = nft_chain_policy,
+       },
+       [NFT_MSG_GETCHAIN] = {
+               .call           = nf_tables_getchain,
+               .attr_count     = NFTA_CHAIN_MAX,
+               .policy         = nft_chain_policy,
+       },
+       [NFT_MSG_DELCHAIN] = {
+               .call           = nf_tables_delchain,
+               .attr_count     = NFTA_CHAIN_MAX,
+               .policy         = nft_chain_policy,
+       },
+       [NFT_MSG_NEWRULE] = {
+               .call_batch     = nf_tables_newrule,
+               .attr_count     = NFTA_RULE_MAX,
+               .policy         = nft_rule_policy,
+       },
+       [NFT_MSG_GETRULE] = {
+               .call           = nf_tables_getrule,
+               .attr_count     = NFTA_RULE_MAX,
+               .policy         = nft_rule_policy,
+       },
+       [NFT_MSG_DELRULE] = {
+               .call_batch     = nf_tables_delrule,
+               .attr_count     = NFTA_RULE_MAX,
+               .policy         = nft_rule_policy,
+       },
+       [NFT_MSG_NEWSET] = {
+               .call           = nf_tables_newset,
+               .attr_count     = NFTA_SET_MAX,
+               .policy         = nft_set_policy,
+       },
+       [NFT_MSG_GETSET] = {
+               .call           = nf_tables_getset,
+               .attr_count     = NFTA_SET_MAX,
+               .policy         = nft_set_policy,
+       },
+       [NFT_MSG_DELSET] = {
+               .call           = nf_tables_delset,
+               .attr_count     = NFTA_SET_MAX,
+               .policy         = nft_set_policy,
+       },
+       [NFT_MSG_NEWSETELEM] = {
+               .call           = nf_tables_newsetelem,
+               .attr_count     = NFTA_SET_ELEM_LIST_MAX,
+               .policy         = nft_set_elem_list_policy,
+       },
+       [NFT_MSG_GETSETELEM] = {
+               .call           = nf_tables_getsetelem,
+               .attr_count     = NFTA_SET_ELEM_LIST_MAX,
+               .policy         = nft_set_elem_list_policy,
+       },
+       [NFT_MSG_DELSETELEM] = {
+               .call           = nf_tables_delsetelem,
+               .attr_count     = NFTA_SET_ELEM_LIST_MAX,
+               .policy         = nft_set_elem_list_policy,
+       },
+};
+
+static const struct nfnetlink_subsystem nf_tables_subsys = {
+       .name           = "nf_tables",
+       .subsys_id      = NFNL_SUBSYS_NFTABLES,
+       .cb_count       = NFT_MSG_MAX,
+       .cb             = nf_tables_cb,
+       .commit         = nf_tables_commit,
+       .abort          = nf_tables_abort,
+};
+
+/*
+ * Loop detection - walk through the ruleset beginning at the destination chain
+ * of a new jump until either the source chain is reached (loop) or all
+ * reachable chains have been traversed.
+ *
+ * The loop check is performed whenever a new jump verdict is added to an
+ * expression or verdict map, or when a verdict map is bound to a new chain.
+ */
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+                                const struct nft_chain *chain);
+
+static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+                                       const struct nft_set *set,
+                                       const struct nft_set_iter *iter,
+                                       const struct nft_set_elem *elem)
+{
+       switch (elem->data.verdict) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               return nf_tables_check_loops(ctx, elem->data.chain);
+       default:
+               return 0;
+       }
+}
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+                                const struct nft_chain *chain)
+{
+       const struct nft_rule *rule;
+       const struct nft_expr *expr, *last;
+       const struct nft_set *set;
+       struct nft_set_binding *binding;
+       struct nft_set_iter iter;
+
+       if (ctx->chain == chain)
+               return -ELOOP;
+
+       list_for_each_entry(rule, &chain->rules, list) {
+               nft_rule_for_each_expr(expr, last, rule) {
+                       const struct nft_data *data = NULL;
+                       int err;
+
+                       if (!expr->ops->validate)
+                               continue;
+
+                       err = expr->ops->validate(ctx, expr, &data);
+                       if (err < 0)
+                               return err;
+
+                       if (data == NULL)
+                               continue;
+
+                       switch (data->verdict) {
+                       case NFT_JUMP:
+                       case NFT_GOTO:
+                               err = nf_tables_check_loops(ctx, data->chain);
+                               if (err < 0)
+                                       return err;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       list_for_each_entry(set, &ctx->table->sets, list) {
+               if (!(set->flags & NFT_SET_MAP) ||
+                   set->dtype != NFT_DATA_VERDICT)
+                       continue;
+
+               list_for_each_entry(binding, &set->bindings, list) {
+                       if (binding->chain != chain)
+                               continue;
+
+                       iter.skip       = 0;
+                       iter.count      = 0;
+                       iter.err        = 0;
+                       iter.fn         = nf_tables_loop_check_setelem;
+
+                       set->ops->walk(ctx, set, &iter);
+                       if (iter.err < 0)
+                               return iter.err;
+               }
+       }
+
+       return 0;
+}
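
A minimal, standalone sketch of the loop-detection walk described in the comment above: starting from the destination chain of a new jump, every reachable jump/goto target is visited recursively, and the walk fails as soon as the source chain is reached (the kernel returns -ELOOP in that case). The demo_chain/check_loops names are illustrative only, not the kernel's structures.

#include <stdio.h>

#define MAX_TARGETS 4

struct demo_chain {
	const char *name;
	const struct demo_chain *targets[MAX_TARGETS];	/* jump/goto destinations */
};

/* Return -1 (the kernel would return -ELOOP) if @source is reachable from @chain. */
static int check_loops(const struct demo_chain *source,
		       const struct demo_chain *chain)
{
	int i;

	if (chain == source)
		return -1;

	for (i = 0; i < MAX_TARGETS && chain->targets[i] != NULL; i++) {
		if (check_loops(source, chain->targets[i]) < 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_chain a = { .name = "a" };
	struct demo_chain b = { .name = "b", .targets = { &a } };

	/* A new jump a -> b would close the cycle a -> b -> a. */
	printf("adding a -> b creates a loop: %s\n",
	       check_loops(&a, &b) < 0 ? "yes" : "no");
	return 0;
}

Here the jump from chain a to chain b is rejected because b already jumps back to a.
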
+
+/**
+ *     nft_validate_input_register - validate an expression's input register
+ *
+ *     @reg: the register number
+ *
+ *     Validate that the input register is one of the general purpose
+ *     registers.
+ */
+int nft_validate_input_register(enum nft_registers reg)
+{
+       if (reg <= NFT_REG_VERDICT)
+               return -EINVAL;
+       if (reg > NFT_REG_MAX)
+               return -ERANGE;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_input_register);
+
+/**
+ *     nft_validate_output_register - validate an expression's output register
+ *
+ *     @reg: the register number
+ *
+ *     Validate that the output register is one of the general purpose
+ *     registers or the verdict register.
+ */
+int nft_validate_output_register(enum nft_registers reg)
+{
+       if (reg < NFT_REG_VERDICT)
+               return -EINVAL;
+       if (reg > NFT_REG_MAX)
+               return -ERANGE;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_output_register);
+
+/**
+ *     nft_validate_data_load - validate an expression's data load
+ *
+ *     @ctx: context of the expression performing the load
+ *     @reg: the destination register number
+ *     @data: the data to load
+ *     @type: the data type
+ *
+ *     Validate that a data load uses the appropriate data type for
+ *     the destination register. A value of NULL for the data means
+ *     that it is runtime-gathered data, which is always of type
+ *     NFT_DATA_VALUE.
+ */
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+                          const struct nft_data *data,
+                          enum nft_data_types type)
+{
+       int err;
+
+       switch (reg) {
+       case NFT_REG_VERDICT:
+               if (data == NULL || type != NFT_DATA_VERDICT)
+                       return -EINVAL;
+
+               if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
+                       err = nf_tables_check_loops(ctx, data->chain);
+                       if (err < 0)
+                               return err;
+
+                       if (ctx->chain->level + 1 > data->chain->level) {
+                               if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
+                                       return -EMLINK;
+                               data->chain->level = ctx->chain->level + 1;
+                       }
+               }
+
+               return 0;
+       default:
+               if (data != NULL && type != NFT_DATA_VALUE)
+                       return -EINVAL;
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(nft_validate_data_load);
+
+static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
+       [NFTA_VERDICT_CODE]     = { .type = NLA_U32 },
+       [NFTA_VERDICT_CHAIN]    = { .type = NLA_STRING,
+                                   .len = NFT_CHAIN_MAXNAMELEN - 1 },
+};
+
+static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+                           struct nft_data_desc *desc, const struct nlattr *nla)
+{
+       struct nlattr *tb[NFTA_VERDICT_MAX + 1];
+       struct nft_chain *chain;
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFTA_VERDICT_CODE])
+               return -EINVAL;
+       data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+       switch (data->verdict) {
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+       case NFT_CONTINUE:
+       case NFT_BREAK:
+       case NFT_RETURN:
+               desc->len = sizeof(data->verdict);
+               break;
+       case NFT_JUMP:
+       case NFT_GOTO:
+               if (!tb[NFTA_VERDICT_CHAIN])
+                       return -EINVAL;
+               chain = nf_tables_chain_lookup(ctx->table,
+                                              tb[NFTA_VERDICT_CHAIN]);
+               if (IS_ERR(chain))
+                       return PTR_ERR(chain);
+               if (chain->flags & NFT_BASE_CHAIN)
+                       return -EOPNOTSUPP;
+
+               chain->use++;
+               data->chain = chain;
+               desc->len = sizeof(data);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       desc->type = NFT_DATA_VERDICT;
+       return 0;
+}
+
+static void nft_verdict_uninit(const struct nft_data *data)
+{
+       switch (data->verdict) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               data->chain->use--;
+               break;
+       }
+}
+
+static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data)
+{
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, NFTA_DATA_VERDICT);
+       if (!nest)
+               goto nla_put_failure;
+
+       if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict)))
+               goto nla_put_failure;
+
+       switch (data->verdict) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               if (nla_put_string(skb, NFTA_VERDICT_CHAIN, data->chain->name))
+                       goto nla_put_failure;
+       }
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
+                         struct nft_data_desc *desc, const struct nlattr *nla)
+{
+       unsigned int len;
+
+       len = nla_len(nla);
+       if (len == 0)
+               return -EINVAL;
+       if (len > sizeof(data->data))
+               return -EOVERFLOW;
+
+       nla_memcpy(data->data, nla, sizeof(data->data));
+       desc->type = NFT_DATA_VALUE;
+       desc->len  = len;
+       return 0;
+}
+
+static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
+                         unsigned int len)
+{
+       return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
+}
+
+static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
+       [NFTA_DATA_VALUE]       = { .type = NLA_BINARY,
+                                   .len  = FIELD_SIZEOF(struct nft_data, data) },
+       [NFTA_DATA_VERDICT]     = { .type = NLA_NESTED },
+};
+
+/**
+ *     nft_data_init - parse nf_tables data netlink attributes
+ *
+ *     @ctx: context of the expression using the data
+ *     @data: destination struct nft_data
+ *     @desc: data description
+ *     @nla: netlink attribute containing data
+ *
+ *     Parse the netlink data attributes and initialize a struct nft_data.
+ *     The type and length of data are returned in the data description.
+ *
+ *     The caller can indicate that it only wants to accept data of type
+ *     NFT_DATA_VALUE by passing NULL for the ctx argument.
+ */
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+                 struct nft_data_desc *desc, const struct nlattr *nla)
+{
+       struct nlattr *tb[NFTA_DATA_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy);
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DATA_VALUE])
+               return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
+       if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
+               return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(nft_data_init);
+
+/**
+ *     nft_data_uninit - release an nft_data item
+ *
+ *     @data: struct nft_data to release
+ *     @type: type of data
+ *
+ *     Release an nft_data item. NFT_DATA_VALUE types can be silently discarded;
+ *     all others need to be released by calling this function.
+ */
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
+{
+       switch (type) {
+       case NFT_DATA_VALUE:
+               return;
+       case NFT_DATA_VERDICT:
+               return nft_verdict_uninit(data);
+       default:
+               WARN_ON(1);
+       }
+}
+EXPORT_SYMBOL_GPL(nft_data_uninit);
+
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+                 enum nft_data_types type, unsigned int len)
+{
+       struct nlattr *nest;
+       int err;
+
+       nest = nla_nest_start(skb, attr);
+       if (nest == NULL)
+               return -1;
+
+       switch (type) {
+       case NFT_DATA_VALUE:
+               err = nft_value_dump(skb, data, len);
+               break;
+       case NFT_DATA_VERDICT:
+               err = nft_verdict_dump(skb, data);
+               break;
+       default:
+               err = -EINVAL;
+               WARN_ON(1);
+       }
+
+       nla_nest_end(skb, nest);
+       return err;
+}
+EXPORT_SYMBOL_GPL(nft_data_dump);
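
For orientation, a hedged userspace sketch (assuming libmnl and the nf_tables uapi header added elsewhere in this series) of the two nested layouts that nft_data_init() accepts: a value blob carried in NFTA_DATA_VALUE, and a jump verdict carried in NFTA_DATA_VERDICT with NFTA_VERDICT_CODE/NFTA_VERDICT_CHAIN inside. The enclosing attributes used here, NFTA_CMP_DATA and NFTA_IMMEDIATE_DATA, are just examples of expression attributes that embed such data.

#include <arpa/inet.h>
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nf_tables.h>

/* NFTA_CMP_DATA { NFTA_DATA_VALUE } -- the layout parsed by nft_value_init() */
static void put_cmp_value(struct nlmsghdr *nlh, uint32_t be_value)
{
	struct nlattr *data = mnl_attr_nest_start(nlh, NFTA_CMP_DATA);

	mnl_attr_put(nlh, NFTA_DATA_VALUE, sizeof(be_value), &be_value);
	mnl_attr_nest_end(nlh, data);
}

/* NFTA_IMMEDIATE_DATA { NFTA_DATA_VERDICT { CODE, CHAIN } } -- parsed by nft_verdict_init() */
static void put_jump_verdict(struct nlmsghdr *nlh, const char *chain)
{
	struct nlattr *data = mnl_attr_nest_start(nlh, NFTA_IMMEDIATE_DATA);
	struct nlattr *verdict = mnl_attr_nest_start(nlh, NFTA_DATA_VERDICT);

	mnl_attr_put_u32(nlh, NFTA_VERDICT_CODE, htonl(NFT_JUMP));
	mnl_attr_put_strz(nlh, NFTA_VERDICT_CHAIN, chain);
	mnl_attr_nest_end(nlh, verdict);
	mnl_attr_nest_end(nlh, data);
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);

	put_cmp_value(nlh, htonl(0x0a000001));	/* value 10.0.0.1, big endian */
	put_jump_verdict(nlh, "forward");	/* jump to a chain named "forward" */
	return 0;
}

The verdict code is sent in network byte order because nft_verdict_init() converts it with ntohl() before use.
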
+
+static int nf_tables_init_net(struct net *net)
+{
+       INIT_LIST_HEAD(&net->nft.af_info);
+       INIT_LIST_HEAD(&net->nft.commit_list);
+       return 0;
+}
+
+static struct pernet_operations nf_tables_net_ops = {
+       .init   = nf_tables_init_net,
+};
+
+static int __init nf_tables_module_init(void)
+{
+       int err;
+
+       info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
+                      GFP_KERNEL);
+       if (info == NULL) {
+               err = -ENOMEM;
+               goto err1;
+       }
+
+       err = nf_tables_core_module_init();
+       if (err < 0)
+               goto err2;
+
+       err = nfnetlink_subsys_register(&nf_tables_subsys);
+       if (err < 0)
+               goto err3;
+
+       pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
+       return register_pernet_subsys(&nf_tables_net_ops);
+err3:
+       nf_tables_core_module_exit();
+err2:
+       kfree(info);
+err1:
+       return err;
+}
+
+static void __exit nf_tables_module_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_net_ops);
+       nfnetlink_subsys_unregister(&nf_tables_subsys);
+       nf_tables_core_module_exit();
+       kfree(info);
+}
+
+module_init(nf_tables_module_init);
+module_exit(nf_tables_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
new file mode 100644 (file)
index 0000000..cb9e685
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+
+static void nft_cmp_fast_eval(const struct nft_expr *expr,
+                             struct nft_data data[NFT_REG_MAX + 1])
+{
+       const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+       u32 mask;
+
+       mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
+       if ((data[priv->sreg].data[0] & mask) == priv->data)
+               return;
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static bool nft_payload_fast_eval(const struct nft_expr *expr,
+                                 struct nft_data data[NFT_REG_MAX + 1],
+                                 const struct nft_pktinfo *pkt)
+{
+       const struct nft_payload *priv = nft_expr_priv(expr);
+       const struct sk_buff *skb = pkt->skb;
+       struct nft_data *dest = &data[priv->dreg];
+       unsigned char *ptr;
+
+       if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
+               ptr = skb_network_header(skb);
+       else
+               ptr = skb_network_header(skb) + pkt->xt.thoff;
+
+       ptr += priv->offset;
+
+       if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
+               return false;
+
+       if (priv->len == 2)
+               *(u16 *)dest->data = *(u16 *)ptr;
+       else if (priv->len == 4)
+               *(u32 *)dest->data = *(u32 *)ptr;
+       else
+               *(u8 *)dest->data = *(u8 *)ptr;
+       return true;
+}
+
+struct nft_jumpstack {
+       const struct nft_chain  *chain;
+       const struct nft_rule   *rule;
+       int                     rulenum;
+};
+
+static inline void
+nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
+               struct nft_jumpstack *jumpstack, unsigned int stackptr)
+{
+       struct nft_stats __percpu *stats;
+       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(chain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
+}
+
+enum nft_trace {
+       NFT_TRACE_RULE,
+       NFT_TRACE_RETURN,
+       NFT_TRACE_POLICY,
+};
+
+static const char *const comments[] = {
+       [NFT_TRACE_RULE]        = "rule",
+       [NFT_TRACE_RETURN]      = "return",
+       [NFT_TRACE_POLICY]      = "policy",
+};
+
+static struct nf_loginfo trace_loginfo = {
+       .type = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level = 4,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+                                   const struct nft_chain *chain,
+                                   int rulenum, enum nft_trace type)
+{
+       struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+       nf_log_packet(net, pkt->xt.family, pkt->hooknum, pkt->skb, pkt->in,
+                     pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+                     chain->table->name, chain->name, comments[type],
+                     rulenum);
+}
+
+unsigned int
+nft_do_chain_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+{
+       const struct nft_chain *chain = ops->priv;
+       const struct nft_rule *rule;
+       const struct nft_expr *expr, *last;
+       struct nft_data data[NFT_REG_MAX + 1];
+       unsigned int stackptr = 0;
+       struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
+       int rulenum = 0;
+       /*
+        * Cache the generation cursor to avoid problems in case it is
+        * updated while traversing the ruleset.
+        */
+       unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
+
+do_chain:
+       rule = list_entry(&chain->rules, struct nft_rule, list);
+next_rule:
+       data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+       list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
+
+               /* This rule is not active, skip. */
+               if (unlikely(rule->genmask & (1 << gencursor)))
+                       continue;
+
+               rulenum++;
+
+               nft_rule_for_each_expr(expr, last, rule) {
+                       if (expr->ops == &nft_cmp_fast_ops)
+                               nft_cmp_fast_eval(expr, data);
+                       else if (expr->ops != &nft_payload_fast_ops ||
+                                !nft_payload_fast_eval(expr, data, pkt))
+                               expr->ops->eval(expr, data, pkt);
+
+                       if (data[NFT_REG_VERDICT].verdict != NFT_CONTINUE)
+                               break;
+               }
+
+               switch (data[NFT_REG_VERDICT].verdict) {
+               case NFT_BREAK:
+                       data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+                       /* fall through */
+               case NFT_CONTINUE:
+                       continue;
+               }
+               break;
+       }
+
+       switch (data[NFT_REG_VERDICT].verdict) {
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+               return data[NFT_REG_VERDICT].verdict;
+       case NFT_JUMP:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+               BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
+               jumpstack[stackptr].chain = chain;
+               jumpstack[stackptr].rule  = rule;
+               jumpstack[stackptr].rulenum = rulenum;
+               stackptr++;
+               /* fall through */
+       case NFT_GOTO:
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
+       case NFT_RETURN:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
+
+               /* fall through */
+       case NFT_CONTINUE:
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       if (stackptr > 0) {
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
+
+               stackptr--;
+               chain = jumpstack[stackptr].chain;
+               rule  = jumpstack[stackptr].rule;
+               rulenum = jumpstack[stackptr].rulenum;
+               goto next_rule;
+       }
+       nft_chain_stats(chain, pkt, jumpstack, stackptr);
+
+       if (unlikely(pkt->skb->nf_trace))
+               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+
+       return nft_base_chain(chain)->policy;
+}
+EXPORT_SYMBOL_GPL(nft_do_chain_pktinfo);
+
+int __init nf_tables_core_module_init(void)
+{
+       int err;
+
+       err = nft_immediate_module_init();
+       if (err < 0)
+               goto err1;
+
+       err = nft_cmp_module_init();
+       if (err < 0)
+               goto err2;
+
+       err = nft_lookup_module_init();
+       if (err < 0)
+               goto err3;
+
+       err = nft_bitwise_module_init();
+       if (err < 0)
+               goto err4;
+
+       err = nft_byteorder_module_init();
+       if (err < 0)
+               goto err5;
+
+       err = nft_payload_module_init();
+       if (err < 0)
+               goto err6;
+
+       return 0;
+
+err6:
+       nft_byteorder_module_exit();
+err5:
+       nft_bitwise_module_exit();
+err4:
+       nft_lookup_module_exit();
+err3:
+       nft_cmp_module_exit();
+err2:
+       nft_immediate_module_exit();
+err1:
+       return err;
+}
+
+void nf_tables_core_module_exit(void)
+{
+       nft_payload_module_exit();
+       nft_byteorder_module_exit();
+       nft_bitwise_module_exit();
+       nft_lookup_module_exit();
+       nft_cmp_module_exit();
+       nft_immediate_module_exit();
+}
index 572d87dc116ffa838d2f9f8838129156add7284e..027f16af51a0f1bde86b1805259166b4befa18ff 100644 (file)
@@ -147,9 +147,6 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        const struct nfnetlink_subsystem *ss;
        int type, err;
 
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
        /* All the messages must at least contain nfgenmsg */
        if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
                return 0;
@@ -217,9 +214,179 @@ replay:
        }
 }
 
+static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+                               u_int16_t subsys_id)
+{
+       struct sk_buff *nskb, *oskb = skb;
+       struct net *net = sock_net(skb->sk);
+       const struct nfnetlink_subsystem *ss;
+       const struct nfnl_callback *nc;
+       bool success = true, done = false;
+       int err;
+
+       if (subsys_id >= NFNL_SUBSYS_COUNT)
+               return netlink_ack(skb, nlh, -EINVAL);
+replay:
+       nskb = netlink_skb_clone(oskb, GFP_KERNEL);
+       if (!nskb)
+               return netlink_ack(oskb, nlh, -ENOMEM);
+
+       nskb->sk = oskb->sk;
+       skb = nskb;
+
+       nfnl_lock(subsys_id);
+       ss = rcu_dereference_protected(table[subsys_id].subsys,
+                                      lockdep_is_held(&table[subsys_id].mutex));
+       if (!ss) {
+#ifdef CONFIG_MODULES
+               nfnl_unlock(subsys_id);
+               request_module("nfnetlink-subsys-%d", subsys_id);
+               nfnl_lock(subsys_id);
+               ss = rcu_dereference_protected(table[subsys_id].subsys,
+                                              lockdep_is_held(&table[subsys_id].mutex));
+               if (!ss)
+#endif
+               {
+                       nfnl_unlock(subsys_id);
+                       kfree_skb(nskb);
+                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               }
+       }
+
+       if (!ss->commit || !ss->abort) {
+               nfnl_unlock(subsys_id);
+               kfree_skb(nskb);
+               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+       }
+
+       while (skb->len >= nlmsg_total_size(0)) {
+               int msglen, type;
+
+               nlh = nlmsg_hdr(skb);
+               err = 0;
+
+               if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               /* Only requests are handled by the kernel */
+               if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               type = nlh->nlmsg_type;
+               if (type == NFNL_MSG_BATCH_BEGIN) {
+                       /* Malformed: Batch begin twice */
+                       success = false;
+                       goto done;
+               } else if (type == NFNL_MSG_BATCH_END) {
+                       done = true;
+                       goto done;
+               } else if (type < NLMSG_MIN_TYPE) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               /* We only accept a batch with messages for the same
+                * subsystem.
+                */
+               if (NFNL_SUBSYS_ID(type) != subsys_id) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               nc = nfnetlink_find_client(type, ss);
+               if (!nc) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               {
+                       int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
+                       u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
+                       struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
+                       struct nlattr *attr = (void *)nlh + min_len;
+                       int attrlen = nlh->nlmsg_len - min_len;
+
+                       err = nla_parse(cda, ss->cb[cb_id].attr_count,
+                                       attr, attrlen, ss->cb[cb_id].policy);
+                       if (err < 0)
+                               goto ack;
+
+                       if (nc->call_batch) {
+                               err = nc->call_batch(net->nfnl, skb, nlh,
+                                                    (const struct nlattr **)cda);
+                       }
+
+                       /* The lock was released to autoload a module; we
+                        * have to abort and start from scratch using the
+                        * original skb.
+                        */
+                       if (err == -EAGAIN) {
+                               ss->abort(skb);
+                               nfnl_unlock(subsys_id);
+                               kfree_skb(nskb);
+                               goto replay;
+                       }
+               }
+ack:
+               if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+                       /* We don't stop processing the batch on errors; thus,
+                        * userspace gets all the errors that the batch
+                        * triggers.
+                        */
+                       netlink_ack(skb, nlh, err);
+                       if (err)
+                               success = false;
+               }
+
+               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+               if (msglen > skb->len)
+                       msglen = skb->len;
+               skb_pull(skb, msglen);
+       }
+done:
+       if (success && done)
+               ss->commit(skb);
+       else
+               ss->abort(skb);
+
+       nfnl_unlock(subsys_id);
+       kfree_skb(nskb);
+}
+
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
-       netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+       struct nlmsghdr *nlh = nlmsg_hdr(skb);
+       struct net *net = sock_net(skb->sk);
+       int msglen;
+
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+               return netlink_ack(skb, nlh, -EPERM);
+
+       if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+           skb->len < nlh->nlmsg_len)
+               return;
+
+       if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
+               struct nfgenmsg *nfgenmsg;
+
+               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+               if (msglen > skb->len)
+                       msglen = skb->len;
+
+               if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+                   skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+                       return;
+
+               nfgenmsg = nlmsg_data(nlh);
+               skb_pull(skb, msglen);
+               nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+       } else {
+               netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+       }
 }
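
The receive path above treats a message of type NFNL_MSG_BATCH_BEGIN as the start of a transaction: the nfgenmsg res_id of that first message selects the subsystem, the contained messages are replayed through the subsystem's call_batch handlers, and the whole batch is committed or aborted once NFNL_MSG_BATCH_END (or a malformed message) is seen. A minimal sketch, assuming the nfnetlink uapi header exports NFNL_MSG_BATCH_BEGIN/END and NFNL_SUBSYS_NFTABLES as this series adds, of how userspace frames such a batch with raw netlink headers; note that nfnetlink_rcv() above passes res_id through without byte-order conversion.

#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>

/* Append one nfnetlink message (netlink header + nfgenmsg) and return the new tail. */
static size_t put_msg(char *buf, size_t off, uint16_t type, uint16_t res_id)
{
	struct nlmsghdr *nlh = (struct nlmsghdr *)(buf + off);
	struct nfgenmsg *nfg;

	memset(nlh, 0, NLMSG_SPACE(sizeof(*nfg)));
	nlh->nlmsg_len   = NLMSG_LENGTH(sizeof(*nfg));
	nlh->nlmsg_type  = type;
	nlh->nlmsg_flags = NLM_F_REQUEST;

	nfg = NLMSG_DATA(nlh);
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version      = NFNETLINK_V0;
	nfg->res_id       = res_id;	/* read raw by nfnetlink_rcv() above */

	return off + NLMSG_ALIGN(nlh->nlmsg_len);
}

int main(void)
{
	char buf[512];
	size_t off = 0;

	off = put_msg(buf, off, NFNL_MSG_BATCH_BEGIN, NFNL_SUBSYS_NFTABLES);
	/* ... NFT_MSG_NEWTABLE / NEWCHAIN / NEWRULE messages would go here ... */
	off = put_msg(buf, off, NFNL_MSG_BATCH_END, NFNL_SUBSYS_NFTABLES);

	/* buf[0..off) is then sent in a single write on a NETLINK_NETFILTER socket. */
	return 0;
}
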
 
 #ifdef CONFIG_MODULES
index 50580494148d00433c092ee0ad2a7097a1653221..476accd171452fcfbfbe01018dcadd55fee41f67 100644 (file)
@@ -49,10 +49,8 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
 };
 
 static int
-ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
-                         struct nf_conntrack_l4proto *l4proto,
-                         struct net *net,
-                         const struct nlattr *attr)
+ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto,
+                         struct net *net, const struct nlattr *attr)
 {
        int ret = 0;
 
@@ -64,8 +62,7 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
                if (ret < 0)
                        return ret;
 
-               ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
-                                                         &timeout->data);
+               ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
        }
        return ret;
 }
@@ -123,7 +120,8 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
                                goto err_proto_put;
                        }
 
-                       ret = ctnl_timeout_parse_policy(matching, l4proto, net,
+                       ret = ctnl_timeout_parse_policy(&matching->data,
+                                                       l4proto, net,
                                                        cda[CTA_TIMEOUT_DATA]);
                        return ret;
                }
@@ -138,7 +136,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
                goto err_proto_put;
        }
 
-       ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
+       ret = ctnl_timeout_parse_policy(&timeout->data, l4proto, net,
                                        cda[CTA_TIMEOUT_DATA]);
        if (ret < 0)
                goto err;
@@ -342,6 +340,147 @@ cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
        return ret;
 }
 
+static int
+cttimeout_default_set(struct sock *ctnl, struct sk_buff *skb,
+                     const struct nlmsghdr *nlh,
+                     const struct nlattr * const cda[])
+{
+       __u16 l3num;
+       __u8 l4num;
+       struct nf_conntrack_l4proto *l4proto;
+       struct net *net = sock_net(skb->sk);
+       unsigned int *timeouts;
+       int ret;
+
+       if (!cda[CTA_TIMEOUT_L3PROTO] ||
+           !cda[CTA_TIMEOUT_L4PROTO] ||
+           !cda[CTA_TIMEOUT_DATA])
+               return -EINVAL;
+
+       l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+       l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               ret = -EOPNOTSUPP;
+               goto err;
+       }
+
+       timeouts = l4proto->get_timeouts(net);
+
+       ret = ctnl_timeout_parse_policy(timeouts, l4proto, net,
+                                       cda[CTA_TIMEOUT_DATA]);
+       if (ret < 0)
+               goto err;
+
+       nf_ct_l4proto_put(l4proto);
+       return 0;
+err:
+       nf_ct_l4proto_put(l4proto);
+       return ret;
+}
+
+static int
+cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
+                           u32 seq, u32 type, int event,
+                           struct nf_conntrack_l4proto *l4proto)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+       event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               goto nlmsg_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = AF_UNSPEC;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) ||
+           nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
+               goto nla_put_failure;
+
+       if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
+               struct nlattr *nest_parms;
+               unsigned int *timeouts = l4proto->get_timeouts(net);
+               int ret;
+
+               nest_parms = nla_nest_start(skb,
+                                           CTA_TIMEOUT_DATA | NLA_F_NESTED);
+               if (!nest_parms)
+                       goto nla_put_failure;
+
+               ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
+               if (ret < 0)
+                       goto nla_put_failure;
+
+               nla_nest_end(skb, nest_parms);
+       }
+
+       nlmsg_end(skb, nlh);
+       return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -1;
+}
+
+static int cttimeout_default_get(struct sock *ctnl, struct sk_buff *skb,
+                                const struct nlmsghdr *nlh,
+                                const struct nlattr * const cda[])
+{
+       __u16 l3num;
+       __u8 l4num;
+       struct nf_conntrack_l4proto *l4proto;
+       struct net *net = sock_net(skb->sk);
+       struct sk_buff *skb2;
+       int ret, err;
+
+       if (!cda[CTA_TIMEOUT_L3PROTO] || !cda[CTA_TIMEOUT_L4PROTO])
+               return -EINVAL;
+
+       l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+       l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               err = -EOPNOTSUPP;
+               goto err;
+       }
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       ret = cttimeout_default_fill_info(net, skb2, NETLINK_CB(skb).portid,
+                                         nlh->nlmsg_seq,
+                                         NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                         IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+                                         l4proto);
+       if (ret <= 0) {
+               kfree_skb(skb2);
+               err = -ENOMEM;
+               goto err;
+       }
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+       if (ret > 0)
+               ret = 0;
+
+       /* this avoids a loop in nfnetlink. */
+       return ret == -EAGAIN ? -ENOBUFS : ret;
+err:
+       nf_ct_l4proto_put(l4proto);
+       return err;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
 {
@@ -384,6 +523,12 @@ static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
        [IPCTNL_MSG_TIMEOUT_DELETE]     = { .call = cttimeout_del_timeout,
                                            .attr_count = CTA_TIMEOUT_MAX,
                                            .policy = cttimeout_nla_policy },
+       [IPCTNL_MSG_TIMEOUT_DEFAULT_SET]= { .call = cttimeout_default_set,
+                                           .attr_count = CTA_TIMEOUT_MAX,
+                                           .policy = cttimeout_nla_policy },
+       [IPCTNL_MSG_TIMEOUT_DEFAULT_GET]= { .call = cttimeout_default_get,
+                                           .attr_count = CTA_TIMEOUT_MAX,
+                                           .policy = cttimeout_nla_policy },
 };
 
 static const struct nfnetlink_subsystem cttimeout_subsys = {
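
The two callbacks registered above expose the per-protocol default conntrack timeouts over nfnetlink. A hedged userspace sketch (assuming libmnl and a cttimeout uapi header carrying the new IPCTNL_MSG_TIMEOUT_DEFAULT_GET value) of building the request that cttimeout_default_get() answers; only the message construction is shown, the socket send/receive is omitted.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <time.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) |
			  IPCTNL_MSG_TIMEOUT_DEFAULT_GET;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	/* ask for the default TCP timeouts of the IPv4 tracker */
	mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
	mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_TCP);

	/* nlh is now ready to be sent on a NETLINK_NETFILTER socket */
	return 0;
}

The L3 protocol is sent as big-endian and the L4 protocol as a plain byte, matching the ntohs()/nla_get_u8() calls in cttimeout_default_get() above.
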
index d92cc317bf8b25a0c371b770688c860169c9dea8..3c4b69e5fe17348b422f390bf79ab4269043fe5d 100644 (file)
@@ -319,7 +319,8 @@ nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 }
 
 static struct sk_buff *
-nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
+nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
+                unsigned int pkt_size)
 {
        struct sk_buff *skb;
        unsigned int n;
@@ -328,13 +329,13 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
         * message.  WARNING: has to be <= 128k due to slab restrictions */
 
        n = max(inst_size, pkt_size);
-       skb = nfnetlink_alloc_skb(&init_net, n, peer_portid, GFP_ATOMIC);
+       skb = nfnetlink_alloc_skb(net, n, peer_portid, GFP_ATOMIC);
        if (!skb) {
                if (n > pkt_size) {
                        /* try to allocate only as much as we need for current
                         * packet */
 
-                       skb = nfnetlink_alloc_skb(&init_net, pkt_size,
+                       skb = nfnetlink_alloc_skb(net, pkt_size,
                                                  peer_portid, GFP_ATOMIC);
                        if (!skb)
                                pr_err("nfnetlink_log: can't even alloc %u bytes\n",
@@ -702,8 +703,8 @@ nfulnl_log_packet(struct net *net,
        }
 
        if (!inst->skb) {
-               inst->skb = nfulnl_alloc_skb(inst->peer_portid, inst->nlbufsiz,
-                                            size);
+               inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
+                                            inst->nlbufsiz, size);
                if (!inst->skb)
                        goto alloc_failure;
        }
index ae2e5c11d01ac9887c5158d0084f193c624373ef..21258cf7009118c3820c7b9575b4a4bd1b331f1f 100644 (file)
@@ -298,7 +298,7 @@ nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 }
 
 static struct sk_buff *
-nfqnl_build_packet_message(struct nfqnl_instance *queue,
+nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
 {
@@ -372,7 +372,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (queue->flags & NFQA_CFG_F_CONNTRACK)
                ct = nfqnl_ct_get(entskb, &size, &ctinfo);
 
-       skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid,
+       skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
                                  GFP_ATOMIC);
        if (!skb)
                return NULL;
@@ -525,7 +525,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
        __be32 *packet_id_ptr;
        int failopen = 0;
 
-       nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
+       nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
new file mode 100644 (file)
index 0000000..4fb6ee2
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_bitwise {
+       enum nft_registers      sreg:8;
+       enum nft_registers      dreg:8;
+       u8                      len;
+       struct nft_data         mask;
+       struct nft_data         xor;
+};
+
+static void nft_bitwise_eval(const struct nft_expr *expr,
+                            struct nft_data data[NFT_REG_MAX + 1],
+                            const struct nft_pktinfo *pkt)
+{
+       const struct nft_bitwise *priv = nft_expr_priv(expr);
+       const struct nft_data *src = &data[priv->sreg];
+       struct nft_data *dst = &data[priv->dreg];
+       unsigned int i;
+
+       for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) {
+               dst->data[i] = (src->data[i] & priv->mask.data[i]) ^
+                              priv->xor.data[i];
+       }
+}
+
+static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
+       [NFTA_BITWISE_SREG]     = { .type = NLA_U32 },
+       [NFTA_BITWISE_DREG]     = { .type = NLA_U32 },
+       [NFTA_BITWISE_LEN]      = { .type = NLA_U32 },
+       [NFTA_BITWISE_MASK]     = { .type = NLA_NESTED },
+       [NFTA_BITWISE_XOR]      = { .type = NLA_NESTED },
+};
+
+static int nft_bitwise_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
+{
+       struct nft_bitwise *priv = nft_expr_priv(expr);
+       struct nft_data_desc d1, d2;
+       int err;
+
+       if (tb[NFTA_BITWISE_SREG] == NULL ||
+           tb[NFTA_BITWISE_DREG] == NULL ||
+           tb[NFTA_BITWISE_LEN] == NULL ||
+           tb[NFTA_BITWISE_MASK] == NULL ||
+           tb[NFTA_BITWISE_XOR] == NULL)
+               return -EINVAL;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
+
+       err = nft_data_init(NULL, &priv->mask, &d1, tb[NFTA_BITWISE_MASK]);
+       if (err < 0)
+               return err;
+       if (d1.len != priv->len)
+               return -EINVAL;
+
+       err = nft_data_init(NULL, &priv->xor, &d2, tb[NFTA_BITWISE_XOR]);
+       if (err < 0)
+               return err;
+       if (d2.len != priv->len)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_bitwise *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_BITWISE_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BITWISE_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
+                         NFT_DATA_VALUE, priv->len) < 0)
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
+                         NFT_DATA_VALUE, priv->len) < 0)
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_bitwise_type;
+static const struct nft_expr_ops nft_bitwise_ops = {
+       .type           = &nft_bitwise_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_bitwise)),
+       .eval           = nft_bitwise_eval,
+       .init           = nft_bitwise_init,
+       .dump           = nft_bitwise_dump,
+};
+
+static struct nft_expr_type nft_bitwise_type __read_mostly = {
+       .name           = "bitwise",
+       .ops            = &nft_bitwise_ops,
+       .policy         = nft_bitwise_policy,
+       .maxattr        = NFTA_BITWISE_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_bitwise_module_init(void)
+{
+       return nft_register_expr(&nft_bitwise_type);
+}
+
+void nft_bitwise_module_exit(void)
+{
+       nft_unregister_expr(&nft_bitwise_type);
+}
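
nft_bitwise_eval() above computes dst = (src & mask) ^ xor word by word. That single form is enough to express AND, OR and XOR of a register with a constant, depending on how a frontend chooses mask and xor; a small standalone sketch of the identities on one 32-bit word (plain C, not kernel code):

#include <assert.h>
#include <stdint.h>

/* The per-word operation performed by nft_bitwise_eval(). */
static uint32_t bitwise(uint32_t src, uint32_t mask, uint32_t xval)
{
	return (src & mask) ^ xval;
}

int main(void)
{
	uint32_t x = 0x12345678, m = 0x0000ffff;

	assert(bitwise(x, m, 0)   == (x & m));	/* AND: mask = m,  xor = 0 */
	assert(bitwise(x, ~m, m)  == (x | m));	/* OR:  mask = ~m, xor = m */
	assert(bitwise(x, ~0u, m) == (x ^ m));	/* XOR: mask = ~0, xor = m */
	return 0;
}
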
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
new file mode 100644 (file)
index 0000000..c39ed8d
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_byteorder {
+       enum nft_registers      sreg:8;
+       enum nft_registers      dreg:8;
+       enum nft_byteorder_ops  op:8;
+       u8                      len;
+       u8                      size;
+};
+
+static void nft_byteorder_eval(const struct nft_expr *expr,
+                              struct nft_data data[NFT_REG_MAX + 1],
+                              const struct nft_pktinfo *pkt)
+{
+       const struct nft_byteorder *priv = nft_expr_priv(expr);
+       struct nft_data *src = &data[priv->sreg], *dst = &data[priv->dreg];
+       union { u32 u32; u16 u16; } *s, *d;
+       unsigned int i;
+
+       s = (void *)src->data;
+       d = (void *)dst->data;
+
+       switch (priv->size) {
+       case 4:
+               switch (priv->op) {
+               case NFT_BYTEORDER_NTOH:
+                       for (i = 0; i < priv->len / 4; i++)
+                               d[i].u32 = ntohl((__force __be32)s[i].u32);
+                       break;
+               case NFT_BYTEORDER_HTON:
+                       for (i = 0; i < priv->len / 4; i++)
+                               d[i].u32 = (__force __u32)htonl(s[i].u32);
+                       break;
+               }
+               break;
+       case 2:
+               switch (priv->op) {
+               case NFT_BYTEORDER_NTOH:
+                       for (i = 0; i < priv->len / 2; i++)
+                               d[i].u16 = ntohs((__force __be16)s[i].u16);
+                       break;
+               case NFT_BYTEORDER_HTON:
+                       for (i = 0; i < priv->len / 2; i++)
+                               d[i].u16 = (__force __u16)htons(s[i].u16);
+                       break;
+               }
+               break;
+       }
+}
+
+static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = {
+       [NFTA_BYTEORDER_SREG]   = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_DREG]   = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_OP]     = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_LEN]    = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_SIZE]   = { .type = NLA_U32 },
+};
+
+static int nft_byteorder_init(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nlattr * const tb[])
+{
+       struct nft_byteorder *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_BYTEORDER_SREG] == NULL ||
+           tb[NFTA_BYTEORDER_DREG] == NULL ||
+           tb[NFTA_BYTEORDER_LEN] == NULL ||
+           tb[NFTA_BYTEORDER_SIZE] == NULL ||
+           tb[NFTA_BYTEORDER_OP] == NULL)
+               return -EINVAL;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
+       switch (priv->op) {
+       case NFT_BYTEORDER_NTOH:
+       case NFT_BYTEORDER_HTON:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
+       if (priv->len == 0 || priv->len > FIELD_SIZEOF(struct nft_data, data))
+               return -EINVAL;
+
+       priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));
+       switch (priv->size) {
+       case 2:
+       case 4:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_byteorder *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_BYTEORDER_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_byteorder_type;
+static const struct nft_expr_ops nft_byteorder_ops = {
+       .type           = &nft_byteorder_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
+       .eval           = nft_byteorder_eval,
+       .init           = nft_byteorder_init,
+       .dump           = nft_byteorder_dump,
+};
+
+static struct nft_expr_type nft_byteorder_type __read_mostly = {
+       .name           = "byteorder",
+       .ops            = &nft_byteorder_ops,
+       .policy         = nft_byteorder_policy,
+       .maxattr        = NFTA_BYTEORDER_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_byteorder_module_init(void)
+{
+       return nft_register_expr(&nft_byteorder_type);
+}
+
+void nft_byteorder_module_exit(void)
+{
+       nft_unregister_expr(&nft_byteorder_type);
+}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
new file mode 100644 (file)
index 0000000..954925d
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_cmp_expr {
+       struct nft_data         data;
+       enum nft_registers      sreg:8;
+       u8                      len;
+       enum nft_cmp_ops        op:8;
+};
+
+static void nft_cmp_eval(const struct nft_expr *expr,
+                        struct nft_data data[NFT_REG_MAX + 1],
+                        const struct nft_pktinfo *pkt)
+{
+       const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+       int d;
+
+       d = nft_data_cmp(&data[priv->sreg], &priv->data, priv->len);
+       switch (priv->op) {
+       case NFT_CMP_EQ:
+               if (d != 0)
+                       goto mismatch;
+               break;
+       case NFT_CMP_NEQ:
+               if (d == 0)
+                       goto mismatch;
+               break;
+       case NFT_CMP_LT:
+               if (d == 0)
+                       goto mismatch; /* else fall through; LTE check handles d > 0 */
+       case NFT_CMP_LTE:
+               if (d > 0)
+                       goto mismatch;
+               break;
+       case NFT_CMP_GT:
+               if (d == 0)
+                       goto mismatch; /* else fall through; GTE check handles d < 0 */
+       case NFT_CMP_GTE:
+               if (d < 0)
+                       goto mismatch;
+               break;
+       }
+       return;
+
+mismatch:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
+       [NFTA_CMP_SREG]         = { .type = NLA_U32 },
+       [NFTA_CMP_OP]           = { .type = NLA_U32 },
+       [NFTA_CMP_DATA]         = { .type = NLA_NESTED },
+};
+
+static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_cmp_expr *priv = nft_expr_priv(expr);
+       struct nft_data_desc desc;
+       int err;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+       priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+
+       err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
+       BUG_ON(err < 0); /* already validated by nft_cmp_select_ops() */
+
+       priv->len = desc.len;
+       return 0;
+}
+
+static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
+                         NFT_DATA_VALUE, priv->len) < 0)
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_cmp_type;
+static const struct nft_expr_ops nft_cmp_ops = {
+       .type           = &nft_cmp_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
+       .eval           = nft_cmp_eval,
+       .init           = nft_cmp_init,
+       .dump           = nft_cmp_dump,
+};
+
+static int nft_cmp_fast_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+       struct nft_data_desc desc;
+       struct nft_data data;
+       u32 mask;
+       int err;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+
+       err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+       BUG_ON(err < 0); /* already validated by nft_cmp_select_ops() */
+       desc.len *= BITS_PER_BYTE;
+
+       mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len);
+       priv->data = data.data[0] & mask;
+       priv->len  = desc.len;
+       return 0;
+}
+
+static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+       struct nft_data data;
+
+       if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
+               goto nla_put_failure;
+
+       data.data[0] = priv->data;
+       if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
+                         NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+const struct nft_expr_ops nft_cmp_fast_ops = {
+       .type           = &nft_cmp_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
+       .eval           = NULL, /* inlined */
+       .init           = nft_cmp_fast_init,
+       .dump           = nft_cmp_fast_dump,
+};
+
+static const struct nft_expr_ops *
+nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
+{
+       struct nft_data_desc desc;
+       struct nft_data data;
+       enum nft_registers sreg;
+       enum nft_cmp_ops op;
+       int err;
+
+       if (tb[NFTA_CMP_SREG] == NULL ||
+           tb[NFTA_CMP_OP] == NULL ||
+           tb[NFTA_CMP_DATA] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+       err = nft_validate_input_register(sreg);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+       switch (op) {
+       case NFT_CMP_EQ:
+       case NFT_CMP_NEQ:
+       case NFT_CMP_LT:
+       case NFT_CMP_LTE:
+       case NFT_CMP_GT:
+       case NFT_CMP_GTE:
+               break;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
+
+       err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
+               return &nft_cmp_fast_ops;
+       else
+               return &nft_cmp_ops;
+}
+
+static struct nft_expr_type nft_cmp_type __read_mostly = {
+       .name           = "cmp",
+       .select_ops     = nft_cmp_select_ops,
+       .policy         = nft_cmp_policy,
+       .maxattr        = NFTA_CMP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_cmp_module_init(void)
+{
+       return nft_register_expr(&nft_cmp_type);
+}
+
+void nft_cmp_module_exit(void)
+{
+       nft_unregister_expr(&nft_cmp_type);
+}
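The fast path selected above (nft_cmp_fast_ops, chosen for equality tests on values of at most 4 bytes) reduces the comparison to a single masked 32-bit compare; the eval itself is inlined elsewhere, as the ops table notes. A self-contained sketch of the mask logic, using a hypothetical cmp_fast_mask() helper that is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Keep only the low `len` bits (len = 8, 16, 24 or 32, derived by
 * nft_cmp_fast_init() from the attribute length in bytes). */
static uint32_t cmp_fast_mask(unsigned int len)
{
	return ~(uint32_t)0 >> (sizeof(uint32_t) * 8 - len);
}

int main(void)
{
	uint32_t reg  = 0x1234abcd;	/* source register contents */
	uint32_t want = 0x0000abcd;	/* pre-masked value stored at rule insert time */
	unsigned int len = 16;		/* compare a 2-byte field */

	printf("match: %d\n", (reg & cmp_fast_mask(len)) == want);
	return 0;
}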
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
new file mode 100644 (file)
index 0000000..4811f76
--- /dev/null
@@ -0,0 +1,768 @@
+/*
+ * (C) 2012-2013 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This software has been sponsored by Sophos Astaro <http://www.sophos.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/netfilter/nf_tables_compat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <asm/uaccess.h> /* for set_fs */
+#include <net/netfilter/nf_tables.h>
+
+union nft_entry {
+       struct ipt_entry e4;
+       struct ip6t_entry e6;
+};
+
+static inline void
+nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
+{
+       par->target     = xt;
+       par->targinfo   = xt_info;
+       par->hotdrop    = false;
+}
+
+static void nft_target_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_target *target = expr->ops->data;
+       struct sk_buff *skb = pkt->skb;
+       int ret;
+
+       nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+
+       ret = target->target(skb, &pkt->xt);
+
+       if (pkt->xt.hotdrop)
+               ret = NF_DROP;
+
+       switch(ret) {
+       case XT_CONTINUE:
+               data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+               break;
+       default:
+               data[NFT_REG_VERDICT].verdict = ret;
+               break;
+       }
+       return;
+}
+
+static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+       [NFTA_TARGET_NAME]      = { .type = NLA_NUL_STRING },
+       [NFTA_TARGET_REV]       = { .type = NLA_U32 },
+       [NFTA_TARGET_INFO]      = { .type = NLA_BINARY },
+};
+
+static void
+nft_target_set_tgchk_param(struct xt_tgchk_param *par,
+                          const struct nft_ctx *ctx,
+                          struct xt_target *target, void *info,
+                          union nft_entry *entry, u8 proto, bool inv)
+{
+       par->net        = &init_net;
+       par->table      = ctx->table->name;
+       switch (ctx->afi->family) {
+       case AF_INET:
+               entry->e4.ip.proto = proto;
+               entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+               break;
+       case AF_INET6:
+               entry->e6.ipv6.proto = proto;
+               entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+               break;
+       }
+       par->entryinfo  = entry;
+       par->target     = target;
+       par->targinfo   = info;
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               par->hook_mask = 1 << ops->hooknum;
+       }
+       par->family     = ctx->afi->family;
+}
+
+static void target_compat_from_user(struct xt_target *t, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+       if (t->compat_from_user) {
+               int pad;
+
+               t->compat_from_user(out, in);
+               pad = XT_ALIGN(t->targetsize) - t->targetsize;
+               if (pad > 0)
+                       memset(out + t->targetsize, 0, pad);
+       } else
+#endif
+               memcpy(out, in, XT_ALIGN(t->targetsize));
+}
+
+static inline int nft_compat_target_offset(struct xt_target *target)
+{
+#ifdef CONFIG_COMPAT
+       return xt_compat_target_offset(target);
+#else
+       return 0;
+#endif
+}
+
+static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = {
+       [NFTA_RULE_COMPAT_PROTO]        = { .type = NLA_U32 },
+       [NFTA_RULE_COMPAT_FLAGS]        = { .type = NLA_U32 },
+};
+
+static u8 nft_parse_compat(const struct nlattr *attr, bool *inv)
+{
+       struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+       u32 flags;
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr,
+                              nft_rule_compat_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])
+               return -EINVAL;
+
+       flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+       if (flags & ~NFT_RULE_COMPAT_F_MASK)
+               return -EINVAL;
+       if (flags & NFT_RULE_COMPAT_F_INV)
+               *inv = true;
+
+       return ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+}
+
+static int
+nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+               const struct nlattr * const tb[])
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_target *target = expr->ops->data;
+       struct xt_tgchk_param par;
+       size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
+       u8 proto = 0;
+       bool inv = false;
+       union nft_entry e = {};
+       int ret;
+
+       target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
+
+       if (ctx->nla[NFTA_RULE_COMPAT])
+               proto = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &inv);
+
+       nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
+
+       ret = xt_check_target(&par, size, proto, inv);
+       if (ret < 0)
+               goto err;
+
+       /* The standard target cannot be used */
+       if (target->target == NULL) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       return 0;
+err:
+       module_put(target->me);
+       return ret;
+}
+
+static void
+nft_target_destroy(const struct nft_expr *expr)
+{
+       struct xt_target *target = expr->ops->data;
+
+       module_put(target->me);
+}
+
+static int
+target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
+{
+       int ret;
+
+#ifdef CONFIG_COMPAT
+       if (t->compat_to_user) {
+               mm_segment_t old_fs;
+               void *out;
+
+               out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC);
+               if (out == NULL)
+                       return -ENOMEM;
+
+               /* We want to reuse existing compat_to_user */
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               t->compat_to_user(out, in);
+               set_fs(old_fs);
+               ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
+               kfree(out);
+       } else
+#endif
+               ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in);
+
+       return ret;
+}
+
+static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct xt_target *target = expr->ops->data;
+       void *info = nft_expr_priv(expr);
+
+       if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
+           nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
+           target_dump_info(skb, target, info))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_target_validate(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr,
+                              const struct nft_data **data)
+{
+       struct xt_target *target = expr->ops->data;
+       unsigned int hook_mask = 0;
+
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               hook_mask = 1 << ops->hooknum;
+               if (hook_mask & target->hooks)
+                       return 0;
+
+               /* This target is being called from an invalid chain */
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void nft_match_eval(const struct nft_expr *expr,
+                          struct nft_data data[NFT_REG_MAX + 1],
+                          const struct nft_pktinfo *pkt)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+       struct sk_buff *skb = pkt->skb;
+       bool ret;
+
+       nft_compat_set_par((struct xt_action_param *)&pkt->xt, match, info);
+
+       ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
+
+       if (pkt->xt.hotdrop) {
+               data[NFT_REG_VERDICT].verdict = NF_DROP;
+               return;
+       }
+
+       switch(ret) {
+       case true:
+               data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+               break;
+       case false:
+               data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+               break;
+       }
+}
+
+static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+       [NFTA_MATCH_NAME]       = { .type = NLA_NUL_STRING },
+       [NFTA_MATCH_REV]        = { .type = NLA_U32 },
+       [NFTA_MATCH_INFO]       = { .type = NLA_BINARY },
+};
+
+/* struct xt_mtchk_param and xt_tgchk_param look very similar */
+static void
+nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
+                         struct xt_match *match, void *info,
+                         union nft_entry *entry, u8 proto, bool inv)
+{
+       par->net        = &init_net;
+       par->table      = ctx->table->name;
+       switch (ctx->afi->family) {
+       case AF_INET:
+               entry->e4.ip.proto = proto;
+               entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+               break;
+       case AF_INET6:
+               entry->e6.ipv6.proto = proto;
+               entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+               break;
+       }
+       par->entryinfo  = entry;
+       par->match      = match;
+       par->matchinfo  = info;
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               par->hook_mask = 1 << ops->hooknum;
+       }
+       par->family     = ctx->afi->family;
+}
+
+static void match_compat_from_user(struct xt_match *m, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+       if (m->compat_from_user) {
+               int pad;
+
+               m->compat_from_user(out, in);
+               pad = XT_ALIGN(m->matchsize) - m->matchsize;
+               if (pad > 0)
+                       memset(out + m->matchsize, 0, pad);
+       } else
+#endif
+               memcpy(out, in, XT_ALIGN(m->matchsize));
+}
+
+static int
+nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+               const struct nlattr * const tb[])
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+       struct xt_mtchk_param par;
+       size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
+       u8 proto = 0;
+       bool inv = false;
+       union nft_entry e = {};
+       int ret;
+
+       match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
+
+       if (ctx->nla[NFTA_RULE_COMPAT])
+               proto = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &inv);
+
+       nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+
+       ret = xt_check_match(&par, size, proto, inv);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+err:
+       module_put(match->me);
+       return ret;
+}
+
+static void
+nft_match_destroy(const struct nft_expr *expr)
+{
+       struct xt_match *match = expr->ops->data;
+
+       module_put(match->me);
+}
+
+static int
+match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
+{
+       int ret;
+
+#ifdef CONFIG_COMPAT
+       if (m->compat_to_user) {
+               mm_segment_t old_fs;
+               void *out;
+
+               out = kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC);
+               if (out == NULL)
+                       return -ENOMEM;
+
+               /* We want to reuse existing compat_to_user */
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               m->compat_to_user(out, in);
+               set_fs(old_fs);
+               ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
+               kfree(out);
+       } else
+#endif
+               ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in);
+
+       return ret;
+}
+
+static inline int nft_compat_match_offset(struct xt_match *match)
+{
+#ifdef CONFIG_COMPAT
+       return xt_compat_match_offset(match);
+#else
+       return 0;
+#endif
+}
+
+static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+
+       if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
+           nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
+           match_dump_info(skb, match, info))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_match_validate(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nft_data **data)
+{
+       struct xt_match *match = expr->ops->data;
+       unsigned int hook_mask = 0;
+
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               hook_mask = 1 << ops->hooknum;
+               if (hook_mask & match->hooks)
+                       return 0;
+
+               /* This match is being called from an invalid chain */
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int
+nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+                     int event, u16 family, const char *name,
+                     int rev, int target)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+       event |= NFNL_SUBSYS_NFT_COMPAT << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               goto nlmsg_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = family;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
+           nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
+           nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
+               goto nla_put_failure;
+
+       nlmsg_end(skb, nlh);
+       return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -1;
+}
+
+static int
+nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
+               const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+       int ret = 0, target;
+       struct nfgenmsg *nfmsg;
+       const char *fmt;
+       const char *name;
+       u32 rev;
+       struct sk_buff *skb2;
+
+       if (tb[NFTA_COMPAT_NAME] == NULL ||
+           tb[NFTA_COMPAT_REV] == NULL ||
+           tb[NFTA_COMPAT_TYPE] == NULL)
+               return -EINVAL;
+
+       name = nla_data(tb[NFTA_COMPAT_NAME]);
+       rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
+       target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
+
+       nfmsg = nlmsg_data(nlh);
+
+       switch(nfmsg->nfgen_family) {
+       case AF_INET:
+               fmt = "ipt_%s";
+               break;
+       case AF_INET6:
+               fmt = "ip6t_%s";
+               break;
+       default:
+               pr_err("nft_compat: unsupported protocol %d\n",
+                       nfmsg->nfgen_family);
+               return -EINVAL;
+       }
+
+       try_then_request_module(xt_find_revision(nfmsg->nfgen_family, name,
+                                                rev, target, &ret),
+                                                fmt, name);
+
+       if (ret < 0)
+               return ret;
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       /* include the best revision for this extension in the message */
+       if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
+                                 nlh->nlmsg_seq,
+                                 NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                 NFNL_MSG_COMPAT_GET,
+                                 nfmsg->nfgen_family,
+                                 name, ret, target) <= 0) {
+               kfree_skb(skb2);
+               return -ENOSPC;
+       }
+
+       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+                               MSG_DONTWAIT);
+       if (ret > 0)
+               ret = 0;
+
+       return ret == -EAGAIN ? -ENOBUFS : ret;
+}
+
+static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+       [NFTA_COMPAT_NAME]      = { .type = NLA_NUL_STRING,
+                                   .len = NFT_COMPAT_NAME_MAX-1 },
+       [NFTA_COMPAT_REV]       = { .type = NLA_U32 },
+       [NFTA_COMPAT_TYPE]      = { .type = NLA_U32 },
+};
+
+static const struct nfnl_callback nfnl_nft_compat_cb[NFNL_MSG_COMPAT_MAX] = {
+       [NFNL_MSG_COMPAT_GET]           = { .call = nfnl_compat_get,
+                                           .attr_count = NFTA_COMPAT_MAX,
+                                           .policy = nfnl_compat_policy_get },
+};
+
+static const struct nfnetlink_subsystem nfnl_compat_subsys = {
+       .name           = "nft-compat",
+       .subsys_id      = NFNL_SUBSYS_NFT_COMPAT,
+       .cb_count       = NFNL_MSG_COMPAT_MAX,
+       .cb             = nfnl_nft_compat_cb,
+};
+
+static LIST_HEAD(nft_match_list);
+
+struct nft_xt {
+       struct list_head        head;
+       struct nft_expr_ops     ops;
+};
+
+static struct nft_expr_type nft_match_type;
+
+static const struct nft_expr_ops *
+nft_match_select_ops(const struct nft_ctx *ctx,
+                    const struct nlattr * const tb[])
+{
+       struct nft_xt *nft_match;
+       struct xt_match *match;
+       char *mt_name;
+       __u32 rev, family;
+
+       if (tb[NFTA_MATCH_NAME] == NULL ||
+           tb[NFTA_MATCH_REV] == NULL ||
+           tb[NFTA_MATCH_INFO] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       mt_name = nla_data(tb[NFTA_MATCH_NAME]);
+       rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
+       family = ctx->afi->family;
+
+       /* Re-use the existing match if it's already loaded. */
+       list_for_each_entry(nft_match, &nft_match_list, head) {
+               struct xt_match *match = nft_match->ops.data;
+
+               if (strcmp(match->name, mt_name) == 0 &&
+                   match->revision == rev && match->family == family)
+                       return &nft_match->ops;
+       }
+
+       match = xt_request_find_match(family, mt_name, rev);
+       if (IS_ERR(match))
+               return ERR_PTR(-ENOENT);
+
+       /* This is the first time we use this match, allocate operations */
+       nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+       if (nft_match == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       nft_match->ops.type = &nft_match_type;
+       nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) +
+                                           nft_compat_match_offset(match));
+       nft_match->ops.eval = nft_match_eval;
+       nft_match->ops.init = nft_match_init;
+       nft_match->ops.destroy = nft_match_destroy;
+       nft_match->ops.dump = nft_match_dump;
+       nft_match->ops.validate = nft_match_validate;
+       nft_match->ops.data = match;
+
+       list_add(&nft_match->head, &nft_match_list);
+
+       return &nft_match->ops;
+}
+
+static void nft_match_release(void)
+{
+       struct nft_xt *nft_match, *next;
+
+       list_for_each_entry_safe(nft_match, next, &nft_match_list, head)
+               kfree(nft_match);
+}
+
+static struct nft_expr_type nft_match_type __read_mostly = {
+       .name           = "match",
+       .select_ops     = nft_match_select_ops,
+       .policy         = nft_match_policy,
+       .maxattr        = NFTA_MATCH_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static LIST_HEAD(nft_target_list);
+
+static struct nft_expr_type nft_target_type;
+
+static const struct nft_expr_ops *
+nft_target_select_ops(const struct nft_ctx *ctx,
+                     const struct nlattr * const tb[])
+{
+       struct nft_xt *nft_target;
+       struct xt_target *target;
+       char *tg_name;
+       __u32 rev, family;
+
+       if (tb[NFTA_TARGET_NAME] == NULL ||
+           tb[NFTA_TARGET_REV] == NULL ||
+           tb[NFTA_TARGET_INFO] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       tg_name = nla_data(tb[NFTA_TARGET_NAME]);
+       rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
+       family = ctx->afi->family;
+
+       /* Re-use the existing target if it's already loaded. */
+       list_for_each_entry(nft_target, &nft_target_list, head) {
+               struct xt_target *target = nft_target->ops.data;
+
+               if (strcmp(target->name, tg_name) == 0 &&
+                   target->revision == rev && target->family == family)
+                       return &nft_target->ops;
+       }
+
+       target = xt_request_find_target(family, tg_name, rev);
+       if (IS_ERR(target))
+               return ERR_PTR(-ENOENT);
+
+       /* This is the first time we use this target, allocate operations */
+       nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+       if (nft_target == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       nft_target->ops.type = &nft_target_type;
+       nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) +
+                                            nft_compat_target_offset(target));
+       nft_target->ops.eval = nft_target_eval;
+       nft_target->ops.init = nft_target_init;
+       nft_target->ops.destroy = nft_target_destroy;
+       nft_target->ops.dump = nft_target_dump;
+       nft_target->ops.validate = nft_target_validate;
+       nft_target->ops.data = target;
+
+       list_add(&nft_target->head, &nft_target_list);
+
+       return &nft_target->ops;
+}
+
+static void nft_target_release(void)
+{
+       struct nft_xt *nft_target, *next;
+
+       list_for_each_entry_safe(nft_target, next, &nft_target_list, head)
+               kfree(nft_target);
+}
+
+static struct nft_expr_type nft_target_type __read_mostly = {
+       .name           = "target",
+       .select_ops     = nft_target_select_ops,
+       .policy         = nft_target_policy,
+       .maxattr        = NFTA_TARGET_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_compat_module_init(void)
+{
+       int ret;
+
+       ret = nft_register_expr(&nft_match_type);
+       if (ret < 0)
+               return ret;
+
+       ret = nft_register_expr(&nft_target_type);
+       if (ret < 0)
+               goto err_match;
+
+       ret = nfnetlink_subsys_register(&nfnl_compat_subsys);
+       if (ret < 0) {
+               pr_err("nft_compat: cannot register with nfnetlink.\n");
+               goto err_target;
+       }
+
+       pr_info("nf_tables_compat: (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>\n");
+
+       return ret;
+
+err_target:
+       nft_unregister_expr(&nft_target_type);
+err_match:
+       nft_unregister_expr(&nft_match_type);
+       return ret;
+}
+
+static void __exit nft_compat_module_exit(void)
+{
+       nfnetlink_subsys_unregister(&nfnl_compat_subsys);
+       nft_unregister_expr(&nft_target_type);
+       nft_unregister_expr(&nft_match_type);
+       nft_match_release();
+       nft_target_release();
+}
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
+
+module_init(nft_compat_module_init);
+module_exit(nft_compat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("match");
+MODULE_ALIAS_NFT_EXPR("target");
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
new file mode 100644 (file)
index 0000000..c89ee48
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/seqlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_counter {
+       seqlock_t       lock;
+       u64             bytes;
+       u64             packets;
+};
+
+static void nft_counter_eval(const struct nft_expr *expr,
+                            struct nft_data data[NFT_REG_MAX + 1],
+                            const struct nft_pktinfo *pkt)
+{
+       struct nft_counter *priv = nft_expr_priv(expr);
+
+       write_seqlock_bh(&priv->lock);
+       priv->bytes += pkt->skb->len;
+       priv->packets++;
+       write_sequnlock_bh(&priv->lock);
+}
+
+static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_counter *priv = nft_expr_priv(expr);
+       unsigned int seq;
+       u64 bytes;
+       u64 packets;
+
+       do {
+               seq = read_seqbegin(&priv->lock);
+               bytes   = priv->bytes;
+               packets = priv->packets;
+       } while (read_seqretry(&priv->lock, seq));
+
+       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+       [NFTA_COUNTER_PACKETS]  = { .type = NLA_U64 },
+       [NFTA_COUNTER_BYTES]    = { .type = NLA_U64 },
+};
+
+static int nft_counter_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
+{
+       struct nft_counter *priv = nft_expr_priv(expr);
+
+       if (tb[NFTA_COUNTER_PACKETS])
+               priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       if (tb[NFTA_COUNTER_BYTES])
+               priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+
+       seqlock_init(&priv->lock);
+       return 0;
+}
+
+static struct nft_expr_type nft_counter_type;
+static const struct nft_expr_ops nft_counter_ops = {
+       .type           = &nft_counter_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+       .eval           = nft_counter_eval,
+       .init           = nft_counter_init,
+       .dump           = nft_counter_dump,
+};
+
+static struct nft_expr_type nft_counter_type __read_mostly = {
+       .name           = "counter",
+       .ops            = &nft_counter_ops,
+       .policy         = nft_counter_policy,
+       .maxattr        = NFTA_COUNTER_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_counter_module_init(void)
+{
+       return nft_register_expr(&nft_counter_type);
+}
+
+static void __exit nft_counter_module_exit(void)
+{
+       nft_unregister_expr(&nft_counter_type);
+}
+
+module_init(nft_counter_module_init);
+module_exit(nft_counter_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("counter");
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
new file mode 100644 (file)
index 0000000..955f4e6
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+struct nft_ct {
+       enum nft_ct_keys        key:8;
+       enum ip_conntrack_dir   dir:8;
+       enum nft_registers      dreg:8;
+       uint8_t                 family;
+};
+
+static void nft_ct_eval(const struct nft_expr *expr,
+                       struct nft_data data[NFT_REG_MAX + 1],
+                       const struct nft_pktinfo *pkt)
+{
+       const struct nft_ct *priv = nft_expr_priv(expr);
+       struct nft_data *dest = &data[priv->dreg];
+       enum ip_conntrack_info ctinfo;
+       const struct nf_conn *ct;
+       const struct nf_conn_help *help;
+       const struct nf_conntrack_tuple *tuple;
+       const struct nf_conntrack_helper *helper;
+       long diff;
+       unsigned int state;
+
+       ct = nf_ct_get(pkt->skb, &ctinfo);
+
+       switch (priv->key) {
+       case NFT_CT_STATE:
+               if (ct == NULL)
+                       state = NF_CT_STATE_INVALID_BIT;
+               else if (nf_ct_is_untracked(ct))
+                       state = NF_CT_STATE_UNTRACKED_BIT;
+               else
+                       state = NF_CT_STATE_BIT(ctinfo);
+               dest->data[0] = state;
+               return;
+       }
+
+       if (ct == NULL)
+               goto err;
+
+       switch (priv->key) {
+       case NFT_CT_DIRECTION:
+               dest->data[0] = CTINFO2DIR(ctinfo);
+               return;
+       case NFT_CT_STATUS:
+               dest->data[0] = ct->status;
+               return;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       case NFT_CT_MARK:
+               dest->data[0] = ct->mark;
+               return;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+       case NFT_CT_SECMARK:
+               dest->data[0] = ct->secmark;
+               return;
+#endif
+       case NFT_CT_EXPIRATION:
+               diff = (long)ct->timeout.expires - (long)jiffies;
+               if (diff < 0)
+                       diff = 0;
+               dest->data[0] = jiffies_to_msecs(diff);
+               return;
+       case NFT_CT_HELPER:
+               if (ct->master == NULL)
+                       goto err;
+               help = nfct_help(ct->master);
+               if (help == NULL)
+                       goto err;
+               helper = rcu_dereference(help->helper);
+               if (helper == NULL)
+                       goto err;
+               if (strlen(helper->name) >= sizeof(dest->data))
+                       goto err;
+               strncpy((char *)dest->data, helper->name, sizeof(dest->data));
+               return;
+       }
+
+       tuple = &ct->tuplehash[priv->dir].tuple;
+       switch (priv->key) {
+       case NFT_CT_L3PROTOCOL:
+               dest->data[0] = nf_ct_l3num(ct);
+               return;
+       case NFT_CT_SRC:
+               memcpy(dest->data, tuple->src.u3.all,
+                      nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+               return;
+       case NFT_CT_DST:
+               memcpy(dest->data, tuple->dst.u3.all,
+                      nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+               return;
+       case NFT_CT_PROTOCOL:
+               dest->data[0] = nf_ct_protonum(ct);
+               return;
+       case NFT_CT_PROTO_SRC:
+               dest->data[0] = (__force __u16)tuple->src.u.all;
+               return;
+       case NFT_CT_PROTO_DST:
+               dest->data[0] = (__force __u16)tuple->dst.u.all;
+               return;
+       }
+       return;
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
+       [NFTA_CT_DREG]          = { .type = NLA_U32 },
+       [NFTA_CT_KEY]           = { .type = NLA_U32 },
+       [NFTA_CT_DIRECTION]     = { .type = NLA_U8 },
+};
+
+static int nft_ct_init(const struct nft_ctx *ctx,
+                      const struct nft_expr *expr,
+                      const struct nlattr * const tb[])
+{
+       struct nft_ct *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_CT_DREG] == NULL ||
+           tb[NFTA_CT_KEY] == NULL)
+               return -EINVAL;
+
+       priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+       if (tb[NFTA_CT_DIRECTION] != NULL) {
+               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+               switch (priv->dir) {
+               case IP_CT_DIR_ORIGINAL:
+               case IP_CT_DIR_REPLY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       switch (priv->key) {
+       case NFT_CT_STATE:
+       case NFT_CT_DIRECTION:
+       case NFT_CT_STATUS:
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       case NFT_CT_MARK:
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+       case NFT_CT_SECMARK:
+#endif
+       case NFT_CT_EXPIRATION:
+       case NFT_CT_HELPER:
+               if (tb[NFTA_CT_DIRECTION] != NULL)
+                       return -EINVAL;
+               break;
+       case NFT_CT_PROTOCOL:
+       case NFT_CT_SRC:
+       case NFT_CT_DST:
+       case NFT_CT_PROTO_SRC:
+       case NFT_CT_PROTO_DST:
+               if (tb[NFTA_CT_DIRECTION] == NULL)
+                       return -EINVAL;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       err = nf_ct_l3proto_try_module_get(ctx->afi->family);
+       if (err < 0)
+               return err;
+       priv->family = ctx->afi->family;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               goto err1;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               goto err1;
+       return 0;
+
+err1:
+       nf_ct_l3proto_module_put(ctx->afi->family);
+       return err;
+}
+
+static void nft_ct_destroy(const struct nft_expr *expr)
+{
+       struct nft_ct *priv = nft_expr_priv(expr);
+
+       nf_ct_l3proto_module_put(priv->family);
+}
+
+static int nft_ct_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_ct *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_CT_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
+               goto nla_put_failure;
+       if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_ct_type;
+static const struct nft_expr_ops nft_ct_ops = {
+       .type           = &nft_ct_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+       .eval           = nft_ct_eval,
+       .init           = nft_ct_init,
+       .destroy        = nft_ct_destroy,
+       .dump           = nft_ct_dump,
+};
+
+static struct nft_expr_type nft_ct_type __read_mostly = {
+       .name           = "ct",
+       .ops            = &nft_ct_ops,
+       .policy         = nft_ct_policy,
+       .maxattr        = NFTA_CT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_ct_module_init(void)
+{
+       return nft_register_expr(&nft_ct_type);
+}
+
+static void __exit nft_ct_module_exit(void)
+{
+       nft_unregister_expr(&nft_ct_type);
+}
+
+module_init(nft_ct_module_init);
+module_exit(nft_ct_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("ct");
diff --git a/net/netfilter/nft_expr_template.c b/net/netfilter/nft_expr_template.c
new file mode 100644 (file)
index 0000000..b6eed4d
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_template {
+
+};
+
+static void nft_template_eval(const struct nft_expr *expr,
+                             struct nft_data data[NFT_REG_MAX + 1],
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static const struct nla_policy nft_template_policy[NFTA_TEMPLATE_MAX + 1] = {
+       [NFTA_TEMPLATE_ATTR]            = { .type = NLA_U32 },
+};
+
+static int nft_template_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_template *priv = nft_expr_priv(expr);
+
+       return 0;
+}
+
+static void nft_template_destroy(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr)
+{
+       struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static int nft_template_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_template *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_TEMPLATE_ATTR, priv->field))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_template_type;
+static const struct nft_expr_ops nft_template_ops = {
+       .type           = &nft_template_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_template)),
+       .eval           = nft_template_eval,
+       .init           = nft_template_init,
+       .destroy        = nft_template_destroy,
+       .dump           = nft_template_dump,
+};
+
+static struct nft_expr_type nft_template_type __read_mostly = {
+       .name           = "template",
+       .ops            = &nft_template_ops,
+       .policy         = nft_template_policy,
+       .maxattr        = NFTA_TEMPLATE_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_template_module_init(void)
+{
+       return nft_register_expr(&nft_template_type);
+}
+
+static void __exit nft_template_module_exit(void)
+{
+       nft_unregister_expr(&nft_template_type);
+}
+
+module_init(nft_template_module_init);
+module_exit(nft_template_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("template");
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
new file mode 100644 (file)
index 0000000..8e0bb75
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+// FIXME:
+#include <net/ipv6.h>
+
+struct nft_exthdr {
+       u8                      type;
+       u8                      offset;
+       u8                      len;
+       enum nft_registers      dreg:8;
+};
+
+static void nft_exthdr_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       struct nft_exthdr *priv = nft_expr_priv(expr);
+       struct nft_data *dest = &data[priv->dreg];
+       unsigned int offset;
+       int err;
+
+       err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
+       if (err < 0)
+               goto err;
+       offset += priv->offset;
+
+       if (skb_copy_bits(pkt->skb, offset, dest->data, priv->len) < 0)
+               goto err;
+       return;
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
+       [NFTA_EXTHDR_DREG]              = { .type = NLA_U32 },
+       [NFTA_EXTHDR_TYPE]              = { .type = NLA_U8 },
+       [NFTA_EXTHDR_OFFSET]            = { .type = NLA_U32 },
+       [NFTA_EXTHDR_LEN]               = { .type = NLA_U32 },
+};
+
+static int nft_exthdr_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_exthdr *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_EXTHDR_DREG] == NULL ||
+           tb[NFTA_EXTHDR_TYPE] == NULL ||
+           tb[NFTA_EXTHDR_OFFSET] == NULL ||
+           tb[NFTA_EXTHDR_LEN] == NULL)
+               return -EINVAL;
+
+       priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+       priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+       priv->len    = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+       if (priv->len == 0 ||
+           priv->len > FIELD_SIZEOF(struct nft_data, data))
+               return -EINVAL;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_EXTHDR_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_exthdr *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_EXTHDR_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_exthdr_type;
+static const struct nft_expr_ops nft_exthdr_ops = {
+       .type           = &nft_exthdr_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
+       .eval           = nft_exthdr_eval,
+       .init           = nft_exthdr_init,
+       .dump           = nft_exthdr_dump,
+};
+
+static struct nft_expr_type nft_exthdr_type __read_mostly = {
+       .name           = "exthdr",
+       .ops            = &nft_exthdr_ops,
+       .policy         = nft_exthdr_policy,
+       .maxattr        = NFTA_EXTHDR_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_exthdr_module_init(void)
+{
+       return nft_register_expr(&nft_exthdr_type);
+}
+
+static void __exit nft_exthdr_module_exit(void)
+{
+       nft_unregister_expr(&nft_exthdr_type);
+}
+
+module_init(nft_exthdr_module_init);
+module_exit(nft_exthdr_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("exthdr");
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
new file mode 100644 (file)
index 0000000..3d3f8fc
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_hash {
+       struct hlist_head       *hash;
+       unsigned int            hsize;
+};
+
+struct nft_hash_elem {
+       struct hlist_node       hnode;
+       struct nft_data         key;
+       struct nft_data         data[];
+};
+
+static u32 nft_hash_rnd __read_mostly;
+static bool nft_hash_rnd_initted __read_mostly;
+
+static unsigned int nft_hash_data(const struct nft_data *data,
+                                 unsigned int hsize, unsigned int len)
+{
+       unsigned int h;
+
+       h = jhash(data->data, len, nft_hash_rnd);
+       return ((u64)h * hsize) >> 32;
+}
+
+static bool nft_hash_lookup(const struct nft_set *set,
+                           const struct nft_data *key,
+                           struct nft_data *data)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_elem *he;
+       unsigned int h;
+
+       h = nft_hash_data(key, priv->hsize, set->klen);
+       hlist_for_each_entry(he, &priv->hash[h], hnode) {
+               if (nft_data_cmp(&he->key, key, set->klen))
+                       continue;
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(data, he->data);
+               return true;
+       }
+       return false;
+}
+
+static void nft_hash_elem_destroy(const struct nft_set *set,
+                                 struct nft_hash_elem *he)
+{
+       nft_data_uninit(&he->key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(he->data, set->dtype);
+       kfree(he);
+}
+
+static int nft_hash_insert(const struct nft_set *set,
+                          const struct nft_set_elem *elem)
+{
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
+       unsigned int size, h;
+
+       if (elem->flags != 0)
+               return -EINVAL;
+
+       size = sizeof(*he);
+       if (set->flags & NFT_SET_MAP)
+               size += sizeof(he->data[0]);
+
+       he = kzalloc(size, GFP_KERNEL);
+       if (he == NULL)
+               return -ENOMEM;
+
+       nft_data_copy(&he->key, &elem->key);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_copy(he->data, &elem->data);
+
+       h = nft_hash_data(&he->key, priv->hsize, set->klen);
+       hlist_add_head_rcu(&he->hnode, &priv->hash[h]);
+       return 0;
+}
+
+static void nft_hash_remove(const struct nft_set *set,
+                           const struct nft_set_elem *elem)
+{
+       struct nft_hash_elem *he = elem->cookie;
+
+       hlist_del_rcu(&he->hnode);
+       kfree(he);
+}
+
+static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
+       unsigned int h;
+
+       h = nft_hash_data(&elem->key, priv->hsize, set->klen);
+       hlist_for_each_entry(he, &priv->hash[h], hnode) {
+               if (nft_data_cmp(&he->key, &elem->key, set->klen))
+                       continue;
+
+               elem->cookie = he;
+               elem->flags  = 0;
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(&elem->data, he->data);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+                         struct nft_set_iter *iter)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_elem *he;
+       struct nft_set_elem elem;
+       unsigned int i;
+
+       for (i = 0; i < priv->hsize; i++) {
+               hlist_for_each_entry(he, &priv->hash[i], hnode) {
+                       if (iter->count < iter->skip)
+                               goto cont;
+
+                       memcpy(&elem.key, &he->key, sizeof(elem.key));
+                       if (set->flags & NFT_SET_MAP)
+                               memcpy(&elem.data, he->data, sizeof(elem.data));
+                       elem.flags = 0;
+
+                       iter->err = iter->fn(ctx, set, iter, &elem);
+                       if (iter->err < 0)
+                               return;
+cont:
+                       iter->count++;
+               }
+       }
+}
+
+static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
+{
+       return sizeof(struct nft_hash);
+}
+
+static int nft_hash_init(const struct nft_set *set,
+                        const struct nlattr * const tb[])
+{
+       struct nft_hash *priv = nft_set_priv(set);
+       unsigned int cnt, i;
+
+       if (unlikely(!nft_hash_rnd_initted)) {
+               get_random_bytes(&nft_hash_rnd, 4);
+               nft_hash_rnd_initted = true;
+       }
+
+       /* Aim for a load factor of 0.75 */
+       // FIXME: temporarily broken until we have set descriptions
+       cnt = 100;
+       cnt = cnt * 4 / 3;
+
+       priv->hash = kcalloc(cnt, sizeof(struct hlist_head), GFP_KERNEL);
+       if (priv->hash == NULL)
+               return -ENOMEM;
+       priv->hsize = cnt;
+
+       for (i = 0; i < cnt; i++)
+               INIT_HLIST_HEAD(&priv->hash[i]);
+
+       return 0;
+}
+
+static void nft_hash_destroy(const struct nft_set *set)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       const struct hlist_node *next;
+       struct nft_hash_elem *elem;
+       unsigned int i;
+
+       for (i = 0; i < priv->hsize; i++) {
+               hlist_for_each_entry_safe(elem, next, &priv->hash[i], hnode) {
+                       hlist_del(&elem->hnode);
+                       nft_hash_elem_destroy(set, elem);
+               }
+       }
+       kfree(priv->hash);
+}
+
+static struct nft_set_ops nft_hash_ops __read_mostly = {
+       .privsize       = nft_hash_privsize,
+       .init           = nft_hash_init,
+       .destroy        = nft_hash_destroy,
+       .get            = nft_hash_get,
+       .insert         = nft_hash_insert,
+       .remove         = nft_hash_remove,
+       .lookup         = nft_hash_lookup,
+       .walk           = nft_hash_walk,
+       .features       = NFT_SET_MAP,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_hash_module_init(void)
+{
+       return nft_register_set(&nft_hash_ops);
+}
+
+static void __exit nft_hash_module_exit(void)
+{
+       nft_unregister_set(&nft_hash_ops);
+}
+
+module_init(nft_hash_module_init);
+module_exit(nft_hash_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
new file mode 100644 (file)
index 0000000..f169501
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_immediate_expr {
+       struct nft_data         data;
+       enum nft_registers      dreg:8;
+       u8                      dlen;
+};
+
+static void nft_immediate_eval(const struct nft_expr *expr,
+                              struct nft_data data[NFT_REG_MAX + 1],
+                              const struct nft_pktinfo *pkt)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       nft_data_copy(&data[priv->dreg], &priv->data);
+}
+
+static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
+       [NFTA_IMMEDIATE_DREG]   = { .type = NLA_U32 },
+       [NFTA_IMMEDIATE_DATA]   = { .type = NLA_NESTED },
+};
+
+static int nft_immediate_init(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nlattr * const tb[])
+{
+       struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       struct nft_data_desc desc;
+       int err;
+
+       if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
+           tb[NFTA_IMMEDIATE_DATA] == NULL)
+               return -EINVAL;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
+       if (err < 0)
+               return err;
+       priv->dlen = desc.len;
+
+       err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type);
+       if (err < 0)
+               goto err1;
+
+       return 0;
+
+err1:
+       nft_data_uninit(&priv->data, desc.type);
+       return err;
+}
+
+static void nft_immediate_destroy(const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
+}
+
+static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+
+       return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
+                            nft_dreg_to_type(priv->dreg), priv->dlen);
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_immediate_validate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr,
+                                 const struct nft_data **data)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       if (priv->dreg == NFT_REG_VERDICT)
+               *data = &priv->data;
+
+       return 0;
+}
+
+static struct nft_expr_type nft_imm_type;
+static const struct nft_expr_ops nft_imm_ops = {
+       .type           = &nft_imm_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
+       .eval           = nft_immediate_eval,
+       .init           = nft_immediate_init,
+       .destroy        = nft_immediate_destroy,
+       .dump           = nft_immediate_dump,
+       .validate       = nft_immediate_validate,
+};
+
+static struct nft_expr_type nft_imm_type __read_mostly = {
+       .name           = "immediate",
+       .ops            = &nft_imm_ops,
+       .policy         = nft_immediate_policy,
+       .maxattr        = NFTA_IMMEDIATE_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_immediate_module_init(void)
+{
+       return nft_register_expr(&nft_imm_type);
+}
+
+void nft_immediate_module_exit(void)
+{
+       nft_unregister_expr(&nft_imm_type);
+}
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
new file mode 100644 (file)
index 0000000..85da5bd
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+static DEFINE_SPINLOCK(limit_lock);
+
+struct nft_limit {
+       u64             tokens;
+       u64             rate;
+       u64             unit;
+       unsigned long   stamp;
+};
+
+static void nft_limit_eval(const struct nft_expr *expr,
+                          struct nft_data data[NFT_REG_MAX + 1],
+                          const struct nft_pktinfo *pkt)
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+
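+       /* Simple token bucket: once per 'unit' seconds the bucket is
+        * refilled to 'rate' tokens; each packet consumes one token and
+        * the expression breaks (no match) once the bucket is empty.
+        */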
+       spin_lock_bh(&limit_lock);
+       if (time_after_eq(jiffies, priv->stamp)) {
+               priv->tokens = priv->rate;
+               priv->stamp = jiffies + priv->unit * HZ;
+       }
+
+       if (priv->tokens >= 1) {
+               priv->tokens--;
+               spin_unlock_bh(&limit_lock);
+               return;
+       }
+       spin_unlock_bh(&limit_lock);
+
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+       [NFTA_LIMIT_RATE]       = { .type = NLA_U64 },
+       [NFTA_LIMIT_UNIT]       = { .type = NLA_U64 },
+};
+
+static int nft_limit_init(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nlattr * const tb[])
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+
+       if (tb[NFTA_LIMIT_RATE] == NULL ||
+           tb[NFTA_LIMIT_UNIT] == NULL)
+               return -EINVAL;
+
+       priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+       priv->stamp  = jiffies + priv->unit * HZ;
+       priv->tokens = priv->rate;
+       return 0;
+}
+
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_limit *priv = nft_expr_priv(expr);
+
+       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_limit_type;
+static const struct nft_expr_ops nft_limit_ops = {
+       .type           = &nft_limit_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
+       .eval           = nft_limit_eval,
+       .init           = nft_limit_init,
+       .dump           = nft_limit_dump,
+};
+
+static struct nft_expr_type nft_limit_type __read_mostly = {
+       .name           = "limit",
+       .ops            = &nft_limit_ops,
+       .policy         = nft_limit_policy,
+       .maxattr        = NFTA_LIMIT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_limit_module_init(void)
+{
+       return nft_register_expr(&nft_limit_type);
+}
+
+static void __exit nft_limit_module_exit(void)
+{
+       nft_unregister_expr(&nft_limit_type);
+}
+
+module_init(nft_limit_module_init);
+module_exit(nft_limit_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("limit");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
new file mode 100644 (file)
index 0000000..57cad07
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <linux/netdevice.h>
+
+static const char *nft_log_null_prefix = "";
+
+struct nft_log {
+       struct nf_loginfo       loginfo;
+       char                    *prefix;
+       int                     family;
+};
+
+static void nft_log_eval(const struct nft_expr *expr,
+                        struct nft_data data[NFT_REG_MAX + 1],
+                        const struct nft_pktinfo *pkt)
+{
+       const struct nft_log *priv = nft_expr_priv(expr);
+       struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+       nf_log_packet(net, priv->family, pkt->hooknum, pkt->skb, pkt->in,
+                     pkt->out, &priv->loginfo, "%s", priv->prefix);
+}
+
+static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
+       [NFTA_LOG_GROUP]        = { .type = NLA_U16 },
+       [NFTA_LOG_PREFIX]       = { .type = NLA_STRING },
+       [NFTA_LOG_SNAPLEN]      = { .type = NLA_U32 },
+       [NFTA_LOG_QTHRESHOLD]   = { .type = NLA_U16 },
+};
+
+static int nft_log_init(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_log *priv = nft_expr_priv(expr);
+       struct nf_loginfo *li = &priv->loginfo;
+       const struct nlattr *nla;
+
+       priv->family = ctx->afi->family;
+
+       nla = tb[NFTA_LOG_PREFIX];
+       if (nla != NULL) {
+               priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+               if (priv->prefix == NULL)
+                       return -ENOMEM;
+               nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
+       } else
+               priv->prefix = (char *)nft_log_null_prefix;
+
+       li->type = NF_LOG_TYPE_ULOG;
+       if (tb[NFTA_LOG_GROUP] != NULL)
+               li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
+
+       if (tb[NFTA_LOG_SNAPLEN] != NULL)
+               li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
+       if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
+               li->u.ulog.qthreshold =
+                       ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+       }
+
+       return 0;
+}
+
+static void nft_log_destroy(const struct nft_expr *expr)
+{
+       struct nft_log *priv = nft_expr_priv(expr);
+
+       if (priv->prefix != nft_log_null_prefix)
+               kfree(priv->prefix);
+}
+
+static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_log *priv = nft_expr_priv(expr);
+       const struct nf_loginfo *li = &priv->loginfo;
+
+       if (priv->prefix != nft_log_null_prefix)
+               if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
+                       goto nla_put_failure;
+       if (li->u.ulog.group)
+               if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
+                       goto nla_put_failure;
+       if (li->u.ulog.copy_len)
+               if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
+                                htonl(li->u.ulog.copy_len)))
+                       goto nla_put_failure;
+       if (li->u.ulog.qthreshold)
+               if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
+                                htons(li->u.ulog.qthreshold)))
+                       goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_log_type;
+static const struct nft_expr_ops nft_log_ops = {
+       .type           = &nft_log_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_log)),
+       .eval           = nft_log_eval,
+       .init           = nft_log_init,
+       .destroy        = nft_log_destroy,
+       .dump           = nft_log_dump,
+};
+
+static struct nft_expr_type nft_log_type __read_mostly = {
+       .name           = "log",
+       .ops            = &nft_log_ops,
+       .policy         = nft_log_policy,
+       .maxattr        = NFTA_LOG_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_log_module_init(void)
+{
+       return nft_register_expr(&nft_log_type);
+}
+
+static void __exit nft_log_module_exit(void)
+{
+       nft_unregister_expr(&nft_log_type);
+}
+
+module_init(nft_log_module_init);
+module_exit(nft_log_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("log");
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
new file mode 100644 (file)
index 0000000..8a6116b
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_lookup {
+       struct nft_set                  *set;
+       enum nft_registers              sreg:8;
+       enum nft_registers              dreg:8;
+       struct nft_set_binding          binding;
+};
+
+static void nft_lookup_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       const struct nft_lookup *priv = nft_expr_priv(expr);
+       const struct nft_set *set = priv->set;
+
+       if (set->ops->lookup(set, &data[priv->sreg], &data[priv->dreg]))
+               return;
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
+       [NFTA_LOOKUP_SET]       = { .type = NLA_STRING },
+       [NFTA_LOOKUP_SREG]      = { .type = NLA_U32 },
+       [NFTA_LOOKUP_DREG]      = { .type = NLA_U32 },
+};
+
+static int nft_lookup_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
+       struct nft_set *set;
+       int err;
+
+       if (tb[NFTA_LOOKUP_SET] == NULL ||
+           tb[NFTA_LOOKUP_SREG] == NULL)
+               return -EINVAL;
+
+       set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
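+       /* Map sets require a destination register and plain sets must not
+        * have one; verdict data may only be loaded into the verdict
+        * register, and only verdict data may be loaded there.
+        */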
+       if (tb[NFTA_LOOKUP_DREG] != NULL) {
+               if (!(set->flags & NFT_SET_MAP))
+                       return -EINVAL;
+
+               priv->dreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_DREG]));
+               err = nft_validate_output_register(priv->dreg);
+               if (err < 0)
+                       return err;
+
+               if (priv->dreg == NFT_REG_VERDICT) {
+                       if (set->dtype != NFT_DATA_VERDICT)
+                               return -EINVAL;
+               } else if (set->dtype == NFT_DATA_VERDICT)
+                       return -EINVAL;
+       } else if (set->flags & NFT_SET_MAP)
+               return -EINVAL;
+
+       err = nf_tables_bind_set(ctx, set, &priv->binding);
+       if (err < 0)
+               return err;
+
+       priv->set = set;
+       return 0;
+}
+
+static void nft_lookup_destroy(const struct nft_expr *expr)
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
+
+       nf_tables_unbind_set(NULL, priv->set, &priv->binding);
+}
+
+static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_lookup *priv = nft_expr_priv(expr);
+
+       if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_LOOKUP_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (priv->set->flags & NFT_SET_MAP)
+               if (nla_put_be32(skb, NFTA_LOOKUP_DREG, htonl(priv->dreg)))
+                       goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_lookup_type;
+static const struct nft_expr_ops nft_lookup_ops = {
+       .type           = &nft_lookup_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
+       .eval           = nft_lookup_eval,
+       .init           = nft_lookup_init,
+       .destroy        = nft_lookup_destroy,
+       .dump           = nft_lookup_dump,
+};
+
+static struct nft_expr_type nft_lookup_type __read_mostly = {
+       .name           = "lookup",
+       .ops            = &nft_lookup_ops,
+       .policy         = nft_lookup_policy,
+       .maxattr        = NFTA_LOOKUP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_lookup_module_init(void)
+{
+       return nft_register_expr(&nft_lookup_type);
+}
+
+void nft_lookup_module_exit(void)
+{
+       nft_unregister_expr(&nft_lookup_type);
+}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
new file mode 100644 (file)
index 0000000..8c28220
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+       enum nft_meta_keys      key:8;
+       enum nft_registers      dreg:8;
+};
+
+static void nft_meta_eval(const struct nft_expr *expr,
+                         struct nft_data data[NFT_REG_MAX + 1],
+                         const struct nft_pktinfo *pkt)
+{
+       const struct nft_meta *priv = nft_expr_priv(expr);
+       const struct sk_buff *skb = pkt->skb;
+       const struct net_device *in = pkt->in, *out = pkt->out;
+       struct nft_data *dest = &data[priv->dreg];
+
+       switch (priv->key) {
+       case NFT_META_LEN:
+               dest->data[0] = skb->len;
+               break;
+       case NFT_META_PROTOCOL:
+               *(__be16 *)dest->data = skb->protocol;
+               break;
+       case NFT_META_PRIORITY:
+               dest->data[0] = skb->priority;
+               break;
+       case NFT_META_MARK:
+               dest->data[0] = skb->mark;
+               break;
+       case NFT_META_IIF:
+               if (in == NULL)
+                       goto err;
+               dest->data[0] = in->ifindex;
+               break;
+       case NFT_META_OIF:
+               if (out == NULL)
+                       goto err;
+               dest->data[0] = out->ifindex;
+               break;
+       case NFT_META_IIFNAME:
+               if (in == NULL)
+                       goto err;
+               strncpy((char *)dest->data, in->name, sizeof(dest->data));
+               break;
+       case NFT_META_OIFNAME:
+               if (out == NULL)
+                       goto err;
+               strncpy((char *)dest->data, out->name, sizeof(dest->data));
+               break;
+       case NFT_META_IIFTYPE:
+               if (in == NULL)
+                       goto err;
+               *(u16 *)dest->data = in->type;
+               break;
+       case NFT_META_OIFTYPE:
+               if (out == NULL)
+                       goto err;
+               *(u16 *)dest->data = out->type;
+               break;
+       case NFT_META_SKUID:
+               if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+                       goto err;
+
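+               /* sk_callback_lock keeps sk_socket and its file attached
+                * while the owning credentials are read.
+                */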
+               read_lock_bh(&skb->sk->sk_callback_lock);
+               if (skb->sk->sk_socket == NULL ||
+                   skb->sk->sk_socket->file == NULL) {
+                       read_unlock_bh(&skb->sk->sk_callback_lock);
+                       goto err;
+               }
+
+               dest->data[0] =
+                       from_kuid_munged(&init_user_ns,
+                               skb->sk->sk_socket->file->f_cred->fsuid);
+               read_unlock_bh(&skb->sk->sk_callback_lock);
+               break;
+       case NFT_META_SKGID:
+               if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+                       goto err;
+
+               read_lock_bh(&skb->sk->sk_callback_lock);
+               if (skb->sk->sk_socket == NULL ||
+                   skb->sk->sk_socket->file == NULL) {
+                       read_unlock_bh(&skb->sk->sk_callback_lock);
+                       goto err;
+               }
+               dest->data[0] =
+                       from_kgid_munged(&init_user_ns,
+                                skb->sk->sk_socket->file->f_cred->fsgid);
+               read_unlock_bh(&skb->sk->sk_callback_lock);
+               break;
+#ifdef CONFIG_NET_CLS_ROUTE
+       case NFT_META_RTCLASSID: {
+               const struct dst_entry *dst = skb_dst(skb);
+
+               if (dst == NULL)
+                       goto err;
+               dest->data[0] = dst->tclassid;
+               break;
+       }
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+               dest->data[0] = skb->secmark;
+               break;
+#endif
+       default:
+               WARN_ON(1);
+               goto err;
+       }
+       return;
+
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+       [NFTA_META_DREG]        = { .type = NLA_U32 },
+       [NFTA_META_KEY]         = { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                        const struct nlattr * const tb[])
+{
+       struct nft_meta *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_META_DREG] == NULL ||
+           tb[NFTA_META_KEY] == NULL)
+               return -EINVAL;
+
+       priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (priv->key) {
+       case NFT_META_LEN:
+       case NFT_META_PROTOCOL:
+       case NFT_META_PRIORITY:
+       case NFT_META_MARK:
+       case NFT_META_IIF:
+       case NFT_META_OIF:
+       case NFT_META_IIFNAME:
+       case NFT_META_OIFNAME:
+       case NFT_META_IIFTYPE:
+       case NFT_META_OIFTYPE:
+       case NFT_META_SKUID:
+       case NFT_META_SKGID:
+#ifdef CONFIG_NET_CLS_ROUTE
+       case NFT_META_RTCLASSID:
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+#endif
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_meta *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_META_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_meta_type;
+static const struct nft_expr_ops nft_meta_ops = {
+       .type           = &nft_meta_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .eval           = nft_meta_eval,
+       .init           = nft_meta_init,
+       .dump           = nft_meta_dump,
+};
+
+static struct nft_expr_type nft_meta_type __read_mostly = {
+       .name           = "meta",
+       .ops            = &nft_meta_ops,
+       .policy         = nft_meta_policy,
+       .maxattr        = NFTA_META_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_meta_module_init(void)
+{
+       return nft_register_expr(&nft_meta_type);
+}
+
+static void __exit nft_meta_module_exit(void)
+{
+       nft_unregister_expr(&nft_meta_type);
+}
+
+module_init(nft_meta_module_init);
+module_exit(nft_meta_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_meta_target.c b/net/netfilter/nft_meta_target.c
new file mode 100644 (file)
index 0000000..71177df
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+       enum nft_meta_keys      key;
+};
+
+static void nft_meta_eval(const struct nft_expr *expr,
+                         struct nft_data *nfres,
+                         struct nft_data *data,
+                         const struct nft_pktinfo *pkt)
+{
+       const struct nft_meta *meta = nft_expr_priv(expr);
+       struct sk_buff *skb = pkt->skb;
+       u32 val = data->data[0];
+
+       switch (meta->key) {
+       case NFT_META_MARK:
+               skb->mark = val;
+               break;
+       case NFT_META_PRIORITY:
+               skb->priority = val;
+               break;
+       case NFT_META_NFTRACE:
+               skb->nf_trace = val;
+               break;
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+               skb->secmark = val;
+               break;
+#endif
+       default:
+               WARN_ON(1);
+       }
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+       [NFTA_META_KEY]         = { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_expr *expr, struct nlattr *tb[])
+{
+       struct nft_meta *meta = nft_expr_priv(expr);
+
+       if (tb[NFTA_META_KEY] == NULL)
+               return -EINVAL;
+
+       meta->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (meta->key) {
+       case NFT_META_MARK:
+       case NFT_META_PRIORITY:
+       case NFT_META_NFTRACE:
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+#endif
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_meta *meta = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_META_KEY, htonl(meta->key)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_ops meta_target __read_mostly = {
+       .name           = "meta",
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .owner          = THIS_MODULE,
+       .eval           = nft_meta_eval,
+       .init           = nft_meta_init,
+       .dump           = nft_meta_dump,
+       .policy         = nft_meta_policy,
+       .maxattr        = NFTA_META_MAX,
+};
+
+static int __init nft_meta_target_init(void)
+{
+       return nft_register_expr(&meta_target);
+}
+
+static void __exit nft_meta_target_exit(void)
+{
+       nft_unregister_expr(&meta_target);
+}
+
+module_init(nft_meta_target_init);
+module_exit(nft_meta_target_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
new file mode 100644 (file)
index 0000000..b0b87b2
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+struct nft_nat {
+       enum nft_registers      sreg_addr_min:8;
+       enum nft_registers      sreg_addr_max:8;
+       enum nft_registers      sreg_proto_min:8;
+       enum nft_registers      sreg_proto_max:8;
+       int                     family;
+       enum nf_nat_manip_type  type;
+};
+
+static void nft_nat_eval(const struct nft_expr *expr,
+                        struct nft_data data[NFT_REG_MAX + 1],
+                        const struct nft_pktinfo *pkt)
+{
+       const struct nft_nat *priv = nft_expr_priv(expr);
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
+       struct nf_nat_range range;
+
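+       /* Build the NAT mapping from the source registers: an optional
+        * address range and an optional protocol (port) range.
+        */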
+       memset(&range, 0, sizeof(range));
+       if (priv->sreg_addr_min) {
+               if (priv->family == AF_INET) {
+                       range.min_addr.ip = data[priv->sreg_addr_min].data[0];
+                       range.max_addr.ip = data[priv->sreg_addr_max].data[0];
+
+               } else {
+                       memcpy(range.min_addr.ip6,
+                              data[priv->sreg_addr_min].data,
+                              sizeof(struct nft_data));
+                       memcpy(range.max_addr.ip6,
+                              data[priv->sreg_addr_max].data,
+                              sizeof(struct nft_data));
+               }
+               range.flags |= NF_NAT_RANGE_MAP_IPS;
+       }
+
+       if (priv->sreg_proto_min) {
+               range.min_proto.all = data[priv->sreg_proto_min].data[0];
+               range.max_proto.all = data[priv->sreg_proto_max].data[0];
+               range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+       }
+
+       data[NFT_REG_VERDICT].verdict =
+               nf_nat_setup_info(ct, &range, priv->type);
+}
+
+static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
+       [NFTA_NAT_TYPE]          = { .type = NLA_U32 },
+       [NFTA_NAT_FAMILY]        = { .type = NLA_U32 },
+       [NFTA_NAT_REG_ADDR_MIN]  = { .type = NLA_U32 },
+       [NFTA_NAT_REG_ADDR_MAX]  = { .type = NLA_U32 },
+       [NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 },
+       [NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 },
+};
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_nat *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_NAT_TYPE] == NULL)
+               return -EINVAL;
+
+       switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
+       case NFT_NAT_SNAT:
+               priv->type = NF_NAT_MANIP_SRC;
+               break;
+       case NFT_NAT_DNAT:
+               priv->type = NF_NAT_MANIP_DST;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (tb[NFTA_NAT_FAMILY] == NULL)
+               return -EINVAL;
+
+       priv->family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
+       if (priv->family != AF_INET && priv->family != AF_INET6)
+               return -EINVAL;
+
+       if (tb[NFTA_NAT_REG_ADDR_MIN]) {
+               priv->sreg_addr_min = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_ADDR_MIN]));
+               err = nft_validate_input_register(priv->sreg_addr_min);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+               priv->sreg_addr_max = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_ADDR_MAX]));
+               err = nft_validate_input_register(priv->sreg_addr_max);
+               if (err < 0)
+                       return err;
+       } else
+               priv->sreg_addr_max = priv->sreg_addr_min;
+
+       if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+               priv->sreg_proto_min = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_PROTO_MIN]));
+               err = nft_validate_input_register(priv->sreg_proto_min);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+               priv->sreg_proto_max = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_PROTO_MAX]));
+               err = nft_validate_input_register(priv->sreg_proto_max);
+               if (err < 0)
+                       return err;
+       } else
+               priv->sreg_proto_max = priv->sreg_proto_min;
+
+       return 0;
+}
+
+static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_nat *priv = nft_expr_priv(expr);
+
+       switch (priv->type) {
+       case NF_NAT_MANIP_SRC:
+               if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_SNAT)))
+                       goto nla_put_failure;
+               break;
+       case NF_NAT_MANIP_DST:
+               if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_DNAT)))
+                       goto nla_put_failure;
+               break;
+       }
+
+       if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_nat_type;
+static const struct nft_expr_ops nft_nat_ops = {
+       .type           = &nft_nat_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
+       .eval           = nft_nat_eval,
+       .init           = nft_nat_init,
+       .dump           = nft_nat_dump,
+};
+
+static struct nft_expr_type nft_nat_type __read_mostly = {
+       .name           = "nat",
+       .ops            = &nft_nat_ops,
+       .policy         = nft_nat_policy,
+       .maxattr        = NFTA_NAT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_nat_module_init(void)
+{
+       int err;
+
+       err = nft_register_expr(&nft_nat_type);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit nft_nat_module_exit(void)
+{
+       nft_unregister_expr(&nft_nat_type);
+}
+
+module_init(nft_nat_module_init);
+module_exit(nft_nat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_EXPR("nat");
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
new file mode 100644 (file)
index 0000000..a2aeb31
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+static void nft_payload_eval(const struct nft_expr *expr,
+                            struct nft_data data[NFT_REG_MAX + 1],
+                            const struct nft_pktinfo *pkt)
+{
+       const struct nft_payload *priv = nft_expr_priv(expr);
+       const struct sk_buff *skb = pkt->skb;
+       struct nft_data *dest = &data[priv->dreg];
+       int offset;
+
+       switch (priv->base) {
+       case NFT_PAYLOAD_LL_HEADER:
+               if (!skb_mac_header_was_set(skb))
+                       goto err;
+               offset = skb_mac_header(skb) - skb->data;
+               break;
+       case NFT_PAYLOAD_NETWORK_HEADER:
+               offset = skb_network_offset(skb);
+               break;
+       case NFT_PAYLOAD_TRANSPORT_HEADER:
+               offset = pkt->xt.thoff;
+               break;
+       default:
+               BUG();
+       }
+       offset += priv->offset;
+
+       if (skb_copy_bits(skb, offset, dest->data, priv->len) < 0)
+               goto err;
+       return;
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
+       [NFTA_PAYLOAD_DREG]     = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_BASE]     = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_OFFSET]   = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_LEN]      = { .type = NLA_U32 },
+};
+
+static int nft_payload_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
+{
+       struct nft_payload *priv = nft_expr_priv(expr);
+       int err;
+
+       priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+       priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+       priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_payload *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_PAYLOAD_DREG, htonl(priv->dreg)) ||
+           nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
+           nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
+           nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_payload_type;
+static const struct nft_expr_ops nft_payload_ops = {
+       .type           = &nft_payload_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+       .eval           = nft_payload_eval,
+       .init           = nft_payload_init,
+       .dump           = nft_payload_dump,
+};
+
+const struct nft_expr_ops nft_payload_fast_ops = {
+       .type           = &nft_payload_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+       .eval           = nft_payload_eval,
+       .init           = nft_payload_init,
+       .dump           = nft_payload_dump,
+};
+
+static const struct nft_expr_ops *
+nft_payload_select_ops(const struct nft_ctx *ctx,
+                      const struct nlattr * const tb[])
+{
+       enum nft_payload_bases base;
+       unsigned int offset, len;
+
+       if (tb[NFTA_PAYLOAD_DREG] == NULL ||
+           tb[NFTA_PAYLOAD_BASE] == NULL ||
+           tb[NFTA_PAYLOAD_OFFSET] == NULL ||
+           tb[NFTA_PAYLOAD_LEN] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+       switch (base) {
+       case NFT_PAYLOAD_LL_HEADER:
+       case NFT_PAYLOAD_NETWORK_HEADER:
+       case NFT_PAYLOAD_TRANSPORT_HEADER:
+               break;
+       default:
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
+       offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+       len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+       if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
+               return ERR_PTR(-EINVAL);
+
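+       /* Loads of up to four bytes at a naturally aligned offset from the
+        * network or transport header may use the fast ops; everything else
+        * falls back to the generic payload expression.
+        */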
+       if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+               return &nft_payload_fast_ops;
+       else
+               return &nft_payload_ops;
+}
+
+static struct nft_expr_type nft_payload_type __read_mostly = {
+       .name           = "payload",
+       .select_ops     = nft_payload_select_ops,
+       .policy         = nft_payload_policy,
+       .maxattr        = NFTA_PAYLOAD_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_payload_module_init(void)
+{
+       return nft_register_expr(&nft_payload_type);
+}
+
+void nft_payload_module_exit(void)
+{
+       nft_unregister_expr(&nft_payload_type);
+}
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
new file mode 100644 (file)
index 0000000..ca0c1b2
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_rbtree {
+       struct rb_root          root;
+};
+
+struct nft_rbtree_elem {
+       struct rb_node          node;
+       u16                     flags;
+       struct nft_data         key;
+       struct nft_data         data[];
+};
+
+static bool nft_rbtree_lookup(const struct nft_set *set,
+                             const struct nft_data *key,
+                             struct nft_data *data)
+{
+       const struct nft_rbtree *priv = nft_set_priv(set);
+       const struct nft_rbtree_elem *rbe, *interval = NULL;
+       const struct rb_node *parent = priv->root.rb_node;
+       int d;
+
+       while (parent != NULL) {
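+       /* Walk the tree: an exact match returns directly unless the element
+        * marks the end of an interval; for interval sets the last element
+        * seen with a key below the lookup key is used when there is no
+        * exact match.
+        */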
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+               d = nft_data_cmp(&rbe->key, key, set->klen);
+               if (d < 0) {
+                       parent = parent->rb_left;
+                       interval = rbe;
+               } else if (d > 0)
+                       parent = parent->rb_right;
+               else {
+found:
+                       if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+                               goto out;
+                       if (set->flags & NFT_SET_MAP)
+                               nft_data_copy(data, rbe->data);
+                       return true;
+               }
+       }
+
+       if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
+               rbe = interval;
+               goto found;
+       }
+out:
+       return false;
+}
+
+static void nft_rbtree_elem_destroy(const struct nft_set *set,
+                                   struct nft_rbtree_elem *rbe)
+{
+       nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(rbe->data, set->dtype);
+       kfree(rbe);
+}
+
+static int __nft_rbtree_insert(const struct nft_set *set,
+                              struct nft_rbtree_elem *new)
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree_elem *rbe;
+       struct rb_node *parent, **p;
+       int d;
+
+       parent = NULL;
+       p = &priv->root.rb_node;
+       while (*p != NULL) {
+               parent = *p;
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+               d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+               if (d < 0)
+                       p = &parent->rb_left;
+               else if (d > 0)
+                       p = &parent->rb_right;
+               else
+                       return -EEXIST;
+       }
+       rb_link_node(&new->node, parent, p);
+       rb_insert_color(&new->node, &priv->root);
+       return 0;
+}
+
+static int nft_rbtree_insert(const struct nft_set *set,
+                            const struct nft_set_elem *elem)
+{
+       struct nft_rbtree_elem *rbe;
+       unsigned int size;
+       int err;
+
+       size = sizeof(*rbe);
+       if (set->flags & NFT_SET_MAP)
+               size += sizeof(rbe->data[0]);
+
+       rbe = kzalloc(size, GFP_KERNEL);
+       if (rbe == NULL)
+               return -ENOMEM;
+
+       rbe->flags = elem->flags;
+       nft_data_copy(&rbe->key, &elem->key);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_copy(rbe->data, &elem->data);
+
+       err = __nft_rbtree_insert(set, rbe);
+       if (err < 0)
+               kfree(rbe);
+       return err;
+}
+
+static void nft_rbtree_remove(const struct nft_set *set,
+                             const struct nft_set_elem *elem)
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree_elem *rbe = elem->cookie;
+
+       rb_erase(&rbe->node, &priv->root);
+       kfree(rbe);
+}
+
+static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+       const struct nft_rbtree *priv = nft_set_priv(set);
+       const struct rb_node *parent = priv->root.rb_node;
+       struct nft_rbtree_elem *rbe;
+       int d;
+
+       while (parent != NULL) {
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+               d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+               if (d < 0)
+                       parent = parent->rb_left;
+               else if (d > 0)
+                       parent = parent->rb_right;
+               else {
+                       elem->cookie = rbe;
+                       if (set->flags & NFT_SET_MAP)
+                               nft_data_copy(&elem->data, rbe->data);
+                       elem->flags = rbe->flags;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+static void nft_rbtree_walk(const struct nft_ctx *ctx,
+                           const struct nft_set *set,
+                           struct nft_set_iter *iter)
+{
+       const struct nft_rbtree *priv = nft_set_priv(set);
+       const struct nft_rbtree_elem *rbe;
+       struct nft_set_elem elem;
+       struct rb_node *node;
+
+       for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+               if (iter->count < iter->skip)
+                       goto cont;
+
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
+               nft_data_copy(&elem.key, &rbe->key);
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(&elem.data, rbe->data);
+               elem.flags = rbe->flags;
+
+               iter->err = iter->fn(ctx, set, iter, &elem);
+               if (iter->err < 0)
+                       return;
+cont:
+               iter->count++;
+       }
+}
+
+static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
+{
+       return sizeof(struct nft_rbtree);
+}
+
+static int nft_rbtree_init(const struct nft_set *set,
+                          const struct nlattr * const nla[])
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+
+       priv->root = RB_ROOT;
+       return 0;
+}
+
+static void nft_rbtree_destroy(const struct nft_set *set)
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree_elem *rbe;
+       struct rb_node *node;
+
+       while ((node = priv->root.rb_node) != NULL) {
+               rb_erase(node, &priv->root);
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
+               nft_rbtree_elem_destroy(set, rbe);
+       }
+}
+
+static struct nft_set_ops nft_rbtree_ops __read_mostly = {
+       .privsize       = nft_rbtree_privsize,
+       .init           = nft_rbtree_init,
+       .destroy        = nft_rbtree_destroy,
+       .insert         = nft_rbtree_insert,
+       .remove         = nft_rbtree_remove,
+       .get            = nft_rbtree_get,
+       .lookup         = nft_rbtree_lookup,
+       .walk           = nft_rbtree_walk,
+       .features       = NFT_SET_INTERVAL | NFT_SET_MAP,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_rbtree_module_init(void)
+{
+       return nft_register_set(&nft_rbtree_ops);
+}
+
+static void __exit nft_rbtree_module_exit(void)
+{
+       nft_unregister_set(&nft_rbtree_ops);
+}
+
+module_init(nft_rbtree_module_init);
+module_exit(nft_rbtree_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
index 8b03028cca69b616df298e0ffbfdeb247441a341..227aa11e8409bb18be93c0c432a4d71cfb6e8f38 100644 (file)
@@ -845,8 +845,13 @@ xt_replace_table(struct xt_table *table,
                return NULL;
        }
 
-       table->private = newinfo;
        newinfo->initial_entries = private->initial_entries;
+       /*
+        * Ensure contents of newinfo are visible before assigning to
+        * private.
+        */
+       smp_wmb();
+       table->private = newinfo;
 
        /*
         * Even though table entries have now been swapped, other CPU's
index cd24290f3b2fdbff20088a0ffd2692fb6240a9ce..e762de5ee89bfa480b1789ce164cb024ac35e91e 100644 (file)
@@ -43,10 +43,42 @@ optlen(const u_int8_t *opt, unsigned int offset)
                return opt[offset+1];
 }
 
+static u_int32_t tcpmss_reverse_mtu(struct net *net,
+                                   const struct sk_buff *skb,
+                                   unsigned int family)
+{
+       struct flowi fl;
+       const struct nf_afinfo *ai;
+       struct rtable *rt = NULL;
+       u_int32_t mtu     = ~0U;
+
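+       /* Route a flow back to the packet's source address in its own
+        * network namespace and use the MTU of the resulting path as the
+        * reverse-path MTU.
+        */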
+       if (family == PF_INET) {
+               struct flowi4 *fl4 = &fl.u.ip4;
+               memset(fl4, 0, sizeof(*fl4));
+               fl4->daddr = ip_hdr(skb)->saddr;
+       } else {
+               struct flowi6 *fl6 = &fl.u.ip6;
+
+               memset(fl6, 0, sizeof(*fl6));
+               fl6->daddr = ipv6_hdr(skb)->saddr;
+       }
+       rcu_read_lock();
+       ai = nf_get_afinfo(family);
+       if (ai != NULL)
+               ai->route(net, (struct dst_entry **)&rt, &fl, false);
+       rcu_read_unlock();
+
+       if (rt != NULL) {
+               mtu = dst_mtu(&rt->dst);
+               dst_release(&rt->dst);
+       }
+       return mtu;
+}
+
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
                     const struct xt_action_param *par,
-                    unsigned int in_mtu,
+                    unsigned int family,
                     unsigned int tcphoff,
                     unsigned int minlen)
 {
@@ -76,6 +108,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
                return -1;
 
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
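+               /* The reverse-path MTU is only needed when clamping to the
+                * path MTU, so resolve it here rather than unconditionally
+                * in the callers.
+                */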
+               struct net *net = dev_net(par->in ? par->in : par->out);
+               unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+
                if (dst_mtu(skb_dst(skb)) <= minlen) {
                        net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
                                            dst_mtu(skb_dst(skb)));
@@ -165,37 +200,6 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        return TCPOLEN_MSS;
 }
 
-static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
-                                   unsigned int family)
-{
-       struct flowi fl;
-       const struct nf_afinfo *ai;
-       struct rtable *rt = NULL;
-       u_int32_t mtu     = ~0U;
-
-       if (family == PF_INET) {
-               struct flowi4 *fl4 = &fl.u.ip4;
-               memset(fl4, 0, sizeof(*fl4));
-               fl4->daddr = ip_hdr(skb)->saddr;
-       } else {
-               struct flowi6 *fl6 = &fl.u.ip6;
-
-               memset(fl6, 0, sizeof(*fl6));
-               fl6->daddr = ipv6_hdr(skb)->saddr;
-       }
-       rcu_read_lock();
-       ai = nf_get_afinfo(family);
-       if (ai != NULL)
-               ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
-       rcu_read_unlock();
-
-       if (rt != NULL) {
-               mtu = dst_mtu(&rt->dst);
-               dst_release(&rt->dst);
-       }
-       return mtu;
-}
-
 static unsigned int
 tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -204,7 +208,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        int ret;
 
        ret = tcpmss_mangle_packet(skb, par,
-                                  tcpmss_reverse_mtu(skb, PF_INET),
+                                  PF_INET,
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
        if (ret < 0)
@@ -233,7 +237,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par,
-                                  tcpmss_reverse_mtu(skb, PF_INET6),
+                                  PF_INET6,
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
        if (ret < 0)
index 5d8a3a3cd5a7cd04b714d92a1366c04ad464e716..ef8a926752a97542f6f2f8eeb378e150958bff3d 100644 (file)
@@ -200,7 +200,7 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
                                     in->ifindex);
                if (sk) {
                        int connected = (sk->sk_state == TCP_ESTABLISHED);
-                       int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+                       int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr);
 
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
index 31790e789e224f8fb758ee87e62f8dd8908e81dd..e7c4e0e01ff5de40fb32cdeddd70cb204b84ea32 100644 (file)
@@ -81,7 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
        struct xt_set_info_match_v0 *info = par->matchinfo;
        ip_set_id_t index;
 
-       index = ip_set_nfnl_get_byindex(info->match_set.index);
+       index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
 
        if (index == IPSET_INVALID_ID) {
                pr_warning("Cannot find set indentified by id %u to match\n",
@@ -91,7 +91,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
        if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
                pr_warning("Protocol error: set match dimension "
                           "is over the limit!\n");
-               ip_set_nfnl_put(info->match_set.index);
+               ip_set_nfnl_put(par->net, info->match_set.index);
                return -ERANGE;
        }
 
@@ -106,9 +106,104 @@ set_match_v0_destroy(const struct xt_mtdtor_param *par)
 {
        struct xt_set_info_match_v0 *info = par->matchinfo;
 
-       ip_set_nfnl_put(info->match_set.index);
+       ip_set_nfnl_put(par->net, info->match_set.index);
 }
 
+/* Revision 1 match */
+
+static bool
+set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match_v1 *info = par->matchinfo;
+       ADT_OPT(opt, par->family, info->match_set.dim,
+               info->match_set.flags, 0, UINT_MAX);
+
+       if (opt.flags & IPSET_RETURN_NOMATCH)
+               opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
+
+       return match_set(info->match_set.index, skb, par, &opt,
+                        info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_v1_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_set_info_match_v1 *info = par->matchinfo;
+       ip_set_id_t index;
+
+       index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
+
+       if (index == IPSET_INVALID_ID) {
+               pr_warning("Cannot find set indentified by id %u to match\n",
+                          info->match_set.index);
+               return -ENOENT;
+       }
+       if (info->match_set.dim > IPSET_DIM_MAX) {
+               pr_warning("Protocol error: set match dimension "
+                          "is over the limit!\n");
+               ip_set_nfnl_put(par->net, info->match_set.index);
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static void
+set_match_v1_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_set_info_match_v1 *info = par->matchinfo;
+
+       ip_set_nfnl_put(par->net, info->match_set.index);
+}
+
+/* Revision 3 match */
+
+static bool
+match_counter(u64 counter, const struct ip_set_counter_match *info)
+{
+       switch (info->op) {
+       case IPSET_COUNTER_NONE:
+               return true;
+       case IPSET_COUNTER_EQ:
+               return counter == info->value;
+       case IPSET_COUNTER_NE:
+               return counter != info->value;
+       case IPSET_COUNTER_LT:
+               return counter < info->value;
+       case IPSET_COUNTER_GT:
+               return counter > info->value;
+       }
+       return false;
+}
+
+static bool
+set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match_v3 *info = par->matchinfo;
+       ADT_OPT(opt, par->family, info->match_set.dim,
+               info->match_set.flags, info->flags, UINT_MAX);
+       int ret;
+
+       if (info->packets.op != IPSET_COUNTER_NONE ||
+           info->bytes.op != IPSET_COUNTER_NONE)
+               opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
+
+       ret = match_set(info->match_set.index, skb, par, &opt,
+                       info->match_set.flags & IPSET_INV_MATCH);
+
+       if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
+               return ret;
+
+       if (!match_counter(opt.ext.packets, &info->packets))
+               return 0;
+       return match_counter(opt.ext.bytes, &info->bytes);
+}
+
+#define set_match_v3_checkentry        set_match_v1_checkentry
+#define set_match_v3_destroy   set_match_v1_destroy
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
 static unsigned int
 set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -133,7 +228,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        ip_set_id_t index;
 
        if (info->add_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find add_set index %u as target\n",
                                   info->add_set.index);
@@ -142,12 +237,12 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        }
 
        if (info->del_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find del_set index %u as target\n",
                                   info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
-                               ip_set_nfnl_put(info->add_set.index);
+                               ip_set_nfnl_put(par->net, info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -156,9 +251,9 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
                pr_warning("Protocol error: SET target dimension "
                           "is over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->add_set.index);
+                       ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->del_set.index);
+                       ip_set_nfnl_put(par->net, info->del_set.index);
                return -ERANGE;
        }
 
@@ -175,57 +270,12 @@ set_target_v0_destroy(const struct xt_tgdtor_param *par)
        const struct xt_set_info_target_v0 *info = par->targinfo;
 
        if (info->add_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->add_set.index);
+               ip_set_nfnl_put(par->net, info->add_set.index);
        if (info->del_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->del_set.index);
+               ip_set_nfnl_put(par->net, info->del_set.index);
 }
 
-/* Revision 1 match and target */
-
-static bool
-set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       const struct xt_set_info_match_v1 *info = par->matchinfo;
-       ADT_OPT(opt, par->family, info->match_set.dim,
-               info->match_set.flags, 0, UINT_MAX);
-
-       if (opt.flags & IPSET_RETURN_NOMATCH)
-               opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
-
-       return match_set(info->match_set.index, skb, par, &opt,
-                        info->match_set.flags & IPSET_INV_MATCH);
-}
-
-static int
-set_match_v1_checkentry(const struct xt_mtchk_param *par)
-{
-       struct xt_set_info_match_v1 *info = par->matchinfo;
-       ip_set_id_t index;
-
-       index = ip_set_nfnl_get_byindex(info->match_set.index);
-
-       if (index == IPSET_INVALID_ID) {
-               pr_warning("Cannot find set indentified by id %u to match\n",
-                          info->match_set.index);
-               return -ENOENT;
-       }
-       if (info->match_set.dim > IPSET_DIM_MAX) {
-               pr_warning("Protocol error: set match dimension "
-                          "is over the limit!\n");
-               ip_set_nfnl_put(info->match_set.index);
-               return -ERANGE;
-       }
-
-       return 0;
-}
-
-static void
-set_match_v1_destroy(const struct xt_mtdtor_param *par)
-{
-       struct xt_set_info_match_v1 *info = par->matchinfo;
-
-       ip_set_nfnl_put(info->match_set.index);
-}
+/* Revision 1 target */
 
 static unsigned int
 set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
@@ -251,7 +301,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        ip_set_id_t index;
 
        if (info->add_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find add_set index %u as target\n",
                                   info->add_set.index);
@@ -260,12 +310,12 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        }
 
        if (info->del_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find del_set index %u as target\n",
                                   info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
-                               ip_set_nfnl_put(info->add_set.index);
+                               ip_set_nfnl_put(par->net, info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -274,9 +324,9 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
                pr_warning("Protocol error: SET target dimension "
                           "is over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->add_set.index);
+                       ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->del_set.index);
+                       ip_set_nfnl_put(par->net, info->del_set.index);
                return -ERANGE;
        }
 
@@ -289,9 +339,9 @@ set_target_v1_destroy(const struct xt_tgdtor_param *par)
        const struct xt_set_info_target_v1 *info = par->targinfo;
 
        if (info->add_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->add_set.index);
+               ip_set_nfnl_put(par->net, info->add_set.index);
        if (info->del_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->del_set.index);
+               ip_set_nfnl_put(par->net, info->del_set.index);
 }
 
 /* Revision 2 target */
@@ -320,52 +370,6 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 #define set_target_v2_checkentry       set_target_v1_checkentry
 #define set_target_v2_destroy          set_target_v1_destroy
 
-/* Revision 3 match */
-
-static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
-{
-       switch (info->op) {
-       case IPSET_COUNTER_NONE:
-               return true;
-       case IPSET_COUNTER_EQ:
-               return counter == info->value;
-       case IPSET_COUNTER_NE:
-               return counter != info->value;
-       case IPSET_COUNTER_LT:
-               return counter < info->value;
-       case IPSET_COUNTER_GT:
-               return counter > info->value;
-       }
-       return false;
-}
-
-static bool
-set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       const struct xt_set_info_match_v3 *info = par->matchinfo;
-       ADT_OPT(opt, par->family, info->match_set.dim,
-               info->match_set.flags, info->flags, UINT_MAX);
-       int ret;
-
-       if (info->packets.op != IPSET_COUNTER_NONE ||
-           info->bytes.op != IPSET_COUNTER_NONE)
-               opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
-
-       ret = match_set(info->match_set.index, skb, par, &opt,
-                       info->match_set.flags & IPSET_INV_MATCH);
-
-       if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
-               return ret;
-
-       if (!match_counter(opt.ext.packets, &info->packets))
-               return 0;
-       return match_counter(opt.ext.bytes, &info->bytes);
-}
-
-#define set_match_v3_checkentry        set_match_v1_checkentry
-#define set_match_v3_destroy   set_match_v1_destroy
-
 static struct xt_match set_matches[] __read_mostly = {
        {
                .name           = "set",
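
The revision 3 match added above can additionally filter on the per-entry packet and byte counters: when either counter operator is configured, IPSET_FLAG_MATCH_COUNTERS is requested, the set code fills opt.ext, and match_counter() applies the configured comparison. A standalone sketch of that comparison logic (plain userspace C; the enum is a hypothetical stand-in for the IPSET_COUNTER_* operators):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mirror of the IPSET_COUNTER_* operators used above. */
    enum counter_op { OP_NONE, OP_EQ, OP_NE, OP_LT, OP_GT };

    struct counter_match {
            enum counter_op op;
            uint64_t value;
    };

    static int counter_matches(uint64_t counter, const struct counter_match *m)
    {
            switch (m->op) {
            case OP_NONE:   return 1;                   /* no constraint set */
            case OP_EQ:     return counter == m->value;
            case OP_NE:     return counter != m->value;
            case OP_LT:     return counter <  m->value;
            case OP_GT:     return counter >  m->value;
            }
            return 0;
    }

    int main(void)
    {
            struct counter_match m = { OP_GT, 1000 };

            printf("%d %d\n", counter_matches(1500, &m), counter_matches(10, &m));
            return 0;
    }
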
index 06df2b9110f5f2b9376c19e34241c257b31e5f63..3dd0e374bc2b3952f845470c67e3a8f77391ff23 100644 (file)
@@ -370,7 +370,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
                 */
                wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
                            sk->sk_state != TCP_TIME_WAIT &&
-                           ipv6_addr_any(&inet6_sk(sk)->rcv_saddr));
+                           ipv6_addr_any(&sk->sk_v6_rcv_saddr));
 
                /* Ignore non-transparent sockets,
                   if XT_SOCKET_TRANSPARENT is used */
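
Both the tproxy lookup earlier and the socket match above now read the bound IPv6 address from sk->sk_v6_rcv_saddr, which this series moves into struct sock_common, instead of dereferencing inet6_sk(sk); the wildcard semantics are unchanged, a listener bound to :: still counts as a wildcard. A trivial userspace equivalent of the ipv6_addr_any() test, for illustration:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>

    /* True if the address is the IPv6 unspecified address (::). */
    static int addr_is_any(const struct in6_addr *a)
    {
            return memcmp(a, &in6addr_any, sizeof(*a)) == 0;
    }

    int main(void)
    {
            struct in6_addr any = IN6ADDR_ANY_INIT;

            printf("wildcard: %d\n", addr_is_any(&any));    /* prints 1 */
            return 0;
    }
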
index 96a458e12f60ada2186c08ebbafe22c6e68ba06e..dce1bebf7aecb98f4cfae0beb45a9b3c576c21d3 100644 (file)
@@ -817,7 +817,7 @@ int netlbl_req_setattr(struct request_sock *req,
        switch (req->rsk_ops->family) {
        case AF_INET:
                entry = netlbl_domhsh_getentry_af4(secattr->domain,
-                                                  inet_rsk(req)->rmt_addr);
+                                                  inet_rsk(req)->ir_rmt_addr);
                if (entry == NULL) {
                        ret_val = -ENOENT;
                        goto req_setattr_return;
index a481c03e2861f7bcc1de1ec65b56cbc142a2f73d..56e22b74cf965f57ceb90ad0be84b378bbac093f 100644 (file)
@@ -173,7 +173,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        skb->local_df = 1;
 
-       inet_get_local_port_range(&port_min, &port_max);
+       inet_get_local_port_range(net, &port_min, &port_max);
        src_port = vxlan_src_port(port_min, port_max, skb);
 
        err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
index 642ad42c416ba915680645d8bc3539e5bdee4bf5..378c3a6acf84cab59346ab832d2fffba33e6e543 100644 (file)
@@ -51,10 +51,16 @@ static struct kmem_cache *rds_conn_slab;
 
 static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
 {
+       static u32 rds_hash_secret __read_mostly;
+
+       unsigned long hash;
+
+       net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
+
        /* Pass NULL, don't need struct net for hash */
-       unsigned long hash = inet_ehashfn(NULL,
-                                         be32_to_cpu(laddr), 0,
-                                         be32_to_cpu(faddr), 0);
+       hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
+                             be32_to_cpu(faddr), 0,
+                             rds_hash_secret);
        return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
 }
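
rds_conn_bucket() above now mixes a lazily initialized random secret into the hash (net_get_random_once() fills it the first time it is needed) instead of reusing inet_ehashfn(), so RDS connection bucketing is seeded per boot. The pattern is: fetch the secret once, hash both endpoint addresses together with it, then mask into a power-of-two bucket array. A self-contained userspace sketch of that pattern (the mixing function is an illustrative stand-in, not __inet_ehashfn()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define HASH_BITS 7
    #define HASH_MASK ((1u << HASH_BITS) - 1)

    /* Stand-in for net_get_random_once(): fill the secret on first use only. */
    static uint32_t get_secret(void)
    {
            static uint32_t secret;
            static int done;

            if (!done) {
                    srand((unsigned)time(NULL));
                    secret = (uint32_t)rand();
                    done = 1;
            }
            return secret;
    }

    /* Illustrative mixer; the kernel uses the jhash-based __inet_ehashfn(). */
    static uint32_t mix(uint32_t laddr, uint32_t faddr, uint32_t secret)
    {
            uint32_t h = laddr * 2654435761u ^ faddr * 2246822519u ^ secret;

            return h ^ (h >> 15);
    }

    int main(void)
    {
            uint32_t bucket = mix(0x0a000001, 0x0a000002, get_secret()) & HASH_MASK;

            printf("bucket = %u\n", bucket);
            return 0;
    }
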
 
index ec1d731ecff0e2d103b1933f5997317c2dd1403b..48f8ffc60f8f1cee8e63448fe3aa7aaac0d5d4c1 100644 (file)
@@ -749,7 +749,7 @@ void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
                    struct cmsghdr *cmsg);
 
-extern void __rds_put_mr_final(struct rds_mr *mr);
+void __rds_put_mr_final(struct rds_mr *mr);
 static inline void rds_mr_put(struct rds_mr *mr)
 {
        if (atomic_dec_and_test(&mr->r_refcount))
index a693aca2ae2eb61a5c258fa21aa07b05e20c8e53..5f43675ee1df3822ecf98e432797f9b498047b58 100644 (file)
@@ -426,17 +426,16 @@ extern struct workqueue_struct *rxrpc_workqueue;
 /*
  * ar-accept.c
  */
-extern void rxrpc_accept_incoming_calls(struct work_struct *);
-extern struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *,
-                                           unsigned long);
-extern int rxrpc_reject_call(struct rxrpc_sock *);
+void rxrpc_accept_incoming_calls(struct work_struct *);
+struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
+int rxrpc_reject_call(struct rxrpc_sock *);
 
 /*
  * ar-ack.c
  */
-extern void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_process_call(struct work_struct *);
+void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_process_call(struct work_struct *);
 
 /*
  * ar-call.c
@@ -445,19 +444,18 @@ extern struct kmem_cache *rxrpc_call_jar;
 extern struct list_head rxrpc_calls;
 extern rwlock_t rxrpc_call_lock;
 
-extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
-                                               struct rxrpc_transport *,
-                                               struct rxrpc_conn_bundle *,
-                                               unsigned long, int, gfp_t);
-extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
-                                             struct rxrpc_connection *,
-                                             struct rxrpc_header *, gfp_t);
-extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *,
-                                                unsigned long);
-extern void rxrpc_release_call(struct rxrpc_call *);
-extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
-extern void __rxrpc_put_call(struct rxrpc_call *);
-extern void __exit rxrpc_destroy_all_calls(void);
+struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
+                                        struct rxrpc_transport *,
+                                        struct rxrpc_conn_bundle *,
+                                        unsigned long, int, gfp_t);
+struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
+                                      struct rxrpc_connection *,
+                                      struct rxrpc_header *, gfp_t);
+struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
+void rxrpc_release_call(struct rxrpc_call *);
+void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
+void __rxrpc_put_call(struct rxrpc_call *);
+void __exit rxrpc_destroy_all_calls(void);
 
 /*
  * ar-connection.c
@@ -465,19 +463,16 @@ extern void __exit rxrpc_destroy_all_calls(void);
 extern struct list_head rxrpc_connections;
 extern rwlock_t rxrpc_connection_lock;
 
-extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
-                                                 struct rxrpc_transport *,
-                                                 struct key *,
-                                                 __be16, gfp_t);
-extern void rxrpc_put_bundle(struct rxrpc_transport *,
-                            struct rxrpc_conn_bundle *);
-extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
-                             struct rxrpc_conn_bundle *, struct rxrpc_call *,
-                             gfp_t);
-extern void rxrpc_put_connection(struct rxrpc_connection *);
-extern void __exit rxrpc_destroy_all_connections(void);
-extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
-                                                     struct rxrpc_header *);
+struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
+                                          struct rxrpc_transport *,
+                                          struct key *, __be16, gfp_t);
+void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
+                      struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
+void rxrpc_put_connection(struct rxrpc_connection *);
+void __exit rxrpc_destroy_all_connections(void);
+struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
+                                              struct rxrpc_header *);
 extern struct rxrpc_connection *
 rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
                          gfp_t);
@@ -485,15 +480,15 @@ rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
 /*
  * ar-connevent.c
  */
-extern void rxrpc_process_connection(struct work_struct *);
-extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-extern void rxrpc_reject_packets(struct work_struct *);
+void rxrpc_process_connection(struct work_struct *);
+void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
+void rxrpc_reject_packets(struct work_struct *);
 
 /*
  * ar-error.c
  */
-extern void rxrpc_UDP_error_report(struct sock *);
-extern void rxrpc_UDP_error_handler(struct work_struct *);
+void rxrpc_UDP_error_report(struct sock *);
+void rxrpc_UDP_error_handler(struct work_struct *);
 
 /*
  * ar-input.c
@@ -501,18 +496,17 @@ extern void rxrpc_UDP_error_handler(struct work_struct *);
 extern unsigned long rxrpc_ack_timeout;
 extern const char *rxrpc_pkts[];
 
-extern void rxrpc_data_ready(struct sock *, int);
-extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
-                              bool);
-extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_data_ready(struct sock *, int);
+int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
+void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
 
 /*
  * ar-local.c
  */
 extern rwlock_t rxrpc_local_lock;
-extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
-extern void rxrpc_put_local(struct rxrpc_local *);
-extern void __exit rxrpc_destroy_all_locals(void);
+struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
+void rxrpc_put_local(struct rxrpc_local *);
+void __exit rxrpc_destroy_all_locals(void);
 
 /*
  * ar-key.c
@@ -520,31 +514,29 @@ extern void __exit rxrpc_destroy_all_locals(void);
 extern struct key_type key_type_rxrpc;
 extern struct key_type key_type_rxrpc_s;
 
-extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *,
-                                    time_t, u32);
+int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
+int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
+int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
+                             u32);
 
 /*
  * ar-output.c
  */
 extern int rxrpc_resend_timeout;
 
-extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
-                               struct rxrpc_transport *, struct msghdr *,
-                               size_t);
-extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *,
-                               struct msghdr *, size_t);
+int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
+int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
+                        struct rxrpc_transport *, struct msghdr *, size_t);
+int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
+                        size_t);
 
 /*
  * ar-peer.c
  */
-extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
-extern void rxrpc_put_peer(struct rxrpc_peer *);
-extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *,
-                                         __be32, __be16);
-extern void __exit rxrpc_destroy_all_peers(void);
+struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
+void rxrpc_put_peer(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
+void __exit rxrpc_destroy_all_peers(void);
 
 /*
  * ar-proc.c
@@ -556,38 +548,36 @@ extern const struct file_operations rxrpc_connection_seq_fops;
 /*
  * ar-recvmsg.c
  */
-extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *,
-                        size_t, int);
+void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+                 int);
 
 /*
  * ar-security.c
  */
-extern int rxrpc_register_security(struct rxrpc_security *);
-extern void rxrpc_unregister_security(struct rxrpc_security *);
-extern int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-extern int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *,
-                              size_t, void *);
-extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *,
-                              u32 *);
-extern void rxrpc_clear_conn_security(struct rxrpc_connection *);
+int rxrpc_register_security(struct rxrpc_security *);
+void rxrpc_unregister_security(struct rxrpc_security *);
+int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
+                       void *);
+int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
+void rxrpc_clear_conn_security(struct rxrpc_connection *);
 
 /*
  * ar-skbuff.c
  */
-extern void rxrpc_packet_destructor(struct sk_buff *);
+void rxrpc_packet_destructor(struct sk_buff *);
 
 /*
  * ar-transport.c
  */
-extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
-                                                  struct rxrpc_peer *,
-                                                  gfp_t);
-extern void rxrpc_put_transport(struct rxrpc_transport *);
-extern void __exit rxrpc_destroy_all_transports(void);
-extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
-                                                   struct rxrpc_peer *);
+struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
+                                           struct rxrpc_peer *, gfp_t);
+void rxrpc_put_transport(struct rxrpc_transport *);
+void __exit rxrpc_destroy_all_transports(void);
+struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
+                                            struct rxrpc_peer *);
 
 /*
  * debug tracing
index 189e3c5b3d098a0d224a8fb2a04b1bd76e47b3ac..272d8e924cf6b2467e2772898aca2d7895131545 100644 (file)
@@ -231,14 +231,14 @@ override:
        }
        if (R_tab) {
                police->rate_present = true;
-               psched_ratecfg_precompute(&police->rate, &R_tab->rate);
+               psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
                qdisc_put_rtab(R_tab);
        } else {
                police->rate_present = false;
        }
        if (P_tab) {
                police->peak_present = true;
-               psched_ratecfg_precompute(&police->peak, &P_tab->rate);
+               psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
                qdisc_put_rtab(P_tab);
        } else {
                police->peak_present = false;
index d76a35d0dc85b82ddd9b8f2cc037b1ff0d31f5d5..636d9131d87016a46597af5cadd3471d21e1798f 100644 (file)
@@ -137,7 +137,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
                           struct nlattr **tb,
                           struct nlattr *est)
 {
-       int err = -EINVAL;
+       int err;
        struct tcf_exts e;
        struct tcf_ematch_tree t;
 
index 867b4a3e39800fb44819e3cfd68ad0fb30d6ea05..16006c92c3fde515d9047a4e32f0ec3f0df2f005 100644 (file)
@@ -72,11 +72,11 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
                        struct cgroup_taskset *tset)
 {
        struct task_struct *p;
-       void *v;
+       struct cgroup_cls_state *cs = css_cls_state(css);
+       void *v = (void *)(unsigned long)cs->classid;
 
        cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
-               v = (void *)(unsigned long)task_cls_classid(p);
                iterate_fd(p->files, 0, update_classid, v);
                task_unlock(p);
        }
index 938b7cbf56278b34c6d5574a64c8c32fa4a1d3a2..527aeb7a3ff0b3b558873d0c7d93fc2c26e100b8 100644 (file)
@@ -24,11 +24,12 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
 {
        struct xt_set_info *set = data;
        ip_set_id_t index;
+       struct net *net = dev_net(qdisc_dev(tp->q));
 
        if (data_len != sizeof(*set))
                return -EINVAL;
 
-       index = ip_set_nfnl_get_byindex(set->index);
+       index = ip_set_nfnl_get_byindex(net, set->index);
        if (index == IPSET_INVALID_ID)
                return -ENOENT;
 
@@ -37,7 +38,7 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
        if (em->data)
                return 0;
 
-       ip_set_nfnl_put(index);
+       ip_set_nfnl_put(net, index);
        return -ENOMEM;
 }
 
@@ -45,7 +46,7 @@ static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
 {
        const struct xt_set_info *set = (const void *) em->data;
        if (set) {
-               ip_set_nfnl_put(set->index);
+               ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index);
                kfree((void *) em->data);
        }
 }
index 7c3de6ffa5164db0f7abd3f0e2cc0ca2e92ecda9..e5cef95672256b650249c80bef36930f78be9f13 100644 (file)
@@ -793,8 +793,10 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
                goto errout;
 
        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-       if (meta == NULL)
+       if (meta == NULL) {
+               err = -ENOMEM;
                goto errout;
+       }
 
        memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
        memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
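
The em_ipset and em_meta changes above are independent fixes: em_ipset now resolves the ip_set index in the netns of the qdisc's device, and em_meta had the classic stale-error bug on a goto path, where a failed kzalloc() jumped to errout with whatever code err happened to hold instead of -ENOMEM. A minimal userspace illustration of the error-label idiom the fix restores:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int change(size_t n)
    {
            int err = -EINVAL;
            void *meta = NULL;

            if (n == 0)
                    goto errout;            /* err is still -EINVAL */

            meta = malloc(n);
            if (meta == NULL) {
                    err = -ENOMEM;          /* set the right code before jumping */
                    goto errout;
            }

            err = 0;                        /* success path */
    errout:
            free(meta);
            return err;
    }

    int main(void)
    {
            printf("%d %d\n", change(0), change(16));   /* -22 0 */
            return 0;
    }
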
index 2adda7fa2d390c4bb2aec883cf049df7e7cef9ef..cd81505662b8a3bcc201a18bce0d6c9bd421b650 100644 (file)
@@ -737,9 +737,11 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
+       int drops;
 
        if (n == 0)
                return;
+       drops = max_t(int, n, 0);
        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        return;
@@ -756,6 +758,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
+               sch->qstats.drops += drops;
        }
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
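
qdisc_tree_decrease_qlen() above now also charges the n dropped packets to qstats.drops of every ancestor while it walks toward the root and shrinks each ancestor's q.qlen, so per-qdisc drop statistics stay consistent when a child drops packets on its own. A toy sketch of that upward propagation (heavily simplified: plain C structs, no ingress check and no class notification):

    #include <stdio.h>

    /* Toy qdisc node: a singly linked chain of parents up to the root. */
    struct toy_qdisc {
            struct toy_qdisc *parent;
            unsigned int qlen;
            unsigned int drops;
            const char *name;
    };

    /* Propagate a qlen decrease of n (counted as drops here) up the chain. */
    static void tree_decrease_qlen(struct toy_qdisc *sch, unsigned int n)
    {
            if (n == 0)
                    return;
            for (sch = sch->parent; sch; sch = sch->parent) {
                    sch->qlen -= n;
                    sch->drops += n;
            }
    }

    int main(void)
    {
            struct toy_qdisc root = { NULL, 10, 0, "root" };
            struct toy_qdisc leaf = { &root, 4, 0, "leaf" };

            tree_decrease_qlen(&leaf, 3);
            printf("%s: qlen=%u drops=%u\n", root.name, root.qlen, root.drops);
            return 0;
    }
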
index 32ad015ee8ce4a9c5b967c22dd90631881f2362b..a9dfdda9ed1d55d17643f36ba05d9ac04ce1557d 100644 (file)
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
 
 /* remove one skb from head of flow queue */
-static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
+static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
 {
        struct sk_buff *skb = flow->head;
 
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               sch->q.qlen--;
        }
        return skb;
 }
@@ -418,8 +420,9 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
+       u32 rate;
 
-       skb = fq_dequeue_head(&q->internal);
+       skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
@@ -449,7 +452,7 @@ begin:
                goto begin;
        }
 
-       skb = fq_dequeue_head(f);
+       skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
@@ -466,43 +469,70 @@ begin:
        f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);
 
-       if (f->credit <= 0 &&
-           q->rate_enable &&
-           skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-               u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+       if (f->credit > 0 || !q->rate_enable)
+               goto out;
 
-               rate = min(rate, q->flow_max_rate);
-               if (rate) {
-                       u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC;
+       rate = q->flow_max_rate;
+       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+               rate = min(skb->sk->sk_pacing_rate, rate);
 
-                       do_div(len, rate);
-                       /* Since socket rate can change later,
-                        * clamp the delay to 125 ms.
-                        * TODO: maybe segment the too big skb, as in commit
-                        * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
-                        */
-                       if (unlikely(len > 125 * NSEC_PER_MSEC)) {
-                               len = 125 * NSEC_PER_MSEC;
-                               q->stat_pkts_too_long++;
-                       }
+       if (rate != ~0U) {
+               u32 plen = max(qdisc_pkt_len(skb), q->quantum);
+               u64 len = (u64)plen * NSEC_PER_SEC;
 
-                       f->time_next_packet = now + len;
+               if (likely(rate))
+                       do_div(len, rate);
+               /* Since socket rate can change later,
+                * clamp the delay to 125 ms.
+                * TODO: maybe segment the too big skb, as in commit
+                * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
+                */
+               if (unlikely(len > 125 * NSEC_PER_MSEC)) {
+                       len = 125 * NSEC_PER_MSEC;
+                       q->stat_pkts_too_long++;
                }
+
+               f->time_next_packet = now + len;
        }
 out:
-       sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_bstats_update(sch, skb);
-       sch->q.qlen--;
        qdisc_unthrottled(sch);
        return skb;
 }
 
 static void fq_reset(struct Qdisc *sch)
 {
+       struct fq_sched_data *q = qdisc_priv(sch);
+       struct rb_root *root;
        struct sk_buff *skb;
+       struct rb_node *p;
+       struct fq_flow *f;
+       unsigned int idx;
 
-       while ((skb = fq_dequeue(sch)) != NULL)
+       while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
                kfree_skb(skb);
+
+       if (!q->fq_root)
+               return;
+
+       for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
+               root = &q->fq_root[idx];
+               while ((p = rb_first(root)) != NULL) {
+                       f = container_of(p, struct fq_flow, fq_node);
+                       rb_erase(p, root);
+
+                       while ((skb = fq_dequeue_head(sch, f)) != NULL)
+                               kfree_skb(skb);
+
+                       kmem_cache_free(fq_flow_cachep, f);
+               }
+       }
+       q->new_flows.first      = NULL;
+       q->old_flows.first      = NULL;
+       q->delayed              = RB_ROOT;
+       q->flows                = 0;
+       q->inactive_flows       = 0;
+       q->throttled_flows      = 0;
 }
 
 static void fq_rehash(struct fq_sched_data *q,
@@ -622,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
                q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
-               q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+               q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
 
        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -645,6 +675,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);
 
+               if (!skb)
+                       break;
                kfree_skb(skb);
                drop_count++;
        }
@@ -657,21 +689,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 static void fq_destroy(struct Qdisc *sch)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       struct rb_root *root;
-       struct rb_node *p;
-       unsigned int idx;
 
-       if (q->fq_root) {
-               for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
-                       root = &q->fq_root[idx];
-                       while ((p = rb_first(root)) != NULL) {
-                               rb_erase(p, root);
-                               kmem_cache_free(fq_flow_cachep,
-                                               container_of(p, struct fq_flow, fq_node));
-                       }
-               }
-               kfree(q->fq_root);
-       }
+       fq_reset(sch);
+       kfree(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
@@ -711,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
 
+       /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+        * do not bother giving its value
+        */
        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
-           nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;
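
The fq changes above move qlen/backlog accounting into fq_dequeue_head(), make fq_reset() free the skbs still parked in the per-flow trees (fq_destroy() now simply calls it), store TCA_FQ_INITIAL_QUANTUM into initial_quantum instead of overwriting quantum, and rework pacing: the delay before a flow's next packet is max(pkt_len, quantum) scaled to nanoseconds, divided by the pacing rate, and clamped to 125 ms. A small userspace sketch of that delay computation (names and numbers are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000ull
    #define NSEC_PER_MSEC 1000000ull

    /* Nanoseconds to wait before the flow may send its next packet. */
    static uint64_t pacing_delay_ns(uint32_t pkt_len, uint32_t quantum,
                                    uint32_t rate_bytes_per_sec)
    {
            uint32_t plen = pkt_len > quantum ? pkt_len : quantum;
            uint64_t len = (uint64_t)plen * NSEC_PER_SEC;

            if (rate_bytes_per_sec)
                    len /= rate_bytes_per_sec;
            if (len > 125 * NSEC_PER_MSEC)  /* socket rate may change later */
                    len = 125 * NSEC_PER_MSEC;
            return len;
    }

    int main(void)
    {
            /* 1500-byte packet, 1000-byte quantum, paced at 125 kB/s -> 12 ms */
            uint64_t gap = pacing_delay_ns(1500, 1000, 125000);

            printf("%llu ns\n", (unsigned long long)gap);
            return 0;
    }
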
index a74e278654aa2290a0ef3061d1392c5cf405e5f5..7fc899a943a8fa8368415bc0c6c8a939bd042963 100644 (file)
@@ -829,7 +829,7 @@ void dev_deactivate_many(struct list_head *head)
        struct net_device *dev;
        bool sync_needed = false;
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry(dev, head, close_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
@@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
                synchronize_net();
 
        /* Wait for outstanding qdisc_run calls. */
-       list_for_each_entry(dev, head, unreg_list)
+       list_for_each_entry(dev, head, close_list)
                while (some_qdisc_is_busy(dev))
                        yield();
 }
@@ -857,7 +857,7 @@ void dev_deactivate(struct net_device *dev)
 {
        LIST_HEAD(single);
 
-       list_add(&dev->unreg_list, &single);
+       list_add(&dev->close_list, &single);
        dev_deactivate_many(&single);
        list_del(&single);
 }
@@ -910,11 +910,12 @@ void dev_shutdown(struct net_device *dev)
 }
 
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
-                              const struct tc_ratespec *conf)
+                              const struct tc_ratespec *conf,
+                              u64 rate64)
 {
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
-       r->rate_bytes_ps = conf->rate;
+       r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
        r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
        r->mult = 1;
        /*
index 863846cc5513b5cd136264265d226a811bd868c6..0e1e38b40025fd111f50bfce339a6d2e7cae1252 100644 (file)
@@ -997,6 +997,8 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
        [TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
+       [TCA_HTB_RATE64] = { .type = NLA_U64 },
+       [TCA_HTB_CEIL64] = { .type = NLA_U64 },
 };
 
 static void htb_work_func(struct work_struct *work)
@@ -1114,6 +1116,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        opt.level = cl->level;
        if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
+       if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
+           nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
+               goto nla_put_failure;
+       if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
+           nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest);
        spin_unlock_bh(root_lock);
@@ -1332,6 +1340,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_opt *hopt;
+       u64 rate64, ceil64;
 
        /* extract all subattrs from opt attr */
        if (!opt)
@@ -1491,8 +1500,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        cl->prio = TC_HTB_NUMPRIO - 1;
        }
 
-       psched_ratecfg_precompute(&cl->rate, &hopt->rate);
-       psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
+       rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+       ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+       psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+       psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
 
        cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
        cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
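
With TCA_HTB_RATE64 and TCA_HTB_CEIL64 above, HTB can carry rates that no longer fit the 32-bit tc_ratespec field, and psched_ratecfg_precompute() simply takes the larger of the legacy 32-bit value and the new 64-bit attribute. The 32-bit field tops out just below 2^32 bytes/s (roughly 34 Gbit/s), which is why htb_dump_class() only emits the u64 attributes when rate_bytes_ps is at least 2^32. A quick userspace illustration of the selection logic and the cutoff (not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Effective rate: the 64-bit attribute wins when it is larger, mirroring
     * max_t(u64, conf->rate, rate64) in psched_ratecfg_precompute(). */
    static uint64_t effective_rate(uint32_t rate32, uint64_t rate64)
    {
            return rate64 > rate32 ? rate64 : rate32;
    }

    int main(void)
    {
            uint64_t r = effective_rate(0, 6250000000ull);  /* 50 Gbit/s in bytes/s */

            printf("rate = %llu bytes/s\n", (unsigned long long)r);
            printf("needs RATE64 attribute: %s\n", r >= (1ull << 32) ? "yes" : "no");
            printf("32-bit cap ~= %.1f Gbit/s\n", (double)UINT32_MAX * 8 / 1e9);
            return 0;
    }
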
index a6d788d45216a6f286e5aaea0cdb0587cd0f7848..b87e83d0747821bc2251c3b099f91a3358c211d9 100644 (file)
@@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
        return PSCHED_NS2TICKS(ticks);
 }
 
+static void tfifo_reset(struct Qdisc *sch)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       struct rb_node *p;
+
+       while ((p = rb_first(&q->t_root))) {
+               struct sk_buff *skb = netem_rb_to_skb(p);
+
+               rb_erase(p, &q->t_root);
+               skb->next = NULL;
+               skb->prev = NULL;
+               kfree_skb(skb);
+       }
+}
+
 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -520,6 +535,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
                        skb->next = NULL;
                        skb->prev = NULL;
                        len = qdisc_pkt_len(skb);
+                       sch->qstats.backlog -= len;
                        kfree_skb(skb);
                }
        }
@@ -609,6 +625,7 @@ static void netem_reset(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset_queue(sch);
+       tfifo_reset(sch);
        if (q->qdisc)
                qdisc_reset(q->qdisc);
        qdisc_watchdog_cancel(&q->watchdog);
index 1aaf1b6e51a2be238bc47797597ed3bcb76de4b1..b0571224f3c92c00f2899db53baaf04bfdaa45c8 100644 (file)
@@ -341,9 +341,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
 
-       psched_ratecfg_precompute(&q->rate, &rtab->rate);
+       psched_ratecfg_precompute(&q->rate, &rtab->rate, 0);
        if (ptab) {
-               psched_ratecfg_precompute(&q->peak, &ptab->rate);
+               psched_ratecfg_precompute(&q->peak, &ptab->rate, 0);
                q->peak_present = true;
        } else {
                q->peak_present = false;
index e7b2d4fe2b6a120c66b3e869d796eb6dba69c2d2..7567e6f1a9205bb0a37a29becc243d9ed6352fe4 100644 (file)
@@ -279,7 +279,9 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
                rcu_read_lock();
                list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-                       if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
+                       if (!laddr->valid || laddr->state == SCTP_ADDR_DEL ||
+                           (laddr->state != SCTP_ADDR_SRC &&
+                            !asoc->src_out_of_asoc_ok))
                                continue;
 
                        /* Do not compare against v4 addrs */
@@ -426,20 +428,20 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
 {
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = 0;
-       addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
+       addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
 static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 {
        if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
-               inet6_sk(sk)->rcv_saddr.s6_addr32[0] = 0;
-               inet6_sk(sk)->rcv_saddr.s6_addr32[1] = 0;
-               inet6_sk(sk)->rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
-               inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
+               sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
+               sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
+               sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
+               sk->sk_v6_rcv_saddr.s6_addr32[3] =
                        addr->v4.sin_addr.s_addr;
        } else {
-               inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
+               sk->sk_v6_rcv_saddr = addr->v6.sin6_addr;
        }
 }
 
@@ -447,12 +449,12 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
 {
        if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
-               inet6_sk(sk)->daddr.s6_addr32[0] = 0;
-               inet6_sk(sk)->daddr.s6_addr32[1] = 0;
-               inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
-               inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+               sk->sk_v6_daddr.s6_addr32[0] = 0;
+               sk->sk_v6_daddr.s6_addr32[1] = 0;
+               sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
+               sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        } else {
-               inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+               sk->sk_v6_daddr = addr->v6.sin6_addr;
        }
 }
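
The two hunks above build an IPv4-mapped IPv6 address (::ffff:a.b.c.d) directly in sk->sk_v6_rcv_saddr and sk->sk_v6_daddr, the fields this series moves into struct sock_common. A runnable userspace sketch of the same mapping (byte-wise, equivalent to the word stores in the kernel code):

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Store an IPv4 address as an IPv4-mapped IPv6 address (::ffff:a.b.c.d). */
    static void v4_to_mapped(const struct in_addr *v4, struct in6_addr *v6)
    {
            memset(v6, 0, sizeof(*v6));
            v6->s6_addr[10] = 0xff;
            v6->s6_addr[11] = 0xff;
            memcpy(&v6->s6_addr[12], &v4->s_addr, 4);
    }

    int main(void)
    {
            struct in_addr v4;
            struct in6_addr v6;
            char buf[INET6_ADDRSTRLEN];

            inet_pton(AF_INET, "192.0.2.1", &v4);
            v4_to_mapped(&v4, &v6);
            printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
            return 0;
    }
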
 
index 0ac3a65daccb71cd78ae5e2c52696c33df7e16e4..319137340d158df36094c45b8eae1fc13df50825 100644 (file)
@@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
        if (!sctp_checksum_disable) {
-               if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+               if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+                   (dst_xfrm(dst) != NULL) || packet->ipfragok) {
                        __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
                        /* 3) Put the resultant value into the checksum field in the
index 911b71b26b0e6670bfac1a7f8d450aea510f7333..72046b9729a8a6a669fb9c75446de8ef2fe345c6 100644 (file)
@@ -5890,7 +5890,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                int low, high, remaining, index;
                unsigned int rover;
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(sock_net(sk), &low, &high);
                remaining = (high - low) + 1;
                rover = net_random() % remaining + low;
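
inet_get_local_port_range() is per network namespace now, so each netns consults its own ip_local_port_range sysctl, and SCTP starts its ephemeral port search at a random offset inside that range, exactly as shown above. A tiny userspace sketch of picking the starting rover (rand() stands in for net_random(); the range is the typical Linux default):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Pick a random starting port in [low, high], as the rover above does. */
    static int pick_rover(int low, int high)
    {
            int remaining = (high - low) + 1;

            return rand() % remaining + low;
    }

    int main(void)
    {
            srand((unsigned)time(NULL));
            printf("rover = %d\n", pick_rover(32768, 61000));
            return 0;
    }
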
 
index ebed4b68f768a1afccea45ea7dc22ef555f3f679..c226aceee65b8b8c59d93d6a133a04c86177ceef 100644 (file)
@@ -1964,6 +1964,16 @@ struct used_address {
        unsigned int name_len;
 };
 
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+                                struct msghdr __user *umsg)
+{
+       if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+               return -EFAULT;
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               return -EINVAL;
+       return 0;
+}
+
 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags,
                         struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
        if (MSG_CMSG_COMPAT & flags) {
                if (get_compat_msghdr(msg_sys, msg_compat))
                        return -EFAULT;
-       } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-               return -EFAULT;
+       } else {
+               err = copy_msghdr_from_user(msg_sys, msg);
+               if (err)
+                       return err;
+       }
 
        if (msg_sys->msg_iovlen > UIO_FASTIOV) {
                err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
        if (MSG_CMSG_COMPAT & flags) {
                if (get_compat_msghdr(msg_sys, msg_compat))
                        return -EFAULT;
-       } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-               return -EFAULT;
+       } else {
+               err = copy_msghdr_from_user(msg_sys, msg);
+               if (err)
+                       return err;
+       }
 
        if (msg_sys->msg_iovlen > UIO_FASTIOV) {
                err = -EMSGSIZE;
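
The new copy_msghdr_from_user() helper is shared by the sendmsg and recvmsg paths above, so a user-supplied msg_namelen larger than sizeof(struct sockaddr_storage) is rejected with -EINVAL before the kernel copies any address. Well-behaved callers are unaffected; for reference, a minimal well-formed sendmsg() call from userspace looks like this (loopback UDP to the discard port, purely illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct sockaddr_in dst = { .sin_family = AF_INET,
                                       .sin_port = htons(9) };
            char payload[] = "ping";
            struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
            struct msghdr msg = { 0 };

            dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
            msg.msg_name = &dst;
            msg.msg_namelen = sizeof(dst);  /* <= sizeof(struct sockaddr_storage) */
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;

            if (sendmsg(fd, &msg, 0) < 0)
                    perror("sendmsg");
            close(fd);
            return 0;
    }
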
index fcac5d14171779a32599e6b4111b75a8e94a15aa..084656671d6ee4cebc6220ad0d92bae654497882 100644 (file)
@@ -1075,6 +1075,15 @@ gss_destroy(struct rpc_auth *auth)
        kref_put(&gss_auth->kref, gss_free_callback);
 }
 
+/*
+ * Auths may be shared between rpc clients that were cloned from a
+ * common client with the same xprt, if they also share the flavor and
+ * target_name.
+ *
+ * The auth is looked up from the oldest parent sharing the same
+ * cl_xprt, and the auth itself references only that common parent
+ * (which is guaranteed to last as long as any of its descendants).
+ */
 static struct gss_auth *
 gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
                struct rpc_clnt *clnt,
@@ -1088,6 +1097,8 @@ gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
                        gss_auth,
                        hash,
                        hashval) {
+               if (gss_auth->client != clnt)
+                       continue;
                if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
                        continue;
                if (gss_auth->target_name != args->target_name) {
index 77479606a9716e9526019ba5e5a0dfa4b7d010a2..7352aef8a25444e450e17e38bbe9bcbf81185ddc 100644 (file)
@@ -772,6 +772,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
                atomic_inc(&clnt->cl_count);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
+               if (clnt->cl_noretranstimeo)
+                       task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
                if (sk_memalloc_socks()) {
                        struct rpc_xprt *xprt;
 
@@ -1690,6 +1692,7 @@ call_connect_status(struct rpc_task *task)
        dprint_status(task);
 
        trace_rpc_connect_status(task, status);
+       task->tk_status = 0;
        switch (status) {
                /* if soft mounted, test if we've timed out */
        case -ETIMEDOUT:
@@ -1698,12 +1701,14 @@ call_connect_status(struct rpc_task *task)
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ENETUNREACH:
+               /* retry with existing socket, after a delay */
+               rpc_delay(task, 3*HZ);
                if (RPC_IS_SOFTCONN(task))
                        break;
-               /* retry with existing socket, after a delay */
-       case 0:
        case -EAGAIN:
-               task->tk_status = 0;
+               task->tk_action = call_bind;
+               return;
+       case 0:
                clnt->cl_stats->netreconn++;
                task->tk_action = call_transmit;
                return;
@@ -1717,13 +1722,14 @@ call_connect_status(struct rpc_task *task)
 static void
 call_transmit(struct rpc_task *task)
 {
+       int is_retrans = RPC_WAS_SENT(task);
+
        dprint_status(task);
 
        task->tk_action = call_status;
        if (task->tk_status < 0)
                return;
-       task->tk_status = xprt_prepare_transmit(task);
-       if (task->tk_status != 0)
+       if (!xprt_prepare_transmit(task))
                return;
        task->tk_action = call_transmit_status;
        /* Encode here so that rpcsec_gss can use correct sequence number. */
@@ -1742,6 +1748,8 @@ call_transmit(struct rpc_task *task)
        xprt_transmit(task);
        if (task->tk_status < 0)
                return;
+       if (is_retrans)
+               task->tk_client->cl_stats->rpcretrans++;
        /*
         * On success, ensure that we call xprt_end_transmit() before sleeping
         * in order to allow access to the socket to other RPC requests.
@@ -1811,8 +1819,7 @@ call_bc_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
-       task->tk_status = xprt_prepare_transmit(task);
-       if (task->tk_status == -EAGAIN) {
+       if (!xprt_prepare_transmit(task)) {
                /*
                 * Could not reserve the transport. Try again after the
                 * transport is released.
@@ -1900,7 +1907,8 @@ call_status(struct rpc_task *task)
                rpc_delay(task, 3*HZ);
        case -ETIMEDOUT:
                task->tk_action = call_timeout;
-               if (task->tk_client->cl_discrtry)
+               if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
+                   && task->tk_client->cl_discrtry)
                        xprt_conditional_disconnect(req->rq_xprt,
                                        req->rq_connect_cookie);
                break;
@@ -1982,7 +1990,6 @@ call_timeout(struct rpc_task *task)
        rpcauth_invalcred(task);
 
 retry:
-       clnt->cl_stats->rpcretrans++;
        task->tk_action = call_bind;
        task->tk_status = 0;
 }
@@ -2025,7 +2032,6 @@ call_decode(struct rpc_task *task)
        if (req->rq_rcv_buf.len < 12) {
                if (!RPC_IS_SOFT(task)) {
                        task->tk_action = call_bind;
-                       clnt->cl_stats->rpcretrans++;
                        goto out_retry;
                }
                dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
index f94567b45bb3eb9f4f8b0ec82db4ee08a304c48b..d0d14a04dce1eb2e4274e3111d9008b71da918aa 100644 (file)
@@ -519,8 +519,8 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
        d_add(dentry, inode);
        return 0;
 out_err:
-       printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
-                       __FILE__, __func__, dentry->d_name.name);
+       printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %pd\n",
+                       __FILE__, __func__, dentry);
        dput(dentry);
        return -ENOMEM;
 }
@@ -755,8 +755,8 @@ static int rpc_populate(struct dentry *parent,
 out_bad:
        __rpc_depopulate(parent, files, start, eof);
        mutex_unlock(&dir->i_mutex);
-       printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
-                       __FILE__, __func__, parent->d_name.name);
+       printk(KERN_WARNING "%s: %s failed to populate directory %pd\n",
+                       __FILE__, __func__, parent);
        return err;
 }
 
@@ -852,8 +852,8 @@ out:
        return dentry;
 out_err:
        dentry = ERR_PTR(err);
-       printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
-                       __FILE__, __func__, parent->d_name.name, name,
+       printk(KERN_WARNING "%s: %s() failed to create pipe %pd/%s (errno = %d)\n",
+                       __FILE__, __func__, parent, name,
                        err);
        goto out;
 }
index 9c9caaa5e0d3129f8872350ded706321fb2509c2..b6e59f0a9475d665ea1952a8428e56ed6b06b89e 100644 (file)
@@ -291,12 +291,14 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
                                &inet_sk(sk)->inet_rcv_saddr,
                                inet_sk(sk)->inet_num);
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
                                proto_name,
-                               &inet6_sk(sk)->rcv_saddr,
+                               &sk->sk_v6_rcv_saddr,
                                inet_sk(sk)->inet_num);
                break;
+#endif
        default:
                len = snprintf(buf, remaining, "*unknown-%d*\n",
                                sk->sk_family);
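
The PF_INET6 case above is now compiled only when IPv6 is built in or modular. IS_ENABLED() lives in include/linux/kconfig.h and works entirely in the preprocessor: =y defines CONFIG_FOO to 1, =m defines CONFIG_FOO_MODULE to 1, and a placeholder-macro trick turns "is this symbol defined to 1" into a constant 0 or 1 usable in plain C conditions. A compilable standalone sketch of the built-in half of that trick (modeled on kconfig.h and simplified; CONFIG_IPV6 is defined here only to make the example self-contained):

    #include <stdio.h>

    #define CONFIG_IPV6 1   /* pretend IPv6 is built in (=y) */

    /* Placeholder trick: if 'option' expands to 1, the placeholder macro
     * inserts an extra argument and the second-argument selector yields 1;
     * otherwise it yields 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_BUILTIN(option) __is_defined(option)

    int main(void)
    {
            printf("IPv6 compiled in: %d\n", IS_BUILTIN(CONFIG_IPV6));
            return 0;
    }
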
index 095363eee764b3ff496a745f56e4fa646f5f02c0..4953550537e0d6284625d39bf46992a792966ace 100644 (file)
@@ -205,10 +205,8 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
                goto out_sleep;
        }
        xprt->snd_task = task;
-       if (req != NULL) {
-               req->rq_bytes_sent = 0;
+       if (req != NULL)
                req->rq_ntrans++;
-       }
 
        return 1;
 
@@ -263,7 +261,6 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
-               req->rq_bytes_sent = 0;
                req->rq_ntrans++;
                return 1;
        }
@@ -300,10 +297,8 @@ static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 
        req = task->tk_rqstp;
        xprt->snd_task = task;
-       if (req) {
-               req->rq_bytes_sent = 0;
+       if (req)
                req->rq_ntrans++;
-       }
        return true;
 }
 
@@ -329,7 +324,6 @@ static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
-               req->rq_bytes_sent = 0;
                req->rq_ntrans++;
                return true;
        }
@@ -358,6 +352,11 @@ out_unlock:
 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        if (xprt->snd_task == task) {
+               if (task != NULL) {
+                       struct rpc_rqst *req = task->tk_rqstp;
+                       if (req != NULL)
+                               req->rq_bytes_sent = 0;
+               }
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
@@ -375,6 +374,11 @@ EXPORT_SYMBOL_GPL(xprt_release_xprt);
 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        if (xprt->snd_task == task) {
+               if (task != NULL) {
+                       struct rpc_rqst *req = task->tk_rqstp;
+                       if (req != NULL)
+                               req->rq_bytes_sent = 0;
+               }
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
@@ -854,24 +858,36 @@ static inline int xprt_has_timer(struct rpc_xprt *xprt)
  * @task: RPC task about to send a request
  *
  */
-int xprt_prepare_transmit(struct rpc_task *task)
+bool xprt_prepare_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
-       int err = 0;
+       bool ret = false;
 
        dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
 
        spin_lock_bh(&xprt->transport_lock);
-       if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
-               err = req->rq_reply_bytes_recvd;
+       if (!req->rq_bytes_sent) {
+               if (req->rq_reply_bytes_recvd) {
+                       task->tk_status = req->rq_reply_bytes_recvd;
+                       goto out_unlock;
+               }
+               if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
+                   && xprt_connected(xprt)
+                   && req->rq_connect_cookie == xprt->connect_cookie) {
+                       xprt->ops->set_retrans_timeout(task);
+                       rpc_sleep_on(&xprt->pending, task, xprt_timer);
+                       goto out_unlock;
+               }
+       }
+       if (!xprt->ops->reserve_xprt(xprt, task)) {
+               task->tk_status = -EAGAIN;
                goto out_unlock;
        }
-       if (!xprt->ops->reserve_xprt(xprt, task))
-               err = -EAGAIN;
+       ret = true;
 out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
-       return err;
+       return ret;
 }
 
 void xprt_end_transmit(struct rpc_task *task)
@@ -912,7 +928,6 @@ void xprt_transmit(struct rpc_task *task)
        } else if (!req->rq_bytes_sent)
                return;
 
-       req->rq_connect_cookie = xprt->connect_cookie;
        req->rq_xtime = ktime_get();
        status = xprt->ops->send_request(task);
        if (status != 0) {
@@ -938,12 +953,14 @@ void xprt_transmit(struct rpc_task *task)
        /* Don't race with disconnect */
        if (!xprt_connected(xprt))
                task->tk_status = -ENOTCONN;
-       else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+       else {
                /*
                 * Sleep on the pending queue since
                 * we're expecting a reply.
                 */
-               rpc_sleep_on(&xprt->pending, task, xprt_timer);
+               if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
+                       rpc_sleep_on(&xprt->pending, task, xprt_timer);
+               req->rq_connect_cookie = xprt->connect_cookie;
        }
        spin_unlock_bh(&xprt->transport_lock);
 }
@@ -1186,6 +1203,12 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        req->rq_xid     = xprt_alloc_xid(xprt);
+       req->rq_connect_cookie = xprt->connect_cookie - 1;
+       req->rq_bytes_sent = 0;
+       req->rq_snd_buf.len = 0;
+       req->rq_snd_buf.buflen = 0;
+       req->rq_rcv_buf.len = 0;
+       req->rq_rcv_buf.buflen = 0;
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
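
Two details worth noting in the xprt.c changes above: rq_bytes_sent is now cleared when the transport lock is released rather than when it is taken, and xprt_request_init() scrubs the whole slot, seeding rq_connect_cookie to connect_cookie - 1 so a fresh request can never compare equal to the transport's current cookie. With an unsigned counter that property holds across wraparound; a trivial standalone check of the arithmetic (the type is an assumption, not taken from the headers):

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
            unsigned int samples[] = { 0u, 1u, UINT_MAX };
            unsigned int i;

            /* cookie - 1 never equals cookie, including at the wrap points. */
            for (i = 0; i < 3; i++)
                    assert(samples[i] - 1u != samples[i]);
            return 0;
    }
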
index ee03d35677d962a3385d8d01a31968b70fa77b56..9928ba164d62ad344b1b8c487087561904fb381f 100644 (file)
@@ -1511,6 +1511,7 @@ static void xs_tcp_state_change(struct sock *sk)
                        transport->tcp_copied = 0;
                        transport->tcp_flags =
                                TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+                       xprt->connect_cookie++;
 
                        xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
@@ -2112,6 +2113,19 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 
        if (!transport->inet) {
                struct sock *sk = sock->sk;
+               unsigned int keepidle = xprt->timeout->to_initval / HZ;
+               unsigned int keepcnt = xprt->timeout->to_retries + 1;
+               unsigned int opt_on = 1;
+
+               /* TCP Keepalive options */
+               kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+                               (char *)&opt_on, sizeof(opt_on));
+               kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
+                               (char *)&keepidle, sizeof(keepidle));
+               kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
+                               (char *)&keepidle, sizeof(keepidle));
+               kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
+                               (char *)&keepcnt, sizeof(keepcnt));
 
                write_lock_bh(&sk->sk_callback_lock);
 
@@ -2151,7 +2165,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
        case 0:
        case -EINPROGRESS:
                /* SYN_SENT! */
-               xprt->connect_cookie++;
                if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
        }
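
xs_tcp_finish_connecting() now arms TCP keepalives on the socket, deriving the idle time from the RPC timeout's to_initval and the probe count from to_retries; note the patch reuses keepidle for TCP_KEEPINTVL as well, and the connect cookie increment moves from the connect path into xs_tcp_state_change(). The same four keepalive knobs exist for userspace sockets; a small self-contained analogue (function name and parameters are illustrative):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Userspace analogue of the keepalive setup above, illustrative only. */
    static int enable_keepalive(int fd, int idle_secs, int probes)
    {
            int on = 1;

            if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) ||
                setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_secs, sizeof(idle_secs)) ||
                setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &idle_secs, sizeof(idle_secs)) ||
                setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &probes, sizeof(probes)))
                    return -1;      /* errno describes the failing option */
            return 0;
    }
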
index 9bc6db04be3ea7cd998f41187ff40b19baa9c920..e7000be321b0148469264524ed6fce75c3952955 100644 (file)
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
 
        /* Allow network administrator to have same access as root. */
        if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
-           uid_eq(root_uid, current_uid())) {
+           uid_eq(root_uid, current_euid())) {
                int mode = (table->mode >> 6) & 7;
                return (mode << 6) | (mode << 3) | mode;
        }
        /* Allow netns root group to have the same access as the root group */
-       if (gid_eq(root_gid, current_gid())) {
+       if (in_egroup_p(root_gid)) {
                int mode = (table->mode >> 3) & 7;
                return (mode << 3) | mode;
        }
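
net_ctl_permissions() now keys off the effective UID and effective group membership (current_euid()/in_egroup_p()) rather than the real IDs when deciding whether the caller gets root-equivalent access. The value it returns replicates one rwx triad into the owner, group and other positions; a standalone illustration of that bit math with an example mode (the values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int table_mode = 0644;              /* e.g. a -rw-r--r-- sysctl */
            unsigned int owner = (table_mode >> 6) & 7;  /* owner triad: 6 = rw- */

            /* Grant the caller the owner's bits in all three positions: 0666. */
            printf("%o\n", (owner << 6) | (owner << 3) | owner);
            return 0;
    }
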
index 609c30c808165191a69899e750439666addc29f8..3f9707a16d0650d6367ad716ab2a110318a03e96 100644 (file)
@@ -387,7 +387,7 @@ restart:
 
        b_ptr = &tipc_bearers[bearer_id];
        strcpy(b_ptr->name, name);
-       res = m_ptr->enable_bearer(b_ptr);
+       res = m_ptr->enable_media(b_ptr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
@@ -420,23 +420,15 @@ exit:
 }
 
 /**
- * tipc_block_bearer - Block the bearer with the given name, and reset all its links
+ * tipc_block_bearer - Block the bearer, and reset all its links
  */
-int tipc_block_bearer(const char *name)
+int tipc_block_bearer(struct tipc_bearer *b_ptr)
 {
-       struct tipc_bearer *b_ptr = NULL;
        struct tipc_link *l_ptr;
        struct tipc_link *temp_l_ptr;
 
        read_lock_bh(&tipc_net_lock);
-       b_ptr = tipc_bearer_find(name);
-       if (!b_ptr) {
-               pr_warn("Attempt to block unknown bearer <%s>\n", name);
-               read_unlock_bh(&tipc_net_lock);
-               return -EINVAL;
-       }
-
-       pr_info("Blocking bearer <%s>\n", name);
+       pr_info("Blocking bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
        b_ptr->blocked = 1;
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -465,7 +457,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
        b_ptr->blocked = 1;
-       b_ptr->media->disable_bearer(b_ptr);
+       b_ptr->media->disable_media(b_ptr);
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
index 09c869adcfcf4fd7f3e6cac8423c4e3ccd7762c1..e5e04be6fffa3782c01f37db795c7f84c8b71793 100644 (file)
@@ -75,8 +75,8 @@ struct tipc_bearer;
 /**
  * struct tipc_media - TIPC media information available to internal users
  * @send_msg: routine which handles buffer transmission
- * @enable_bearer: routine which enables a bearer
- * @disable_bearer: routine which disables a bearer
+ * @enable_media: routine which enables a media
+ * @disable_media: routine which disables a media
  * @addr2str: routine which converts media address to string
  * @addr2msg: routine which converts media address to protocol message area
  * @msg2addr: routine which converts media address from protocol message area
@@ -91,8 +91,8 @@ struct tipc_media {
        int (*send_msg)(struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
-       int (*enable_bearer)(struct tipc_bearer *b_ptr);
-       void (*disable_bearer)(struct tipc_bearer *b_ptr);
+       int (*enable_media)(struct tipc_bearer *b_ptr);
+       void (*disable_media)(struct tipc_bearer *b_ptr);
        int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
        int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
        int (*msg2addr)(const struct tipc_bearer *b_ptr,
@@ -163,7 +163,7 @@ int tipc_register_media(struct tipc_media *m_ptr);
 
 void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
 
-int  tipc_block_bearer(const char *name);
+int  tipc_block_bearer(struct tipc_bearer *b_ptr);
 void tipc_continue(struct tipc_bearer *tb_ptr);
 
 int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
index be72f8cebc536a425a79d99a5435fa10f8e58b89..94895d4e86abe249f4ef954d527e119647665c92 100644 (file)
@@ -90,21 +90,21 @@ extern int tipc_random __read_mostly;
 /*
  * Routines available to privileged subsystems
  */
-extern int tipc_core_start_net(unsigned long);
-extern int  tipc_handler_start(void);
-extern void tipc_handler_stop(void);
-extern int  tipc_netlink_start(void);
-extern void tipc_netlink_stop(void);
-extern int  tipc_socket_init(void);
-extern void tipc_socket_stop(void);
-extern int tipc_sock_create_local(int type, struct socket **res);
-extern void tipc_sock_release_local(struct socket *sock);
-extern int tipc_sock_accept_local(struct socket *sock,
-                                 struct socket **newsock, int flags);
+int tipc_core_start_net(unsigned long);
+int tipc_handler_start(void);
+void tipc_handler_stop(void);
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+                          int flags);
 
 #ifdef CONFIG_SYSCTL
-extern int tipc_register_sysctl(void);
-extern void tipc_unregister_sysctl(void);
+int tipc_register_sysctl(void);
+void tipc_unregister_sysctl(void);
 #else
 #define tipc_register_sysctl() 0
 #define tipc_unregister_sysctl()
@@ -201,6 +201,6 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
        return (struct tipc_msg *)skb->data;
 }
 
-extern struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size);
 
 #endif
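
The core.h hunks are purely cosmetic: extern is implicit on function declarations in C, so dropping it (and re-wrapping the argument lists) changes nothing about linkage. The two prototypes below, using an illustrative name, declare the same thing:

    extern int tipc_example_start(void);   /* old style */
    int tipc_example_start(void);          /* same declaration without the keyword */
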
index 40ea40cf6204506f4a579f99d2a0c3d8e79652ca..f80d59f5a161c33199ef4d00494117b8115e2676 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/eth_media.c: Ethernet bearer support for TIPC
  *
  * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2008, 2011, Wind River Systems
+ * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "core.h"
 #include "bearer.h"
 
-#define MAX_ETH_BEARERS                MAX_BEARERS
+#define MAX_ETH_MEDIA          MAX_BEARERS
 
 #define ETH_ADDR_OFFSET        4       /* message header offset of MAC address */
 
 /**
- * struct eth_bearer - Ethernet bearer data structure
+ * struct eth_media - Ethernet bearer data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
  * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
-struct eth_bearer {
+struct eth_media {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
@@ -58,7 +58,7 @@ struct eth_bearer {
 };
 
 static struct tipc_media eth_media_info;
-static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static struct eth_media eth_media_array[MAX_ETH_MEDIA];
 static int eth_started;
 
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
@@ -100,7 +100,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
        if (!clone)
                return 0;
 
-       dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+       dev = ((struct eth_media *)(tb_ptr->usr_handle))->dev;
        delta = dev->hard_header_len - skb_headroom(buf);
 
        if ((delta > 0) &&
@@ -128,43 +128,43 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+       struct eth_media *eb_ptr = (struct eth_media *)pt->af_packet_priv;
 
        if (!net_eq(dev_net(dev), &init_net)) {
                kfree_skb(buf);
-               return 0;
+               return NET_RX_DROP;
        }
 
        if (likely(eb_ptr->bearer)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
                        tipc_recv_msg(buf, eb_ptr->bearer);
-                       return 0;
+                       return NET_RX_SUCCESS;
                }
        }
        kfree_skb(buf);
-       return 0;
+       return NET_RX_DROP;
 }
 
 /**
- * setup_bearer - setup association between Ethernet bearer and interface
+ * setup_media - setup association between Ethernet bearer and interface
  */
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
 {
-       struct eth_bearer *eb_ptr =
-               container_of(work, struct eth_bearer, setup);
+       struct eth_media *eb_ptr =
+               container_of(work, struct eth_media, setup);
 
        dev_add_pack(&eb_ptr->tipc_packet_type);
 }
 
 /**
- * enable_bearer - attach TIPC bearer to an Ethernet interface
+ * enable_media - attach TIPC bearer to an Ethernet interface
  */
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
 {
        struct net_device *dev;
-       struct eth_bearer *eb_ptr = &eth_bearers[0];
-       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+       struct eth_media *eb_ptr = &eth_media_array[0];
+       struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
        char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
        int pending_dev = 0;
 
@@ -188,7 +188,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        eb_ptr->tipc_packet_type.func = recv_msg;
        eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
        INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-       INIT_WORK(&eb_ptr->setup, setup_bearer);
+       INIT_WORK(&eb_ptr->setup, setup_media);
        schedule_work(&eb_ptr->setup);
 
        /* Associate TIPC bearer with Ethernet bearer */
@@ -205,14 +205,14 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
 }
 
 /**
- * cleanup_bearer - break association between Ethernet bearer and interface
+ * cleanup_media - break association between Ethernet bearer and interface
  *
  * This routine must be invoked from a work queue because it can sleep.
  */
-static void cleanup_bearer(struct work_struct *work)
+static void cleanup_media(struct work_struct *work)
 {
-       struct eth_bearer *eb_ptr =
-               container_of(work, struct eth_bearer, cleanup);
+       struct eth_media *eb_ptr =
+               container_of(work, struct eth_media, cleanup);
 
        dev_remove_pack(&eb_ptr->tipc_packet_type);
        dev_put(eb_ptr->dev);
@@ -220,18 +220,18 @@ static void cleanup_bearer(struct work_struct *work)
 }
 
 /**
- * disable_bearer - detach TIPC bearer from an Ethernet interface
+ * disable_media - detach TIPC bearer from an Ethernet interface
  *
  * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
 {
-       struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+       struct eth_media *eb_ptr = (struct eth_media *)tb_ptr->usr_handle;
 
        eb_ptr->bearer = NULL;
-       INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+       INIT_WORK(&eb_ptr->cleanup, cleanup_media);
        schedule_work(&eb_ptr->cleanup);
 }
 
@@ -245,8 +245,8 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                             void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct eth_bearer *eb_ptr = &eth_bearers[0];
-       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+       struct eth_media *eb_ptr = &eth_media_array[0];
+       struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
@@ -265,17 +265,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                if (netif_carrier_ok(dev))
                        tipc_continue(eb_ptr->bearer);
                else
-                       tipc_block_bearer(eb_ptr->bearer->name);
+                       tipc_block_bearer(eb_ptr->bearer);
                break;
        case NETDEV_UP:
                tipc_continue(eb_ptr->bearer);
                break;
        case NETDEV_DOWN:
-               tipc_block_bearer(eb_ptr->bearer->name);
+               tipc_block_bearer(eb_ptr->bearer);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
-               tipc_block_bearer(eb_ptr->bearer->name);
+               tipc_block_bearer(eb_ptr->bearer);
                tipc_continue(eb_ptr->bearer);
                break;
        case NETDEV_UNREGISTER:
@@ -327,8 +327,8 @@ static int eth_msg2addr(const struct tipc_bearer *tb_ptr,
  */
 static struct tipc_media eth_media_info = {
        .send_msg       = send_msg,
-       .enable_bearer  = enable_bearer,
-       .disable_bearer = disable_bearer,
+       .enable_media   = enable_media,
+       .disable_media  = disable_media,
        .addr2str       = eth_addr2str,
        .addr2msg       = eth_addr2msg,
        .msg2addr       = eth_msg2addr,
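
Besides renaming the eth_bearer structures and callbacks to eth_media, recv_msg() now returns NET_RX_DROP or NET_RX_SUCCESS instead of a bare 0, matching the convention for handlers hung off struct packet_type. A skeletal handler in that style; interesting() and deliver() are placeholders, not TIPC code:

    static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
    {
            if (!interesting(skb)) {
                    kfree_skb(skb);
                    return NET_RX_DROP;     /* frame was consumed but dropped */
            }
            deliver(skb);                   /* hand the frame to the protocol */
            return NET_RX_SUCCESS;
    }
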
index 9934a32bfa877a8a8eae83d1fc74e7f31a7a8399..c139892974644925c4eca8e225e0c262b99cf4c2 100644 (file)
 #include "core.h"
 #include "bearer.h"
 
-#define MAX_IB_BEARERS         MAX_BEARERS
+#define MAX_IB_MEDIA           MAX_BEARERS
 
 /**
- * struct ib_bearer - Infiniband bearer data structure
+ * struct ib_media - Infiniband media data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Infiniband network device
  * @tipc_packet_type: used in binding TIPC to Infiniband driver
  * @cleanup: work item used when disabling bearer
  */
 
-struct ib_bearer {
+struct ib_media {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
@@ -61,7 +61,7 @@ struct ib_bearer {
 };
 
 static struct tipc_media ib_media_info;
-static struct ib_bearer ib_bearers[MAX_IB_BEARERS];
+static struct ib_media ib_media_array[MAX_IB_MEDIA];
 static int ib_started;
 
 /**
@@ -93,7 +93,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
        if (!clone)
                return 0;
 
-       dev = ((struct ib_bearer *)(tb_ptr->usr_handle))->dev;
+       dev = ((struct ib_media *)(tb_ptr->usr_handle))->dev;
        delta = dev->hard_header_len - skb_headroom(buf);
 
        if ((delta > 0) &&
@@ -121,43 +121,43 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct ib_bearer *ib_ptr = (struct ib_bearer *)pt->af_packet_priv;
+       struct ib_media *ib_ptr = (struct ib_media *)pt->af_packet_priv;
 
        if (!net_eq(dev_net(dev), &init_net)) {
                kfree_skb(buf);
-               return 0;
+               return NET_RX_DROP;
        }
 
        if (likely(ib_ptr->bearer)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
                        tipc_recv_msg(buf, ib_ptr->bearer);
-                       return 0;
+                       return NET_RX_SUCCESS;
                }
        }
        kfree_skb(buf);
-       return 0;
+       return NET_RX_DROP;
 }
 
 /**
  * setup_bearer - setup association between InfiniBand bearer and interface
  */
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
 {
-       struct ib_bearer *ib_ptr =
-               container_of(work, struct ib_bearer, setup);
+       struct ib_media *ib_ptr =
+               container_of(work, struct ib_media, setup);
 
        dev_add_pack(&ib_ptr->tipc_packet_type);
 }
 
 /**
- * enable_bearer - attach TIPC bearer to an InfiniBand interface
+ * enable_media - attach TIPC bearer to an InfiniBand interface
  */
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
 {
        struct net_device *dev;
-       struct ib_bearer *ib_ptr = &ib_bearers[0];
-       struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+       struct ib_media *ib_ptr = &ib_media_array[0];
+       struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
        char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
        int pending_dev = 0;
 
@@ -181,7 +181,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        ib_ptr->tipc_packet_type.func = recv_msg;
        ib_ptr->tipc_packet_type.af_packet_priv = ib_ptr;
        INIT_LIST_HEAD(&(ib_ptr->tipc_packet_type.list));
-       INIT_WORK(&ib_ptr->setup, setup_bearer);
+       INIT_WORK(&ib_ptr->setup, setup_media);
        schedule_work(&ib_ptr->setup);
 
        /* Associate TIPC bearer with InfiniBand bearer */
@@ -204,8 +204,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
  */
 static void cleanup_bearer(struct work_struct *work)
 {
-       struct ib_bearer *ib_ptr =
-               container_of(work, struct ib_bearer, cleanup);
+       struct ib_media *ib_ptr =
+               container_of(work, struct ib_media, cleanup);
 
        dev_remove_pack(&ib_ptr->tipc_packet_type);
        dev_put(ib_ptr->dev);
@@ -213,15 +213,15 @@ static void cleanup_bearer(struct work_struct *work)
 }
 
 /**
- * disable_bearer - detach TIPC bearer from an InfiniBand interface
+ * disable_media - detach TIPC bearer from an InfiniBand interface
  *
  * Mark InfiniBand bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
 {
-       struct ib_bearer *ib_ptr = (struct ib_bearer *)tb_ptr->usr_handle;
+       struct ib_media *ib_ptr = (struct ib_media *)tb_ptr->usr_handle;
 
        ib_ptr->bearer = NULL;
        INIT_WORK(&ib_ptr->cleanup, cleanup_bearer);
@@ -238,8 +238,8 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                             void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct ib_bearer *ib_ptr = &ib_bearers[0];
-       struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+       struct ib_media *ib_ptr = &ib_media_array[0];
+       struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
@@ -258,17 +258,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                if (netif_carrier_ok(dev))
                        tipc_continue(ib_ptr->bearer);
                else
-                       tipc_block_bearer(ib_ptr->bearer->name);
+                       tipc_block_bearer(ib_ptr->bearer);
                break;
        case NETDEV_UP:
                tipc_continue(ib_ptr->bearer);
                break;
        case NETDEV_DOWN:
-               tipc_block_bearer(ib_ptr->bearer->name);
+               tipc_block_bearer(ib_ptr->bearer);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
-               tipc_block_bearer(ib_ptr->bearer->name);
+               tipc_block_bearer(ib_ptr->bearer);
                tipc_continue(ib_ptr->bearer);
                break;
        case NETDEV_UNREGISTER:
@@ -323,8 +323,8 @@ static int ib_msg2addr(const struct tipc_bearer *tb_ptr,
  */
 static struct tipc_media ib_media_info = {
        .send_msg       = send_msg,
-       .enable_bearer  = enable_bearer,
-       .disable_bearer = disable_bearer,
+       .enable_media   = enable_media,
+       .disable_media  = disable_media,
        .addr2str       = ib_addr2str,
        .addr2msg       = ib_addr2msg,
        .msg2addr       = ib_msg2addr,
index 0cc3d9015c5d5bb6a1629251d7a636f41239db6f..e8153f64d2d6fb3c23d7f24f42e50ee2ea019df6 100644 (file)
@@ -75,20 +75,6 @@ static const char *link_unk_evt = "Unknown link event ";
  */
 #define START_CHANGEOVER 100000u
 
-/**
- * struct tipc_link_name - deconstructed link name
- * @addr_local: network address of node at this end
- * @if_local: name of interface at this end
- * @addr_peer: network address of node at far end
- * @if_peer: name of interface at far end
- */
-struct tipc_link_name {
-       u32 addr_local;
-       char if_local[TIPC_MAX_IF_NAME];
-       u32 addr_peer;
-       char if_peer[TIPC_MAX_IF_NAME];
-};
-
 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                                       struct sk_buff *buf);
 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
@@ -97,8 +83,7 @@ static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
 static int  link_send_sections_long(struct tipc_port *sender,
                                    struct iovec const *msg_sect,
-                                   u32 num_sect, unsigned int total_len,
-                                   u32 destnode);
+                                   unsigned int len, u32 destnode);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
@@ -160,72 +145,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
                (l_ptr->owner->active_links[1] == l_ptr);
 }
 
-/**
- * link_name_validate - validate & (optionally) deconstruct tipc_link name
- * @name: ptr to link name string
- * @name_parts: ptr to area for link name components (or NULL if not needed)
- *
- * Returns 1 if link name is valid, otherwise 0.
- */
-static int link_name_validate(const char *name,
-                               struct tipc_link_name *name_parts)
-{
-       char name_copy[TIPC_MAX_LINK_NAME];
-       char *addr_local;
-       char *if_local;
-       char *addr_peer;
-       char *if_peer;
-       char dummy;
-       u32 z_local, c_local, n_local;
-       u32 z_peer, c_peer, n_peer;
-       u32 if_local_len;
-       u32 if_peer_len;
-
-       /* copy link name & ensure length is OK */
-       name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
-       /* need above in case non-Posix strncpy() doesn't pad with nulls */
-       strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
-       if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
-               return 0;
-
-       /* ensure all component parts of link name are present */
-       addr_local = name_copy;
-       if_local = strchr(addr_local, ':');
-       if (if_local == NULL)
-               return 0;
-       *(if_local++) = 0;
-       addr_peer = strchr(if_local, '-');
-       if (addr_peer == NULL)
-               return 0;
-       *(addr_peer++) = 0;
-       if_local_len = addr_peer - if_local;
-       if_peer = strchr(addr_peer, ':');
-       if (if_peer == NULL)
-               return 0;
-       *(if_peer++) = 0;
-       if_peer_len = strlen(if_peer) + 1;
-
-       /* validate component parts of link name */
-       if ((sscanf(addr_local, "%u.%u.%u%c",
-                   &z_local, &c_local, &n_local, &dummy) != 3) ||
-           (sscanf(addr_peer, "%u.%u.%u%c",
-                   &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
-           (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
-           (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
-           (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
-               return 0;
-
-       /* return link name components, if necessary */
-       if (name_parts) {
-               name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
-               strcpy(name_parts->if_local, if_local);
-               name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
-               strcpy(name_parts->if_peer, if_peer);
-       }
-       return 1;
-}
-
 /**
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
@@ -1065,8 +984,7 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
  */
 int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
-                                const u32 num_sect, unsigned int total_len,
-                                u32 destaddr)
+                                unsigned int len, u32 destaddr)
 {
        struct tipc_msg *hdr = &sender->phdr;
        struct tipc_link *l_ptr;
@@ -1080,8 +998,7 @@ again:
         * Try building message using port's max_pkt hint.
         * (Must not hold any locks while building message.)
         */
-       res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
-                            sender->max_pkt, &buf);
+       res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
        /* Exit if build request was invalid */
        if (unlikely(res < 0))
                return res;
@@ -1121,8 +1038,7 @@ exit:
                        if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
                                goto again;
 
-                       return link_send_sections_long(sender, msg_sect,
-                                                      num_sect, total_len,
+                       return link_send_sections_long(sender, msg_sect, len,
                                                       destaddr);
                }
                tipc_node_unlock(node);
@@ -1133,8 +1049,8 @@ exit:
        if (buf)
                return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
        if (res >= 0)
-               return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
-                                                total_len, TIPC_ERR_NO_NODE);
+               return tipc_port_reject_sections(sender, hdr, msg_sect,
+                                                len, TIPC_ERR_NO_NODE);
        return res;
 }
 
@@ -1154,18 +1070,17 @@ exit:
  */
 static int link_send_sections_long(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
-                                  u32 num_sect, unsigned int total_len,
-                                  u32 destaddr)
+                                  unsigned int len, u32 destaddr)
 {
        struct tipc_link *l_ptr;
        struct tipc_node *node;
        struct tipc_msg *hdr = &sender->phdr;
-       u32 dsz = total_len;
+       u32 dsz = len;
        u32 max_pkt, fragm_sz, rest;
        struct tipc_msg fragm_hdr;
        struct sk_buff *buf, *buf_chain, *prev;
        u32 fragm_crs, fragm_rest, hsz, sect_rest;
-       const unchar *sect_crs;
+       const unchar __user *sect_crs;
        int curr_sect;
        u32 fragm_no;
        int res = 0;
@@ -1207,7 +1122,7 @@ again:
 
                if (!sect_rest) {
                        sect_rest = msg_sect[++curr_sect].iov_len;
-                       sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+                       sect_crs = msg_sect[curr_sect].iov_base;
                }
 
                if (sect_rest < fragm_rest)
@@ -1283,8 +1198,8 @@ reject:
                        buf = buf_chain->next;
                        kfree_skb(buf_chain);
                }
-               return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
-                                                total_len, TIPC_ERR_NO_NODE);
+               return tipc_port_reject_sections(sender, hdr, msg_sect,
+                                                len, TIPC_ERR_NO_NODE);
        }
 
        /* Append chain of fragments to send queue & send them */
@@ -2585,25 +2500,21 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 static struct tipc_link *link_find_link(const char *name,
                                        struct tipc_node **node)
 {
-       struct tipc_link_name link_name_parts;
-       struct tipc_bearer *b_ptr;
        struct tipc_link *l_ptr;
+       struct tipc_node *n_ptr;
+       int i;
 
-       if (!link_name_validate(name, &link_name_parts))
-               return NULL;
-
-       b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
-       if (!b_ptr)
-               return NULL;
-
-       *node = tipc_node_find(link_name_parts.addr_peer);
-       if (!*node)
-               return NULL;
-
-       l_ptr = (*node)->links[b_ptr->identity];
-       if (!l_ptr || strcmp(l_ptr->name, name))
-               return NULL;
-
+       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+               for (i = 0; i < MAX_BEARERS; i++) {
+                       l_ptr = n_ptr->links[i];
+                       if (l_ptr && !strcmp(l_ptr->name, name))
+                               goto found;
+               }
+       }
+       l_ptr = NULL;
+       n_ptr = NULL;
+found:
+       *node = n_ptr;
        return l_ptr;
 }
 
@@ -2646,6 +2557,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        struct tipc_link *l_ptr;
        struct tipc_bearer *b_ptr;
        struct tipc_media *m_ptr;
+       int res = 0;
 
        l_ptr = link_find_link(name, &node);
        if (l_ptr) {
@@ -2668,9 +2580,12 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
                case TIPC_CMD_SET_LINK_WINDOW:
                        tipc_link_set_queue_limits(l_ptr, new_value);
                        break;
+               default:
+                       res = -EINVAL;
+                       break;
                }
                tipc_node_unlock(node);
-               return 0;
+               return res;
        }
 
        b_ptr = tipc_bearer_find(name);
@@ -2678,15 +2593,18 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
                switch (cmd) {
                case TIPC_CMD_SET_LINK_TOL:
                        b_ptr->tolerance = new_value;
-                       return 0;
+                       break;
                case TIPC_CMD_SET_LINK_PRI:
                        b_ptr->priority = new_value;
-                       return 0;
+                       break;
                case TIPC_CMD_SET_LINK_WINDOW:
                        b_ptr->window = new_value;
-                       return 0;
+                       break;
+               default:
+                       res = -EINVAL;
+                       break;
                }
-               return -EINVAL;
+               return res;
        }
 
        m_ptr = tipc_media_find(name);
@@ -2695,15 +2613,18 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        switch (cmd) {
        case TIPC_CMD_SET_LINK_TOL:
                m_ptr->tolerance = new_value;
-               return 0;
+               break;
        case TIPC_CMD_SET_LINK_PRI:
                m_ptr->priority = new_value;
-               return 0;
+               break;
        case TIPC_CMD_SET_LINK_WINDOW:
                m_ptr->window = new_value;
-               return 0;
+               break;
+       default:
+               res = -EINVAL;
+               break;
        }
-       return -EINVAL;
+       return res;
 }
 
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
index c048ed1cbd765aa3417c945d2913d0c994cb7d3f..55cf8554a08bd974000a67a116583a86e39bda0f 100644 (file)
@@ -227,9 +227,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
-                                const u32 num_sect,
-                                unsigned int total_len,
-                                u32 destnode);
+                                unsigned int len, u32 destnode);
 void tipc_link_recv_bundle(struct sk_buff *buf);
 int  tipc_link_recv_fragment(struct sk_buff **pending,
                             struct sk_buff **fb,
index ced60e2fc4f7fbee929874594ee524c41835cd3e..e525f8ce1dee09ce0d9baf214bd3dd8e13daa9d8 100644 (file)
@@ -73,13 +73,13 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
  * Returns message data size or errno
  */
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  u32 num_sect, unsigned int total_len, int max_size,
-                  struct sk_buff **buf)
+                  unsigned int len, int max_size, struct sk_buff **buf)
 {
-       int dsz, sz, hsz, pos, res, cnt;
+       int dsz, sz, hsz;
+       unsigned char *to;
 
-       dsz = total_len;
-       pos = hsz = msg_hdr_sz(hdr);
+       dsz = len;
+       hsz = msg_hdr_sz(hdr);
        sz = hsz + dsz;
        msg_set_size(hdr, sz);
        if (unlikely(sz > max_size)) {
@@ -91,16 +91,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
        if (!(*buf))
                return -ENOMEM;
        skb_copy_to_linear_data(*buf, hdr, hsz);
-       for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
-               skb_copy_to_linear_data_offset(*buf, pos,
-                                              msg_sect[cnt].iov_base,
-                                              msg_sect[cnt].iov_len);
-               pos += msg_sect[cnt].iov_len;
+       to = (*buf)->data + hsz;
+       if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
+               kfree_skb(*buf);
+               *buf = NULL;
+               return -EFAULT;
        }
-       if (likely(res))
-               return dsz;
-
-       kfree_skb(*buf);
-       *buf = NULL;
-       return -EFAULT;
+       return dsz;
 }
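
tipc_msg_build() now flattens the payload with memcpy_fromiovecend() instead of walking the iovec array by hand, which is why the num_sect/total_len pair collapses into a single len argument throughout the TIPC send path. A hedged kernel-context sketch of that helper as used here (the wrapper and its names are illustrative):

    /* Copy 'len' bytes from the start of an iovec chain into kdata. */
    static int copy_payload(unsigned char *kdata, const struct iovec *iov,
                            unsigned int len)
    {
            if (memcpy_fromiovecend(kdata, iov, 0, len))
                    return -EFAULT;     /* a source pointer faulted */
            return 0;
    }
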
index 5e4ccf5c27df0361aaf26df8023ba79f20057936..559b73a9bf352d9bb7b058a366b81341eae57f15 100644 (file)
@@ -722,6 +722,5 @@ u32 tipc_msg_tot_importance(struct tipc_msg *m);
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode);
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  u32 num_sect, unsigned int total_len, int max_size,
-                  struct sk_buff **buf);
+                  unsigned int len, int max_size, struct sk_buff **buf);
 #endif
index b3ed2fcab4fbd3a947b6419856641c9a7b315872..c081a7632302ca0798c193b039214466c3180184 100644 (file)
@@ -90,8 +90,7 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
  * tipc_multicast - send a multicast message to local and remote destinations
  */
 int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
-                  u32 num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len)
+                  struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_msg *hdr;
        struct sk_buff *buf;
@@ -114,8 +113,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
        msg_set_namelower(hdr, seq->lower);
        msg_set_nameupper(hdr, seq->upper);
        msg_set_hdr_sz(hdr, MCAST_H_SIZE);
-       res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-                            &buf);
+       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (unlikely(!buf))
                return res;
 
@@ -436,14 +434,13 @@ exit:
 }
 
 int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                             struct iovec const *msg_sect, u32 num_sect,
-                             unsigned int total_len, int err)
+                             struct iovec const *msg_sect, unsigned int len,
+                             int err)
 {
        struct sk_buff *buf;
        int res;
 
-       res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-                            &buf);
+       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (!buf)
                return res;
 
@@ -918,15 +915,14 @@ int tipc_port_recv_msg(struct sk_buff *buf)
  *  tipc_port_recv_sections(): Concatenate and deliver sectioned
  *                        message for this node.
  */
-static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
-                                  unsigned int total_len)
+                                  unsigned int len)
 {
        struct sk_buff *buf;
        int res;
 
-       res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
-                            MAX_MSG_SIZE, &buf);
+       res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (likely(buf))
                tipc_port_recv_msg(buf);
        return res;
@@ -935,8 +931,7 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
 /**
  * tipc_send - send message sections on connection
  */
-int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
-             unsigned int total_len)
+int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_port *p_ptr;
        u32 destnode;
@@ -950,11 +945,10 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
        if (!tipc_port_congested(p_ptr)) {
                destnode = port_peernode(p_ptr);
                if (likely(!in_own_node(destnode)))
-                       res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                          total_len, destnode);
+                       res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+                                                          len, destnode);
                else
-                       res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
-                                                     total_len);
+                       res = tipc_port_recv_sections(p_ptr, msg_sect, len);
 
                if (likely(res != -ELINKCONG)) {
                        p_ptr->congested = 0;
@@ -965,7 +959,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
        }
        if (port_unreliable(p_ptr)) {
                p_ptr->congested = 0;
-               return total_len;
+               return len;
        }
        return -ELINKCONG;
 }
@@ -974,8 +968,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
  * tipc_send2name - send message sections to port name
  */
 int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len)
+                  struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_port *p_ptr;
        struct tipc_msg *msg;
@@ -999,36 +992,32 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 
        if (likely(destport || destnode)) {
                if (likely(in_own_node(destnode)))
-                       res = tipc_port_recv_sections(p_ptr, num_sect,
-                                                     msg_sect, total_len);
+                       res = tipc_port_recv_sections(p_ptr, msg_sect, len);
                else if (tipc_own_addr)
                        res = tipc_link_send_sections_fast(p_ptr, msg_sect,
-                                                          num_sect, total_len,
-                                                          destnode);
+                                                          len, destnode);
                else
                        res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
-                                                       num_sect, total_len,
-                                                       TIPC_ERR_NO_NODE);
+                                                       len, TIPC_ERR_NO_NODE);
                if (likely(res != -ELINKCONG)) {
                        if (res > 0)
                                p_ptr->sent++;
                        return res;
                }
                if (port_unreliable(p_ptr)) {
-                       return total_len;
+                       return len;
                }
                return -ELINKCONG;
        }
-       return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
-                                        total_len, TIPC_ERR_NO_NAME);
+       return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+                                        TIPC_ERR_NO_NAME);
 }
 
 /**
  * tipc_send2port - send message sections to port identity
  */
 int tipc_send2port(u32 ref, struct tipc_portid const *dest,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len)
+                  struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_port *p_ptr;
        struct tipc_msg *msg;
@@ -1046,21 +1035,20 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
        msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
        if (in_own_node(dest->node))
-               res =  tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
-                                              total_len);
+               res =  tipc_port_recv_sections(p_ptr, msg_sect, len);
        else if (tipc_own_addr)
-               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                  total_len, dest->node);
+               res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
+                                                  dest->node);
        else
-               res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
-                                               total_len, TIPC_ERR_NO_NODE);
+               res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+                                               TIPC_ERR_NO_NODE);
        if (likely(res != -ELINKCONG)) {
                if (res > 0)
                        p_ptr->sent++;
                return res;
        }
        if (port_unreliable(p_ptr)) {
-               return total_len;
+               return len;
        }
        return -ELINKCONG;
 }
index 5a7026b9c3456b716dd8a173b345d11355f1b3be..9122535973430edff99e8ee6070a091975dea457 100644 (file)
@@ -151,24 +151,20 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
  * TIPC messaging routines
  */
 int tipc_port_recv_msg(struct sk_buff *buf);
-int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
-             unsigned int total_len);
+int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
 
 int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len);
+                  struct iovec const *msg_sect, unsigned int len);
 
 int tipc_send2port(u32 portref, struct tipc_portid const *dest,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len);
+                  struct iovec const *msg_sect, unsigned int len);
 
 int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
-                  unsigned int section_count, struct iovec const *msg,
-                  unsigned int total_len);
+                  struct iovec const *msg, unsigned int len);
 
 int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                             struct iovec const *msg_sect, u32 num_sect,
-                             unsigned int total_len, int err);
+                             struct iovec const *msg_sect, unsigned int len,
+                             int err);
 struct sk_buff *tipc_port_get_ports(void);
 void tipc_port_recv_proto_msg(struct sk_buff *buf);
 void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
index 6cc7ddd2fb7c9cd26eb3e7508adb9f4bc9093866..3906527259d19f2d18d06d3641eda0b755d34968 100644 (file)
@@ -338,7 +338,7 @@ static int release(struct socket *sock)
                buf = __skb_dequeue(&sk->sk_receive_queue);
                if (buf == NULL)
                        break;
-               if (TIPC_SKB_CB(buf)->handle != 0)
+               if (TIPC_SKB_CB(buf)->handle != NULL)
                        kfree_skb(buf);
                else {
                        if ((sock->state == SS_CONNECTING) ||
@@ -622,13 +622,11 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                        res = tipc_send2name(tport->ref,
                                             &dest->addr.name.name,
                                             dest->addr.name.domain,
-                                            m->msg_iovlen,
                                             m->msg_iov,
                                             total_len);
                } else if (dest->addrtype == TIPC_ADDR_ID) {
                        res = tipc_send2port(tport->ref,
                                             &dest->addr.id,
-                                            m->msg_iovlen,
                                             m->msg_iov,
                                             total_len);
                } else if (dest->addrtype == TIPC_ADDR_MCAST) {
@@ -641,7 +639,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                                break;
                        res = tipc_multicast(tport->ref,
                                             &dest->addr.nameseq,
-                                            m->msg_iovlen,
                                             m->msg_iov,
                                             total_len);
                }
@@ -707,8 +704,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
                        break;
                }
 
-               res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
-                               total_len);
+               res = tipc_send(tport->ref, m->msg_iov, total_len);
                if (likely(res != -ELINKCONG))
                        break;
                if (timeout_val <= 0L) {
@@ -1368,7 +1364,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
                return TIPC_ERR_OVERLOAD;
 
        /* Enqueue message */
-       TIPC_SKB_CB(buf)->handle = 0;
+       TIPC_SKB_CB(buf)->handle = NULL;
        __skb_queue_tail(&sk->sk_receive_queue, buf);
        skb_set_owner_r(buf, sk);
 
@@ -1691,7 +1687,7 @@ restart:
                /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
                buf = __skb_dequeue(&sk->sk_receive_queue);
                if (buf) {
-                       if (TIPC_SKB_CB(buf)->handle != 0) {
+                       if (TIPC_SKB_CB(buf)->handle != NULL) {
                                kfree_skb(buf);
                                goto restart;
                        }
index 86de99ad297605d04356f9efd7be174d2f7c7993..c1f403bed683ee8653356870f35457fe6e7f18eb 100644 (file)
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
        return 0;
 }
 
+static void unix_sock_inherit_flags(const struct socket *old,
+                                   struct socket *new)
+{
+       if (test_bit(SOCK_PASSCRED, &old->flags))
+               set_bit(SOCK_PASSCRED, &new->flags);
+       if (test_bit(SOCK_PASSSEC, &old->flags))
+               set_bit(SOCK_PASSSEC, &new->flags);
+}
+
 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
 {
        struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
        /* attach accepted sock to socket */
        unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
+       unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;
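
unix_accept() now copies the listener's SOCK_PASSCRED and SOCK_PASSSEC flags onto the accepted socket, so credential/security-label delivery requested on the listening socket carries over to the connections it returns. A minimal userspace listener that requests credentials; illustrative only, with error handling omitted:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    static int make_cred_listener(const char *path)
    {
            int on = 1;
            int fd = socket(AF_UNIX, SOCK_STREAM, 0);
            struct sockaddr_un sun = { .sun_family = AF_UNIX };

            strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);
            /* With the inheritance fix above, sockets returned by accept()
             * on fd keep SO_PASSCRED set as well. */
            setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
            bind(fd, (struct sockaddr *)&sun, sizeof(sun));
            listen(fd, 1);
            return fd;
    }
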
index d591091603bfe4da1d6a87810287bdd5aaecc6da..86fa0f3b2cafa46d47db9cd03e46dfe89c22d238 100644 (file)
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
        rep->udiag_state = sk->sk_state;
+       rep->pad = 0;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);
 
index 1e743d2148565a1010114b468366b5285a56f02e..5dcd9c067bf0ac72a3c6e1067818053e31f7b748 100644 (file)
@@ -63,11 +63,11 @@ void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
 {
        wimax_dev->state = state;
 }
-extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+void __wimax_state_change(struct wimax_dev *, enum wimax_st);
 
 #ifdef CONFIG_DEBUG_FS
-extern int wimax_debugfs_add(struct wimax_dev *);
-extern void wimax_debugfs_rm(struct wimax_dev *);
+int wimax_debugfs_add(struct wimax_dev *);
+void wimax_debugfs_rm(struct wimax_dev *);
 #else
 static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
 {
@@ -76,13 +76,13 @@ static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
 static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
 #endif
 
-extern void wimax_id_table_add(struct wimax_dev *);
-extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
-extern void wimax_id_table_rm(struct wimax_dev *);
-extern void wimax_id_table_release(void);
+void wimax_id_table_add(struct wimax_dev *);
+struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+void wimax_id_table_rm(struct wimax_dev *);
+void wimax_id_table_release(void);
 
-extern int wimax_rfkill_add(struct wimax_dev *);
-extern void wimax_rfkill_rm(struct wimax_dev *);
+int wimax_rfkill_add(struct wimax_dev *);
+void wimax_rfkill_rm(struct wimax_dev *);
 
 extern struct genl_family wimax_gnl_family;
 extern struct genl_multicast_group wimax_gnl_mcg;
index 67153964aad2059652ffd34d79c956585cc9c1e3..aff959e5a1b360e7cb467cade7f7d617544b3909 100644 (file)
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
        /* check and set up bitrates */
        ieee80211_set_bitrate_flags(wiphy);
 
-
+       rtnl_lock();
        res = device_add(&rdev->wiphy.dev);
-       if (res)
-               return res;
-
-       res = rfkill_register(rdev->rfkill);
        if (res) {
-               device_del(&rdev->wiphy.dev);
+               rtnl_unlock();
                return res;
        }
 
-       rtnl_lock();
        /* set up regulatory info */
        wiphy_regulatory_register(wiphy);
 
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
 
        rdev->wiphy.registered = true;
        rtnl_unlock();
+
+       res = rfkill_register(rdev->rfkill);
+       if (res) {
+               rfkill_destroy(rdev->rfkill);
+               rdev->rfkill = NULL;
+               wiphy_unregister(&rdev->wiphy);
+               return res;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
                rtnl_unlock();
                __count == 0; }));
 
-       rfkill_unregister(rdev->rfkill);
+       if (rdev->rfkill)
+               rfkill_unregister(rdev->rfkill);
 
        rtnl_lock();
        rdev->wiphy.registered = false;
@@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
                        return notifier_from_errno(-EOPNOTSUPP);
-               if (rfkill_blocked(rdev->rfkill))
-                       return notifier_from_errno(-ERFKILL);
                ret = cfg80211_can_add_interface(rdev, wdev->iftype);
                if (ret)
                        return notifier_from_errno(ret);
index b43efac4efca786d3078c45220747e88eea168e2..af10e59af2d86f418bd7cc4e69914c9bb31831c7 100644 (file)
@@ -234,10 +234,10 @@ struct cfg80211_beacon_registration {
 };
 
 /* free object */
-extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
+void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
 
-extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
-                              char *newname);
+int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
+                       char *newname);
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
 
@@ -402,6 +402,9 @@ static inline int
 cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
                           enum nl80211_iftype iftype)
 {
+       if (rfkill_blocked(rdev->rfkill))
+               return -ERFKILL;
+
        return cfg80211_can_change_interface(rdev, NULL, iftype);
 }
 
index 39bff7d367687fbdd4d220d76e34d66e4e586a9c..403fe29c024db86f732b0c0d74b8d97d4cfc84bf 100644 (file)
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
                                if (chan->flags & IEEE80211_CHAN_DISABLED)
                                        continue;
                                wdev->wext.ibss.chandef.chan = chan;
+                               wdev->wext.ibss.chandef.center_freq1 =
+                                       chan->center_freq;
                                break;
                        }
 
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
        if (chan) {
                wdev->wext.ibss.chandef.chan = chan;
                wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+               wdev->wext.ibss.chandef.center_freq1 = freq;
                wdev->wext.ibss.channel_fixed = true;
        } else {
                /* cfg80211_ibss_wext_join will pick one if needed */
index 2838206ddad3b5b05ea2eb2a6bfbc281593f9b41..cbbef88a8ebd125fc4797e4493968e5c67108771 100644 (file)
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
                change = true;
        }
 
-       if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+       if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                                  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
                                  &flags);
 
-       if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+       if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
index 7d604c06c3dc38d1155366a52f184971be1197e3..a271c27fac774ce987c0db6f1330ffbfca6dc7f7 100644 (file)
@@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
        struct ieee80211_radiotap_header *radiotap_header,
        int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
 {
+       /* check the radiotap header can actually be present */
+       if (max_length < sizeof(struct ieee80211_radiotap_header))
+               return -EINVAL;
+
        /* Linux only supports version 0 radiotap format */
        if (radiotap_header->it_version)
                return -EINVAL;
@@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
                         */
 
                        if ((unsigned long)iterator->_arg -
-                           (unsigned long)iterator->_rtheader >
+                           (unsigned long)iterator->_rtheader +
+                           sizeof(uint32_t) >
                            (unsigned long)iterator->_max_length)
                                return -EINVAL;
                }
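
The radiotap iterator changes above are both bounds checks: the fixed header must fit inside max_length before any of its fields are read, and the it_present bitmap walk must account for the extra u32 it is about to consume. As a generic, hedged illustration of the first check only (the struct and function names below are made up for the example and are not taken from the radiotap code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct example_hdr {
        uint8_t  version;
        uint8_t  pad;
        uint16_t len;
        uint32_t present;
};

/* Returns 0 on success, -1 if the buffer cannot even hold the header. */
static int example_parse(const void *buf, size_t buflen, struct example_hdr *out)
{
        if (buflen < sizeof(struct example_hdr))
                return -1;              /* reject before touching any field */

        memcpy(out, buf, sizeof(*out)); /* safe: length was validated above */
        return 0;
}
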
index 65acbebd371127b5eeb70f5bc82a6c1028924c26..b533ed71dafff1d0b3f7c0a6049630d9e2cdda38 100644 (file)
@@ -1,8 +1,8 @@
 #ifndef __WIRELESS_SYSFS_H
 #define __WIRELESS_SYSFS_H
 
-extern int wiphy_sysfs_init(void);
-extern void wiphy_sysfs_exit(void);
+int wiphy_sysfs_init(void);
+void wiphy_sysfs_exit(void);
 
 extern struct class ieee80211_class;
 
index 716502ada53ba00b1877c2bc6d140fd50915ee7a..0622d319e1f269c31e8a6af27e4c986832948534 100644 (file)
@@ -130,7 +130,7 @@ static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
        return h & hmask;
 }
 
-extern struct hlist_head *xfrm_hash_alloc(unsigned int sz);
-extern void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
+struct hlist_head *xfrm_hash_alloc(unsigned int sz);
+void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
 
 #endif /* _XFRM_HASH_H */
index 2906d520eea7c2b7636fc94f3a60f5131701f57d..ccfdc7115a83f709e2a5980c5dc0d65cd5859467 100644 (file)
@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
        const int plen = skb->len;
        int dlen = IPCOMP_SCRATCH_SIZE;
        u8 *start = skb->data;
-       const int cpu = get_cpu();
-       u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-       struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+       struct crypto_comp *tfm;
+       u8 *scratch;
        int err;
 
        local_bh_disable();
+       scratch = *this_cpu_ptr(ipcomp_scratches);
+       tfm = *this_cpu_ptr(ipcd->tfms);
        err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-       local_bh_enable();
        if (err)
                goto out;
 
@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-       put_cpu();
+       local_bh_enable();
 
        pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
        return 0;
 
 out:
-       put_cpu();
+       local_bh_enable();
        return err;
 }
 
@@ -220,8 +220,8 @@ static void ipcomp_free_scratches(void)
 
 static void * __percpu *ipcomp_alloc_scratches(void)
 {
-       int i;
        void * __percpu *scratches;
+       int i;
 
        if (ipcomp_scratch_users++)
                return ipcomp_scratches;
@@ -233,7 +233,9 @@ static void * __percpu *ipcomp_alloc_scratches(void)
        ipcomp_scratches = scratches;
 
        for_each_possible_cpu(i) {
-               void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+               void *scratch;
+
+               scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!scratch)
                        return NULL;
                *per_cpu_ptr(scratches, i) = scratch;
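
The hunks above for this file replace the get_cpu()/put_cpu() pair with local_bh_disable()/local_bh_enable(), and resolve the per-CPU pointers only after bottom halves (and with them preemption) are disabled, so the task cannot migrate between looking up the scratch buffer and using it. A minimal sketch of that access pattern, assuming a per-CPU pointer allocated the same way as ipcomp_scratches; the names example_scratches and example_fill are hypothetical and the copy stands in for the real compression call:

#include <linux/percpu.h>
#include <linux/interrupt.h>
#include <linux/string.h>

static void * __percpu *example_scratches;      /* assumed allocated elsewhere */

static void example_fill(const u8 *src, size_t len)
{
        u8 *scratch;

        local_bh_disable();                     /* no migration, no softirq reentry */
        scratch = *this_cpu_ptr(example_scratches);
        memcpy(scratch, src, len);              /* caller guarantees len fits the scratch area */
        local_bh_enable();                      /* only after the scratch buffer is done with */
}
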
index ed38d5d81f9e1f4890cf36447713badd48691489..9a91f7431c411b706810a40bccc3167617c74aec 100644 (file)
@@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 
        atomic_inc(&policy->genid);
 
-       del_timer(&policy->polq.hold_timer);
+       if (del_timer(&policy->polq.hold_timer))
+               xfrm_pol_put(policy);
        xfrm_queue_purge(&policy->polq.hold_queue);
 
        if (del_timer(&policy->timer))
@@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
 
        spin_lock_bh(&pq->hold_queue.lock);
        skb_queue_splice_init(&pq->hold_queue, &list);
-       del_timer(&pq->hold_timer);
+       if (del_timer(&pq->hold_timer))
+               xfrm_pol_put(old);
        spin_unlock_bh(&pq->hold_queue.lock);
 
        if (skb_queue_empty(&list))
@@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
        spin_lock_bh(&pq->hold_queue.lock);
        skb_queue_splice(&list, &pq->hold_queue);
        pq->timeout = XFRM_QUEUE_TMO_MIN;
-       mod_timer(&pq->hold_timer, jiffies);
+       if (!mod_timer(&pq->hold_timer, jiffies))
+               xfrm_pol_hold(new);
        spin_unlock_bh(&pq->hold_queue.lock);
 }
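
The two hunks above, and the remaining ones for this file, all apply one rule: a pending hold_timer owns a reference on the policy. del_timer() returns non-zero only when it deactivated a pending timer, so that return is paired with xfrm_pol_put(); mod_timer() returns 0 only when the timer was not already pending, so that return is paired with xfrm_pol_hold(). A condensed sketch of the pairing, using only calls visible in the hunks (the helper names are illustrative, not part of the patch):

static void example_arm_hold_timer(struct xfrm_policy *pol, unsigned long timeout)
{
        /* mod_timer() == 0: the timer was idle and is now armed,
         * so the timer takes its own reference on the policy. */
        if (!mod_timer(&pol->polq.hold_timer, jiffies + timeout))
                xfrm_pol_hold(pol);
}

static void example_stop_hold_timer(struct xfrm_policy *pol)
{
        /* del_timer() != 0: a pending timer was stopped,
         * so the reference it held is dropped here. */
        if (del_timer(&pol->polq.hold_timer))
                xfrm_pol_put(pol);
}
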
 
@@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg)
 
        spin_lock(&pq->hold_queue.lock);
        skb = skb_peek(&pq->hold_queue);
+       if (!skb) {
+               spin_unlock(&pq->hold_queue.lock);
+               goto out;
+       }
        dst = skb_dst(skb);
        sk = skb->sk;
        xfrm_decode_session(skb, &fl, dst->ops->family);
@@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg)
                        goto purge_queue;
 
                pq->timeout = pq->timeout << 1;
-               mod_timer(&pq->hold_timer, jiffies + pq->timeout);
-               return;
+               if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
+                       xfrm_pol_hold(pol);
+       goto out;
        }
 
        dst_release(dst);
@@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg)
                err = dst_output(skb);
        }
 
+out:
+       xfrm_pol_put(pol);
        return;
 
 purge_queue:
        pq->timeout = 0;
        xfrm_queue_purge(&pq->hold_queue);
+       xfrm_pol_put(pol);
 }
 
 static int xdst_queue_output(struct sk_buff *skb)
@@ -1831,7 +1842,15 @@ static int xdst_queue_output(struct sk_buff *skb)
        unsigned long sched_next;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
-       struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
+       struct xfrm_policy *pol = xdst->pols[0];
+       struct xfrm_policy_queue *pq = &pol->polq;
+       const struct sk_buff *fclone = skb + 1;
+
+       if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+                    fclone->fclone == SKB_FCLONE_CLONE)) {
+               kfree_skb(skb);
+               return 0;
+       }
 
        if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
                kfree_skb(skb);
@@ -1850,10 +1869,12 @@ static int xdst_queue_output(struct sk_buff *skb)
        if (del_timer(&pq->hold_timer)) {
                if (time_before(pq->hold_timer.expires, sched_next))
                        sched_next = pq->hold_timer.expires;
+               xfrm_pol_put(pol);
        }
 
        __skb_queue_tail(&pq->hold_queue, skb);
-       mod_timer(&pq->hold_timer, sched_next);
+       if (!mod_timer(&pq->hold_timer, sched_next))
+               xfrm_pol_hold(pol);
 
        spin_unlock_bh(&pq->hold_queue.lock);
 
index 8dafe6d3c6e41ebd20e5d0347d4ab9b0e6326a34..dab57daae40856030790fa8070068a59d82220af 100644 (file)
@@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
-                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
+               if (!x->replay_maxdiff ||
+                   ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
+                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
                return 0;
 
        diff = x->replay.seq - seq;
-       if (diff >= min_t(unsigned int, x->props.replay_window,
-                         sizeof(x->replay.bitmap) * 8)) {
+       if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                goto err;
        }
@@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
-                   (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) {
+               if (!x->replay_maxdiff ||
+                   ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
+                   (replay_esn->oseq - preplay_esn->oseq
+                    < x->replay_maxdiff))) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
@@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (!x->replay_maxdiff)
-                       break;
-
-               if (replay_esn->seq_hi == preplay_esn->seq_hi)
-                       seq_diff = replay_esn->seq - preplay_esn->seq;
-               else
-                       seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
-
-               if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
-                       oseq_diff = replay_esn->oseq - preplay_esn->oseq;
-               else
-                       oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
-
-               if (seq_diff < x->replay_maxdiff &&
-                   oseq_diff < x->replay_maxdiff) {
+               if (x->replay_maxdiff) {
+                       if (replay_esn->seq_hi == preplay_esn->seq_hi)
+                               seq_diff = replay_esn->seq - preplay_esn->seq;
+                       else
+                               seq_diff = ~preplay_esn->seq + replay_esn->seq
+                                          + 1;
 
-                       if (x->xflags & XFRM_TIME_DEFER)
-                               event = XFRM_REPLAY_TIMEOUT;
+                       if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+                               oseq_diff = replay_esn->oseq
+                                           - preplay_esn->oseq;
                        else
-                               return;
+                               oseq_diff = ~preplay_esn->oseq
+                                           + replay_esn->oseq + 1;
+
+                       if (seq_diff >= x->replay_maxdiff ||
+                           oseq_diff >= x->replay_maxdiff)
+                               break;
                }
 
+               if (x->xflags & XFRM_TIME_DEFER)
+                       event = XFRM_REPLAY_TIMEOUT;
+               else
+                       return;
+
                break;
 
        case XFRM_REPLAY_TIMEOUT:
index b9c3f9e943a9159d1617feec49c751055ea4dd55..68c2f357a18389d9debc35401afe1295c4341959 100644 (file)
@@ -468,7 +468,7 @@ expired:
        }
 
        err = __xfrm_state_delete(x);
-       if (!err && x->id.spi)
+       if (!err)
                km_state_expired(x, 1, 0);
 
        xfrm_audit_state_delete(x, err ? 0 : 1,
@@ -815,7 +815,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                        xfrm_state_look_at(pol, x, fl, encap_family,
                                           &best, &acquire_in_progress, &error);
        }
-       if (best)
+       if (best || acquire_in_progress)
                goto found;
 
        h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
@@ -824,7 +824,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-                   xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
+                   xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
index 3f565e495ac68cea83e1d52cf7db7e820fe777ad..f964d4c00ffb53457aa46b24f0225249c1d46b7c 100644 (file)
@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
        memcpy(&x->sel, &p->sel, sizeof(x->sel));
        memcpy(&x->lft, &p->lft, sizeof(x->lft));
        x->props.mode = p->mode;
-       x->props.replay_window = p->replay_window;
+       x->props.replay_window = min_t(unsigned int, p->replay_window,
+                                       sizeof(x->replay.bitmap) * 8);
        x->props.reqid = p->reqid;
        x->props.family = p->family;
        memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
@@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (x->km.state != XFRM_STATE_VALID)
                goto out;
 
-       err = xfrm_replay_verify_len(x->replay_esn, rp);
+       err = xfrm_replay_verify_len(x->replay_esn, re);
        if (err)
                goto out;
 
index 47016c304c847efffb96da5358224a9b5a93cc5e..66cad506b8a2a944f2873856ef0078445f673b8b 100755 (executable)
@@ -3975,8 +3975,8 @@ sub string_find_replace {
 # check for new externs in .h files.
                if ($realfile =~ /\.h$/ &&
                    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
-                       if (WARN("AVOID_EXTERNS",
-                                "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+                       if (CHK("AVOID_EXTERNS",
+                               "extern prototypes should be avoided in .h files\n" . $herecurr) &&
                            $fix) {
                                $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
                        }
diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci
deleted file mode 100644 (file)
index 562ec88..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/// Reimplement a call to devm_request_mem_region followed by a call to ioremap
-/// or ioremap_nocache by a call to devm_request_and_ioremap.
-/// Devm_request_and_ioremap was introduced in
-/// 72f8c0bfa0de64c68ee59f40eb9b2683bffffbb0.  It makes the code much more
-/// concise.
-///
-///
-// Confidence: High
-// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6.  GPLv2.
-// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
-// URL: http://coccinelle.lip6.fr/
-// Comments:
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual org
-virtual report
-virtual context
-
-@nm@
-expression myname;
-identifier i;
-@@
-
-struct platform_driver i = { .driver = { .name = myname } };
-
-@depends on patch@
-expression dev,res,size;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,
--                              \(res->name\|dev_name(dev)\))) {
--   ...
--   return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
-    when != res->start
-
-// this rule is separate from the previous one, because a single file can
-// have multiple values of myname
-@depends on patch@
-expression dev,res,size;
-expression nm.myname;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,myname)) {
--   ...
--   return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
-    when != res->start
-
-
-@pb depends on org || report || context@
-expression dev,res,size;
-expression nm.myname;
-position p1,p2;
-@@
-
-*if
-  (!devm_request_mem_region@p1(dev, res->start, size,
-                              \(res->name\|dev_name(dev)\|myname\))) {
-   ...
-   return ...;
-}
-... when != res->start
-(
-*devm_ioremap@p2(dev,res->start,size)
-|
-*devm_ioremap_nocache@p2(dev,res->start,size)
-)
-... when any
-    when != res->start
-
-@script:python depends on org@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-cocci.print_main("INFO: replace by devm_request_and_ioremap",p1)
-cocci.print_secs("",p2)
-
-@script:python depends on report@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-msg = "INFO: devm_request_mem_region followed by ioremap on line %s can be replaced by devm_request_and_ioremap" % (p2[0].line)
-coccilib.report.print_report(p1[0],msg)
index 17df3051747a93ee68a75255764408c813f1a14e..e25732b5d701127d904da3b608b825a520fce3ca 100755 (executable)
@@ -13,7 +13,7 @@ import sys
 import string
 
 def usage():
-       print """usage: show_delta [<options>] <filename>
+       print ("""usage: show_delta [<options>] <filename>
 
 This program parses the output from a set of printk message lines which
 have time data prefixed because the CONFIG_PRINTK_TIME option is set, or
@@ -35,7 +35,7 @@ ex: $ dmesg >timefile
 
 will show times relative to the line in the kernel output
 starting with "NET4".
-"""
+""")
        sys.exit(1)
 
 # returns a tuple containing the seconds and text for each message line
@@ -94,11 +94,11 @@ def main():
        try:
                lines = open(filein,"r").readlines()
        except:
-               print "Problem opening file: %s" % filein
+               print ("Problem opening file: %s" % filein)
                sys.exit(1)
 
        if base_str:
-               print 'base= "%s"' % base_str
+               print ('base= "%s"' % base_str)
                # assume a numeric base.  If that fails, try searching
                # for a matching line.
                try:
@@ -117,13 +117,13 @@ def main():
                                        # stop at first match
                                        break
                        if not found:
-                               print 'Couldn\'t find line matching base pattern "%s"' % base_str
+                               print ('Couldn\'t find line matching base pattern "%s"' % base_str)
                                sys.exit(1)
        else:
                base_time = 0.0
 
        for line in lines:
-               print convert_line(line, base_time),
+               print (convert_line(line, base_time),)
 
 main()
 
index 74f02e4dddd29eca8894fe409984ae525d1e2e96..f1bcfc11cc72e366217041ea05039b397e0b8468 100755 (executable)
@@ -151,13 +151,14 @@ exuberant()
        all_target_sources | xargs $1 -a                        \
        -I __initdata,__exitdata,__initconst,__devinitdata      \
        -I __devinitconst,__cpuinitdata,__initdata_memblock     \
-       -I __refdata,__attribute                                \
+       -I __refdata,__attribute,__maybe_unused,__always_unused \
        -I __acquires,__releases,__deprecated                   \
        -I __read_mostly,__aligned,____cacheline_aligned        \
        -I ____cacheline_aligned_in_smp                         \
+       -I __cacheline_aligned,__cacheline_aligned_in_smp       \
        -I ____cacheline_internodealigned_in_smp                \
        -I __used,__packed,__packed2__,__must_check,__must_hold \
-       -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL                      \
+       -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL   \
        -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
        -I static,const                                         \
        --extra=+f --c-kinds=+px                                \
index 95c2b2689a03aa521bc923323f521a8c191d107d..7db9954f1af2c56c42400746f367f5f4d9906ca8 100644 (file)
@@ -580,15 +580,13 @@ static struct aa_namespace *__next_namespace(struct aa_namespace *root,
 
        /* check if the next ns is a sibling, parent, gp, .. */
        parent = ns->parent;
-       while (parent) {
+       while (ns != root) {
                mutex_unlock(&ns->lock);
                next = list_entry_next(ns, base.list);
                if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
                        mutex_lock(&next->lock);
                        return next;
                }
-               if (parent == root)
-                       return NULL;
                ns = parent;
                parent = parent->parent;
        }
index d6222ba4e9190207f3ddabbb58161e25f397169b..532471d0b3a0facd6c7cb70a930e4028ca6d0761 100644 (file)
  * it should be.
  */
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 
 #include "include/apparmor.h"
 #include "include/crypto.h"
 
 static unsigned int apparmor_hash_size;
 
-static struct crypto_hash *apparmor_tfm;
+static struct crypto_shash *apparmor_tfm;
 
 unsigned int aa_hash_size(void)
 {
@@ -32,35 +32,33 @@ unsigned int aa_hash_size(void)
 int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
                         size_t len)
 {
-       struct scatterlist sg[2];
-       struct hash_desc desc = {
-               .tfm = apparmor_tfm,
-               .flags = 0
-       };
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(apparmor_tfm)];
+       } desc;
        int error = -ENOMEM;
        u32 le32_version = cpu_to_le32(version);
 
        if (!apparmor_tfm)
                return 0;
 
-       sg_init_table(sg, 2);
-       sg_set_buf(&sg[0], &le32_version, 4);
-       sg_set_buf(&sg[1], (u8 *) start, len);
-
        profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
        if (!profile->hash)
                goto fail;
 
-       error = crypto_hash_init(&desc);
+       desc.shash.tfm = apparmor_tfm;
+       desc.shash.flags = 0;
+
+       error = crypto_shash_init(&desc.shash);
        if (error)
                goto fail;
-       error = crypto_hash_update(&desc, &sg[0], 4);
+       error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
        if (error)
                goto fail;
-       error = crypto_hash_update(&desc, &sg[1], len);
+       error = crypto_shash_update(&desc.shash, (u8 *) start, len);
        if (error)
                goto fail;
-       error = crypto_hash_final(&desc, profile->hash);
+       error = crypto_shash_final(&desc.shash, profile->hash);
        if (error)
                goto fail;
 
@@ -75,19 +73,19 @@ fail:
 
 static int __init init_profile_hash(void)
 {
-       struct crypto_hash *tfm;
+       struct crypto_shash *tfm;
 
        if (!apparmor_initialized)
                return 0;
 
-       tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                int error = PTR_ERR(tfm);
                AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
                return error;
        }
        apparmor_tfm = tfm;
-       apparmor_hash_size = crypto_hash_digestsize(apparmor_tfm);
+       apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
 
        aa_info_message("AppArmor sha1 policy hashing enabled");
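
For readers unfamiliar with the shash interface adopted above: the descriptor lives on the stack and is immediately followed by crypto_shash_descsize() bytes of per-transform context, which is what the anonymous struct in the hunk provides. A minimal sketch of the same sequence, allocating and freeing the transform locally; the function name is illustrative and the output buffer is assumed to hold crypto_shash_digestsize() bytes:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_sha1(const void *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                /* descriptor plus context, sized at runtime as in the hunk */
                struct {
                        struct shash_desc shash;
                        char ctx[crypto_shash_descsize(tfm)];
                } desc;

                desc.shash.tfm = tfm;
                desc.shash.flags = 0;

                err = crypto_shash_init(&desc.shash);
                if (!err)
                        err = crypto_shash_update(&desc.shash, data, len);
                if (!err)
                        err = crypto_shash_final(&desc.shash, out);
        }

        crypto_free_shash(tfm);
        return err;
}
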
 
index f2d4b6348cbc0a5f1ec0abdc0d2d975ad34ce493..c28b0f20ab53ed69da7c1e6b8c60a8099370a4f8 100644 (file)
@@ -360,7 +360,9 @@ static inline void aa_put_replacedby(struct aa_replacedby *p)
 static inline void __aa_update_replacedby(struct aa_profile *orig,
                                          struct aa_profile *new)
 {
-       struct aa_profile *tmp = rcu_dereference(orig->replacedby->profile);
+       struct aa_profile *tmp;
+       tmp = rcu_dereference_protected(orig->replacedby->profile,
+                                       mutex_is_locked(&orig->ns->lock));
        rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
        orig->flags |= PFLAG_INVALID;
        aa_put_profile(tmp);
index 6172509fa2b7441fbda60d1cd6e5b35fce432dc8..705c2879d3a94a79e10d1190468483c2f9d7192a 100644 (file)
@@ -563,7 +563,8 @@ void __init aa_free_root_ns(void)
 static void free_replacedby(struct aa_replacedby *r)
 {
        if (r) {
-               aa_put_profile(rcu_dereference(r->profile));
+               /* r->profile will not be updated any more as r is dead */
+               aa_put_profile(rcu_dereference_protected(r->profile, true));
                kzfree(r);
        }
 }
@@ -609,6 +610,7 @@ void aa_free_profile(struct aa_profile *profile)
        aa_put_dfa(profile->policy.dfa);
        aa_put_replacedby(profile->replacedby);
 
+       kzfree(profile->hash);
        kzfree(profile);
 }
 
index 8d8d97dbb389b1d305352cee29e4dbcc0191af64..234bc2ab450c61b42b1db2b53f631ab72bc48a39 100644 (file)
@@ -302,18 +302,19 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                                "faddr", "fport");
                                break;
                        }
+#if IS_ENABLED(CONFIG_IPV6)
                        case AF_INET6: {
                                struct inet_sock *inet = inet_sk(sk);
-                               struct ipv6_pinfo *inet6 = inet6_sk(sk);
 
-                               print_ipv6_addr(ab, &inet6->rcv_saddr,
+                               print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
                                                inet->inet_sport,
                                                "laddr", "lport");
-                               print_ipv6_addr(ab, &inet6->daddr,
+                               print_ipv6_addr(ab, &sk->sk_v6_daddr,
                                                inet->inet_dport,
                                                "faddr", "fport");
                                break;
                        }
+#endif
                        case AF_UNIX:
                                u = unix_sk(sk);
                                if (u->path.dentry) {
index dad36a6ab45f628860ef4c5c097d2a5abce15b4a..fc3e6628a8642e027c992cf500fd4c0a921c85fc 100644 (file)
@@ -746,7 +746,6 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
  * @tclass: target security class
  * @requested: requested permissions, interpreted based on @tclass
  * @auditdata: auxiliary audit data
- * @flags: VFS walk flags
  *
  * Check the AVC to determine whether the @requested permissions are granted
  * for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -756,17 +755,15 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
  * permissions are granted, -%EACCES if any permissions are denied, or
  * another -errno upon other errors.
  */
-int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
-                      u32 requested, struct common_audit_data *auditdata,
-                      unsigned flags)
+int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
+                u32 requested, struct common_audit_data *auditdata)
 {
        struct av_decision avd;
        int rc, rc2;
 
        rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
 
-       rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
-                       flags);
+       rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
        if (rc2)
                return rc2;
        return rc;
index a5091ec06aa62816798510e40a1bcf005d2abd3d..c540795fb3f2647619cb4705281872e93592e21e 100644 (file)
@@ -1502,7 +1502,7 @@ static int cred_has_capability(const struct cred *cred,
 
        rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
        if (audit == SECURITY_CAP_AUDIT) {
-               int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
+               int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
                if (rc2)
                        return rc2;
        }
@@ -1525,8 +1525,7 @@ static int task_has_system(struct task_struct *tsk,
 static int inode_has_perm(const struct cred *cred,
                          struct inode *inode,
                          u32 perms,
-                         struct common_audit_data *adp,
-                         unsigned flags)
+                         struct common_audit_data *adp)
 {
        struct inode_security_struct *isec;
        u32 sid;
@@ -1539,7 +1538,7 @@ static int inode_has_perm(const struct cred *cred,
        sid = cred_sid(cred);
        isec = inode->i_security;
 
-       return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
+       return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
 }
 
 /* Same as inode_has_perm, but pass explicit audit data containing
@@ -1554,7 +1553,7 @@ static inline int dentry_has_perm(const struct cred *cred,
 
        ad.type = LSM_AUDIT_DATA_DENTRY;
        ad.u.dentry = dentry;
-       return inode_has_perm(cred, inode, av, &ad, 0);
+       return inode_has_perm(cred, inode, av, &ad);
 }
 
 /* Same as inode_has_perm, but pass explicit audit data containing
@@ -1569,7 +1568,7 @@ static inline int path_has_perm(const struct cred *cred,
 
        ad.type = LSM_AUDIT_DATA_PATH;
        ad.u.path = *path;
-       return inode_has_perm(cred, inode, av, &ad, 0);
+       return inode_has_perm(cred, inode, av, &ad);
 }
 
 /* Same as path_has_perm, but uses the inode from the file struct. */
@@ -1581,7 +1580,7 @@ static inline int file_path_has_perm(const struct cred *cred,
 
        ad.type = LSM_AUDIT_DATA_PATH;
        ad.u.path = file->f_path;
-       return inode_has_perm(cred, file_inode(file), av, &ad, 0);
+       return inode_has_perm(cred, file_inode(file), av, &ad);
 }
 
 /* Check whether a task can use an open file descriptor to
@@ -1617,7 +1616,7 @@ static int file_has_perm(const struct cred *cred,
        /* av is zero if only checking access to the descriptor. */
        rc = 0;
        if (av)
-               rc = inode_has_perm(cred, inode, av, &ad, 0);
+               rc = inode_has_perm(cred, inode, av, &ad);
 
 out:
        return rc;
@@ -3929,7 +3928,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                if (snum) {
                        int low, high;
 
-                       inet_get_local_port_range(&low, &high);
+                       inet_get_local_port_range(sock_net(sk), &low, &high);
 
                        if (snum < max(PROT_SOCK, low) || snum > high) {
                                err = sel_netport_sid(sk->sk_protocol,
@@ -4668,7 +4667,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_forward(unsigned int hooknum,
+static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -4678,7 +4677,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum,
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -4710,7 +4709,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_output(unsigned int hooknum,
+static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
                                        const struct net_device *in,
                                        const struct net_device *out,
@@ -4837,7 +4836,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
@@ -4847,7 +4846,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
index 92d0ab561db80cb4aa253815149a35e02d6175d1..f53ee3c58d0ffd5099879f2bcdc160c88c209c86 100644 (file)
@@ -130,7 +130,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
                            u16 tclass, u32 requested,
                            struct av_decision *avd,
                            int result,
-                           struct common_audit_data *a, unsigned flags)
+                           struct common_audit_data *a)
 {
        u32 audited, denied;
        audited = avc_audit_required(requested, avd, result, 0, &denied);
@@ -138,7 +138,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
                return 0;
        return slow_avc_audit(ssid, tsid, tclass,
                              requested, audited, denied,
-                             a, flags);
+                             a, 0);
 }
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -147,17 +147,9 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
                         unsigned flags,
                         struct av_decision *avd);
 
-int avc_has_perm_flags(u32 ssid, u32 tsid,
-                      u16 tclass, u32 requested,
-                      struct common_audit_data *auditdata,
-                      unsigned);
-
-static inline int avc_has_perm(u32 ssid, u32 tsid,
-                              u16 tclass, u32 requested,
-                              struct common_audit_data *auditdata)
-{
-       return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
-}
+int avc_has_perm(u32 ssid, u32 tsid,
+                u16 tclass, u32 requested,
+                struct common_audit_data *auditdata);
 
 u32 avc_policy_seqno(void);
 
index 69a2455b447210d42c2bf2c37b23fbe9035eada9..e6c727b317fbd8fa5a1d33fef7133aa601eeddd7 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 
 #include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
        .mmap           = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
 int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
                   struct snd_pcm **rpcm)
 {
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
        pcm->private_data = client;
        pcm->private_free = pxa2xx_pcm_free_dma_buffers;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = 0xffffffff;
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto out;
 
        if (play) {
                int stream = SNDRV_PCM_STREAM_PLAYBACK;
index 98969541cbcc9c62ff81afb9818d2399dd6e7eb0..bea523a5d852e11f9d06e1d682e065b699f347c8 100644 (file)
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
 static int snd_compr_free(struct inode *inode, struct file *f)
 {
        struct snd_compr_file *data = f->private_data;
+       struct snd_compr_runtime *runtime = data->stream.runtime;
+
+       switch (runtime->state) {
+       case SNDRV_PCM_STATE_RUNNING:
+       case SNDRV_PCM_STATE_DRAINING:
+       case SNDRV_PCM_STATE_PAUSED:
+               data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+               break;
+       default:
+               break;
+       }
+
        data->stream.ops->free(&data->stream);
        kfree(data->stream.runtime->buffer);
        kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
        struct snd_compr *compr;
 
        compr = device->device_data;
-       snd_unregister_device(compr->direction, compr->card, compr->device);
+       snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+               compr->device);
        return 0;
 }
 
index 445ca481d8d30116bc075e3ac2cee881f1149863..bf578ba2677e72566187619e1e0515afe93fdfd6 100644 (file)
@@ -175,6 +175,7 @@ static const struct ac97_codec_id snd_ac97_codec_ids[] = {
 { 0x54524106, 0xffffffff, "TR28026",           NULL,           NULL },
 { 0x54524108, 0xffffffff, "TR28028",           patch_tritech_tr28028,  NULL }, // added by xin jin [07/09/99]
 { 0x54524123, 0xffffffff, "TR28602",           NULL,           NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)]
+{ 0x54584e03, 0xffffffff, "TLV320AIC27",       NULL,           NULL },
 { 0x54584e20, 0xffffffff, "TLC320AD9xC",       NULL,           NULL },
 { 0x56494161, 0xffffffff, "VIA1612A",          NULL,           NULL }, // modified ICE1232 with S/PDIF
 { 0x56494170, 0xffffffff, "VIA1617A",          patch_vt1617a,  NULL }, // modified VT1616 with S/PDIF
index ac41e9cdc976a1c190c51684519abb9c475c2c59..26ad4f0aade3fcf2c26940c008fae9a55c63244c 100644 (file)
@@ -3531,7 +3531,7 @@ static int create_capture_mixers(struct hda_codec *codec)
                if (!multi)
                        err = create_single_cap_vol_ctl(codec, n, vol, sw,
                                                        inv_dmic);
-               else if (!multi_cap_vol)
+               else if (!multi_cap_vol && !inv_dmic)
                        err = create_bind_cap_vol_ctl(codec, n, vol, sw);
                else
                        err = create_multi_cap_vol_ctl(codec);
index b524f89a1f13a7d74c51c8231684f971662ccc15..18d9725015855c0ce3f33d174a3dfcd55836efeb 100644 (file)
@@ -111,6 +111,9 @@ enum {
 /* 0x0009 - 0x0014 -> 12 test regs */
 /* 0x0015 - visibility reg */
 
+/* Cirrus Logic CS4208 */
+#define CS4208_VENDOR_NID      0x24
+
 /*
  * Cirrus Logic CS4210
  *
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+static const struct hda_verb cs4208_coef_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x24, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+       {0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
+       {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
+       {0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
+       {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
+       {} /* terminator */
+};
+
 /* Errata: CS4207 rev C0/C1/C2 Silicon
  *
  * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
                /* init_verb sequence for C0/C1/C2 errata*/
                snd_hda_sequence_write(codec, cs_errata_init_verbs);
                snd_hda_sequence_write(codec, cs_coef_init_verbs);
+       } else if (spec->vendor_nid == CS4208_VENDOR_NID) {
+               snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
        }
 
        snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
        {} /* terminator */
 };
 
+static const struct hda_pintbl mba6_pincfgs[] = {
+       { 0x10, 0x032120f0 }, /* HP */
+       { 0x11, 0x500000f0 },
+       { 0x12, 0x90100010 }, /* Speaker */
+       { 0x13, 0x500000f0 },
+       { 0x14, 0x500000f0 },
+       { 0x15, 0x770000f0 },
+       { 0x16, 0x770000f0 },
+       { 0x17, 0x430000f0 },
+       { 0x18, 0x43ab9030 }, /* Mic */
+       { 0x19, 0x770000f0 },
+       { 0x1a, 0x770000f0 },
+       { 0x1b, 0x770000f0 },
+       { 0x1c, 0x90a00090 },
+       { 0x1d, 0x500000f0 },
+       { 0x1e, 0x500000f0 },
+       { 0x1f, 0x500000f0 },
+       { 0x20, 0x500000f0 },
+       { 0x21, 0x430000f0 },
+       { 0x22, 0x430000f0 },
+       {} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
 
 /*
  * CS4208 support:
- * Its layout is no longer compatible with CS4206/CS4207, and the generic
- * parser seems working fairly well, except for trivial fixups.
+ * Its layout is no longer compatible with CS4206/CS4207
  */
 enum {
+       CS4208_MBA6,
        CS4208_GPIO0,
 };
 
 static const struct hda_model_fixup cs4208_models[] = {
        { .id = CS4208_GPIO0, .name = "gpio0" },
+       { .id = CS4208_MBA6, .name = "mba6" },
        {}
 };
 
 static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
        /* codec SSID */
-       SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0),
-       SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0),
+       SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+       SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
        {} /* terminator */
 };
 
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
 }
 
 static const struct hda_fixup cs4208_fixups[] = {
+       [CS4208_MBA6] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = mba6_pincfgs,
+               .chained = true,
+               .chain_id = CS4208_GPIO0,
+       },
        [CS4208_GPIO0] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs4208_fixup_gpio0,
        },
 };
 
+/* correct the 0dB offset of input pins */
+static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
+{
+       unsigned int caps;
+
+       caps = query_amp_caps(codec, adc, HDA_INPUT);
+       caps &= ~(AC_AMPCAP_OFFSET);
+       caps |= 0x02;
+       snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
+}
+
 static int patch_cs4208(struct hda_codec *codec)
 {
        struct cs_spec *spec;
        int err;
 
-       spec = cs_alloc_spec(codec, 0); /* no specific w/a */
+       spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
        if (!spec)
                return -ENOMEM;
 
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
                           cs4208_fixups);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
+       snd_hda_override_wcaps(codec, 0x18,
+                              get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
+       cs4208_fix_amp_caps(codec, 0x18);
+       cs4208_fix_amp_caps(codec, 0x1b);
+       cs4208_fix_amp_caps(codec, 0x1c);
+
        err = cs_parse_auto_config(codec);
        if (err < 0)
                goto error;
index 4edd2d0f9a3ce66e625f7576b3b7053548397a51..ec68eaea0336a008c78fb05c03a4d6c0e3f99c2a 100644 (file)
@@ -3231,6 +3231,7 @@ enum {
        CXT_FIXUP_INC_MIC_BOOST,
        CXT_FIXUP_HEADPHONE_MIC_PIN,
        CXT_FIXUP_HEADPHONE_MIC,
+       CXT_FIXUP_GPIO1,
 };
 
 static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -3375,6 +3376,15 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_headphone_mic,
        },
+       [CXT_FIXUP_GPIO1] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x01, AC_VERB_SET_GPIO_MASK, 0x01 },
+                       { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01 },
+                       { 0x01, AC_VERB_SET_GPIO_DATA, 0x01 },
+                       { }
+               },
+       },
 };
 
 static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3384,6 +3394,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
 
 static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
        SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
index 3d8cd04455a623b888a9efccb7bdd31f92df8188..50173d412ac5c0d5fb7095b3e3004e2c63c319a0 100644 (file)
@@ -936,6 +936,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                return;
        }
 
+       /*
+        * always configure channel mapping, it may have been changed by the
+        * user in the meantime
+        */
+       hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
+                                  channels, per_pin->chmap,
+                                  per_pin->chmap_set);
+
        /*
         * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
         * sizeof(*dp_ai) to avoid partial match/update problems when
@@ -947,20 +955,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                            "pin=%d channels=%d\n",
                            pin_nid,
                            channels);
-               hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-                                          channels, per_pin->chmap,
-                                          per_pin->chmap_set);
                hdmi_stop_infoframe_trans(codec, pin_nid);
                hdmi_fill_audio_infoframe(codec, pin_nid,
                                            ai.bytes, sizeof(ai));
                hdmi_start_infoframe_trans(codec, pin_nid);
-       } else {
-               /* For non-pcm audio switch, setup new channel mapping
-                * accordingly */
-               if (per_pin->non_pcm != non_pcm)
-                       hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-                                                  channels, per_pin->chmap,
-                                                  per_pin->chmap_set);
        }
 
        per_pin->non_pcm = non_pcm;
@@ -1149,32 +1147,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
 }
 
 static void haswell_config_cvts(struct hda_codec *codec,
-                       int pin_id, int mux_id)
+                       hda_nid_t pin_nid, int mux_idx)
 {
        struct hdmi_spec *spec = codec->spec;
-       struct hdmi_spec_per_pin *per_pin;
-       int pin_idx, mux_idx;
-       int curr;
-       int err;
+       hda_nid_t nid, end_nid;
+       int cvt_idx, curr;
+       struct hdmi_spec_per_cvt *per_cvt;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
-               per_pin = get_pin(spec, pin_idx);
+       /* configure all pins, including "no physical connection" ones */
+       end_nid = codec->start_nid + codec->num_nodes;
+       for (nid = codec->start_nid; nid < end_nid; nid++) {
+               unsigned int wid_caps = get_wcaps(codec, nid);
+               unsigned int wid_type = get_wcaps_type(wid_caps);
+
+               if (wid_type != AC_WID_PIN)
+                       continue;
 
-               if (pin_idx == pin_id)
+               if (nid == pin_nid)
                        continue;
 
-               curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+               curr = snd_hda_codec_read(codec, nid, 0,
                                          AC_VERB_GET_CONNECT_SEL, 0);
+               if (curr != mux_idx)
+                       continue;
 
-               /* Choose another unused converter */
-               if (curr == mux_id) {
-                       err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx);
-                       if (err < 0)
-                               return;
-                       snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx);
-                       snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+               /* choose an unassigned converter. The converters in the
+                * connection list are in the same order as in the codec.
+                */
+               for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+                       per_cvt = get_cvt(spec, cvt_idx);
+                       if (!per_cvt->assigned) {
+                               snd_printdd("choose cvt %d for pin nid %d\n",
+                                       cvt_idx, nid);
+                               snd_hda_codec_write_cache(codec, nid, 0,
                                            AC_VERB_SET_CONNECT_SEL,
-                                           mux_idx);
+                                           cvt_idx);
+                               break;
+                       }
                }
        }
 }
@@ -1216,7 +1225,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 
        /* configure unused pins to choose other converters */
        if (is_haswell(codec))
-               haswell_config_cvts(codec, pin_idx, mux_idx);
+               haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
 
        snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
 
index bc07d369fac43a548db7f1e45273627cc4e7246c..bf313bea70858f8a4abd18ef6c60beca6668fea1 100644 (file)
@@ -2819,6 +2819,15 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
        alc_write_coef_idx(codec, 0x1e, coef | 0x80);
 }
 
+static void alc269_fixup_headset_mic(struct hda_codec *codec,
+                                      const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+}
+
 static void alc271_fixup_dmic(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
 {
@@ -3439,6 +3448,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
                /* Set to manual mode */
                val = alc_read_coef_idx(codec, 0x06);
                alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+               /* Enable Line1 input control by verb */
+               val = alc_read_coef_idx(codec, 0x1a);
+               alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
                break;
        }
 }
@@ -3493,6 +3505,15 @@ static void alc282_fixup_asus_tx300(struct hda_codec *codec,
        }
 }
 
+static void alc290_fixup_mono_speakers(struct hda_codec *codec,
+                                      const struct hda_fixup *fix, int action)
+{
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               /* Remove DAC node 0x03, as it seems to be
+                  giving mono output */
+               snd_hda_override_wcaps(codec, 0x03, 0);
+}
+
 enum {
        ALC269_FIXUP_SONY_VAIO,
        ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -3504,6 +3525,7 @@ enum {
        ALC271_FIXUP_DMIC,
        ALC269_FIXUP_PCM_44K,
        ALC269_FIXUP_STEREO_DMIC,
+       ALC269_FIXUP_HEADSET_MIC,
        ALC269_FIXUP_QUANTA_MUTE,
        ALC269_FIXUP_LIFEBOOK,
        ALC269_FIXUP_AMIC,
@@ -3516,9 +3538,11 @@ enum {
        ALC269_FIXUP_HP_GPIO_LED,
        ALC269_FIXUP_INV_DMIC,
        ALC269_FIXUP_LENOVO_DOCK,
+       ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
        ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
        ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
+       ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
        ALC269_FIXUP_HEADSET_MODE,
        ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
        ALC269_FIXUP_ASUS_X101_FUNC,
@@ -3531,6 +3555,8 @@ enum {
        ALC269VB_FIXUP_ORDISSIMO_EVE2,
        ALC283_FIXUP_CHROME_BOOK,
        ALC282_FIXUP_ASUS_TX300,
+       ALC283_FIXUP_INT_MIC,
+       ALC290_FIXUP_MONO_SPEAKERS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -3599,6 +3625,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_stereo_dmic,
        },
+       [ALC269_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_headset_mic,
+       },
        [ALC269_FIXUP_QUANTA_MUTE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_quanta_mute,
@@ -3708,6 +3738,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
        },
+       [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
        [ALC269_FIXUP_HEADSET_MODE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode,
@@ -3716,6 +3755,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode_no_hp_mic,
        },
+       [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
        [ALC269_FIXUP_ASUS_X101_FUNC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_x101_headset_mic,
@@ -3790,6 +3838,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc282_fixup_asus_tx300,
        },
+       [ALC283_FIXUP_INT_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x0011},
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+       },
+       [ALC290_FIXUP_MONO_SPEAKERS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc290_fixup_mono_speakers,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3831,6 +3895,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -3853,6 +3918,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
        SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -3874,7 +3940,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -3938,6 +4004,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_STEREO_DMIC, .name = "alc269-dmic"},
        {.id = ALC271_FIXUP_DMIC, .name = "alc271-dmic"},
        {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
+       {.id = ALC269_FIXUP_HEADSET_MIC, .name = "headset-mic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
@@ -4555,6 +4622,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+       SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
        SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
        SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
index 4f255dfee4504454702bfdec305b1b72820a26ac..f59a321a6d6af04d6dce32751410e4eb3f9d9df0 100644 (file)
@@ -4845,6 +4845,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                        if ((err = hdsp_get_iobox_version(hdsp)) < 0)
                                return err;
                }
+               memset(&hdsp_version, 0, sizeof(hdsp_version));
                hdsp_version.io_type = hdsp->io_type;
                hdsp_version.firmware_rev = hdsp->firmware_rev;
                if ((err = copy_to_user(argp, &hdsp_version, sizeof(hdsp_version))))
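The hunk above zeroes hdsp_version before filling it in, so the bytes that copy_to_user() later hands back to user space never contain stale stack data, struct padding included. A minimal userspace analogue of the same precaution, with a hypothetical report struct standing in for the driver's real one:

#include <stdio.h>
#include <string.h>

/* Hypothetical struct; the padding between the char and the int is
 * what would otherwise carry uninitialized stack bytes. */
struct report {
	char io_type;
	int firmware_rev;
};

static void fill_report(struct report *r)
{
	/* Zero the whole struct first, padding and all ... */
	memset(r, 0, sizeof(*r));
	/* ... then set only the fields we actually know. */
	r->io_type = 2;
	r->firmware_rev = 11;
}

int main(void)
{
	struct report r;

	fill_report(&r);
	printf("io_type=%d firmware_rev=%d\n", r.io_type, r.firmware_rev);
	return 0;
}
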
index 01aecc2b50738353ad8a96f618c01be631981240..0d1c27e911b8dde2cf1b69827e8743a31a67316f 100644 (file)
@@ -65,7 +65,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
         * already bound. If not it means binding failed, and then there
         * is no point in keeping the device instantiated.
         */
-       if (!keywest_ctx->client->driver) {
+       if (!keywest_ctx->client->dev.driver) {
                i2c_unregister_device(keywest_ctx->client);
                keywest_ctx->client = NULL;
                return -ENODEV;
@@ -76,7 +76,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
        list_add_tail(&keywest_ctx->client->detected,
-                     &keywest_ctx->client->driver->clients);
+                     &to_i2c_driver(keywest_ctx->client->dev.driver)->clients);
        return 0;
 }
 
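Both this hunk and the imx-wm8962 change further down replace the removed i2c_client->driver pointer with the generic dev.driver plus a to_i2c_driver() conversion, i.e. a container_of()-style recovery of the wrapping driver structure from its embedded generic part. A small self-contained illustration of that pattern (the struct names below are invented, not the i2c core's):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Invented stand-ins for struct device_driver / struct i2c_driver. */
struct base_driver {
	const char *name;
};

struct bus_driver {
	int bus_specific_data;
	struct base_driver driver;	/* embedded generic part */
};

#define to_bus_driver(d) container_of(d, struct bus_driver, driver)

int main(void)
{
	struct bus_driver drv = {
		.bus_specific_data = 42,
		.driver = { .name = "demo" },
	};
	struct base_driver *generic = &drv.driver;

	/* Recover the wrapping structure from the embedded member. */
	printf("%s: %d\n", generic->name,
	       to_bus_driver(generic)->bus_specific_data);
	return 0;
}
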
index 3109db7b9017cc08dd23ef6f9c70f9c605bcdf3e..fbb87e3f10193c1077596e90b6bb7f73d8fb215c 100644 (file)
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
 }
 EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
 
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
 int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &atmel_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
index 53f84085bf1fbd7c1daadc093ebd60d7f5dc7251..1d4c676eb6cc696e78f6722525923c91e3afe936 100644 (file)
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
        }
 }
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
        pr_debug("%s enter\n", __func__);
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &bf5xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
index 9cb4a80df98eee2efa78b9b3a05a99baf64cc8b1..2a5b43417fd5a5810ac36d1cc258f97987e9066a 100644 (file)
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
        .silence        = bf5xx_pcm_silence,
 };
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+       int ret;
 
        pr_debug("%s enter\n", __func__);
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &bf5xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
                                SNDRV_DMA_TYPE_DEV, card->dev, size, size);
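The same conversion repeats throughout the ASoC hunks in this section: the per-driver static dmamask and the two open-coded "set if unset" checks are replaced by a single dma_coerce_mask_and_coherent() call whose return value is actually checked. A rough userspace analogue of what that helper consolidates; fake_dev and set_masks are stand-ins for illustration, not the kernel API:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-in for the DMA-related fields of struct device. */
struct fake_dev {
	uint64_t *dma_mask;	/* points at storage owned by the device */
	uint64_t dma_mask_storage;
	uint64_t coherent_dma_mask;
};

/* Analogue of the consolidation: point dma_mask at the device's own
 * storage, set both masks from one value, and report failure instead
 * of silently leaving things half-configured. */
static int set_masks(struct fake_dev *dev, uint64_t mask)
{
	if (!mask)
		return -EIO;
	dev->dma_mask = &dev->dma_mask_storage;
	*dev->dma_mask = mask;
	dev->coherent_dma_mask = mask;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { 0 };
	int ret = set_masks(&dev, (1ULL << 32) - 1);

	if (ret)
		return 1;
	printf("mask=%#llx coherent=%#llx\n",
	       (unsigned long long)*dev.dma_mask,
	       (unsigned long long)dev.coherent_dma_mask);
	return 0;
}
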
index c02405cc007db2c88c14b25d17eff81b270f550b..5810a0603f2f1eb17ef5eece60d97ba7f6687e04 100644 (file)
@@ -88,6 +88,7 @@ static int bfin_i2s_hw_params(struct snd_pcm_substream *substream,
        case SNDRV_PCM_FORMAT_S8:
                param.spctl |= 0x70;
                sport->wdsize = 1;
+               break;
        case SNDRV_PCM_FORMAT_S16_LE:
                param.spctl |= 0xf0;
                sport->wdsize = 2;
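The one-line fix above adds the break that was missing after the S8 case; without it, the S8 settings fall straight through into the S16_LE case and are overwritten. A small self-contained illustration of the difference (the format names and values here are made up, not the ALSA constants):

#include <stdio.h>

enum fmt { FMT_S8, FMT_S16 };

static int word_size(enum fmt f)
{
	int wdsize = 0;

	switch (f) {
	case FMT_S8:
		wdsize = 1;
		break;		/* without this break, the S8 case falls
				 * through and wdsize ends up as 2 */
	case FMT_S16:
		wdsize = 2;
		break;
	}
	return wdsize;
}

int main(void)
{
	printf("S8 -> %d, S16 -> %d\n", word_size(FMT_S8), word_size(FMT_S16));
	return 0;
}
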
index 8af04343cc1ac9eab35fe951fd079fd1954da462..259d1ac4492fc610a1454567bdabd52f56dbae5a 100644 (file)
@@ -349,6 +349,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol,
        val = ucontrol->value.integer.value[0];
        val2 = ucontrol->value.integer.value[1];
 
+       if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
+               return -EINVAL;
+
        err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
        if (err < 0)
                return err;
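This hunk and the ab8500/max98095 changes further down share one theme: a value arriving from user space through ucontrol is used as a table index, so it must be held in an unsigned type and checked against the table size before the lookup. A minimal self-contained version of that guard, with a made-up table in place of st_table:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int st_table[] = { 10, 20, 30, 40 };

/* Reject out-of-range indices before touching the table; taking the
 * index as unsigned also rules out negative values from the caller. */
static int lookup(unsigned int idx, int *out)
{
	if (idx >= ARRAY_SIZE(st_table))
		return -1;
	*out = st_table[idx];
	return 0;
}

int main(void)
{
	int val;

	if (lookup(2, &val) == 0)
		printf("st_table[2] = %d\n", val);
	if (lookup(99, &val) != 0)
		printf("index 99 rejected\n");
	return 0;
}
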
index b8ba0adacfce1229989f0114ce25eb9711f61d16..80555d7551e68018d5e66485eea1507b10c35b92 100644 (file)
@@ -1225,13 +1225,18 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
        struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
        struct device *dev = codec->dev;
        bool apply_fir, apply_iir;
-       int req, status;
+       unsigned int req;
+       int status;
 
        dev_dbg(dev, "%s: Enter.\n", __func__);
 
        mutex_lock(&drvdata->anc_lock);
 
        req = ucontrol->value.integer.value[0];
+       if (req >= ARRAY_SIZE(enum_anc_state)) {
+               status = -EINVAL;
+               goto cleanup;
+       }
        if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
                req != ANC_APPLY_IIR) {
                dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n",
index 41cdd164297046c3045c9463ab6183810e02a126..8dbcacd44e6aa5cbf5de1fba91ebdf16c1592ff8 100644 (file)
@@ -1863,7 +1863,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
        struct max98095_pdata *pdata = max98095->pdata;
        int channel = max98095_get_eq_channel(kcontrol->id.name);
        struct max98095_cdata *cdata;
-       int sel = ucontrol->value.integer.value[0];
+       unsigned int sel = ucontrol->value.integer.value[0];
        struct max98095_eq_cfg *coef_set;
        int fs, best, best_val, i;
        int regmask, regsave;
@@ -2016,7 +2016,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
        struct max98095_pdata *pdata = max98095->pdata;
        int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
        struct max98095_cdata *cdata;
-       int sel = ucontrol->value.integer.value[0];
+       unsigned int sel = ucontrol->value.integer.value[0];
        struct max98095_biquad_cfg *coef_set;
        int fs, best, best_val, i;
        int regmask, regsave;
index 651ce09236755ff9938062e3e719926fad856f9a..c91eba504f92bc053dd2a51b8f62c7bff6bcc2d6 100644 (file)
@@ -270,7 +270,7 @@ MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
 static const struct regmap_config pcm1681_regmap = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .max_register           = ARRAY_SIZE(pcm1681_reg_defaults) + 1,
+       .max_register           = 0x13,
        .reg_defaults           = pcm1681_reg_defaults,
        .num_reg_defaults       = ARRAY_SIZE(pcm1681_reg_defaults),
        .writeable_reg          = pcm1681_writeable_reg,
index 2a8eccf64c76565ab5abb96f3436face0dda4f01..7613181123fe393042643847f4e2b5dbf4c89285 100644 (file)
@@ -188,7 +188,7 @@ MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
 static const struct regmap_config pcm1792a_regmap = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .max_register           = 24,
+       .max_register           = 23,
        .reg_defaults           = pcm1792a_reg_defaults,
        .num_reg_defaults       = ARRAY_SIZE(pcm1792a_reg_defaults),
        .writeable_reg          = pcm1792a_writeable_reg,
index 6e3f269243e050fe5149e60807c0e41cbf6e4147..64ad84d8a3061e5e4ba824c7178d10f87921c797 100644 (file)
@@ -674,6 +674,8 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* Left Input */
        {"Left Line1L Mux", "single-ended", "LINE1L"},
        {"Left Line1L Mux", "differential", "LINE1L"},
+       {"Left Line1R Mux", "single-ended", "LINE1R"},
+       {"Left Line1R Mux", "differential", "LINE1R"},
 
        {"Left Line2L Mux", "single-ended", "LINE2L"},
        {"Left Line2L Mux", "differential", "LINE2L"},
@@ -690,6 +692,8 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* Right Input */
        {"Right Line1R Mux", "single-ended", "LINE1R"},
        {"Right Line1R Mux", "differential", "LINE1R"},
+       {"Right Line1L Mux", "single-ended", "LINE1L"},
+       {"Right Line1L Mux", "differential", "LINE1L"},
 
        {"Right Line2R Mux", "single-ended", "LINE2R"},
        {"Right Line2R Mux", "differential", "LINE2R"},
index 8460edce1c3b6b5cc90dff481e5588c75e2285ad..84a63c660ab93b48f090e4085048cc0959cc6aa2 100644 (file)
@@ -844,18 +844,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
        }
 }
 
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &davinci_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = davinci_pcm_preallocate_dma_buffer(pcm,
index 9cc5c1f82f093f5b171877e1bd5e5672cf9c46cd..f73c7eff8b237b0d7786cd71005789af12b8c0a3 100644 (file)
@@ -298,14 +298,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
        int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &fsl_dma_dmamask;
-
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = fsl_dma_dmamask;
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+       if (ret)
+               return ret;
 
        /* Some codecs have separate DAIs for playback and capture, so we
         * should allocate a DMA buffer only for the streams that are valid.
index c6b743978d5ecdcdf402ddb43df0b7bd0400c422..6b81d0ce2c44ac637188fffdb67f10289f17f67a 100644 (file)
@@ -936,7 +936,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        ssi_private->ssi_phys = res.start;
 
        ssi_private->irq = irq_of_parse_and_map(np, 0);
-       if (ssi_private->irq == NO_IRQ) {
+       if (ssi_private->irq == 0) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
                return -ENXIO;
        }
index a3d60d4bea4ce8ace84d2860921b5c46d3f00895..a2fd7321b5a9a1bbd321f756af14fd9f071a372f 100644 (file)
@@ -112,7 +112,7 @@ static int imx_mc13783_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (machine_is_mx31_3ds()) {
+       if (machine_is_mx31_3ds() || machine_is_mx31moboard()) {
                imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
                        IMX_AUDMUX_V2_PTCR_SYN,
                        IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
index 34043c55f2a62f048232b09ff2f6165fd3726116..fd5f2fb955f182cdef9458e1ab3280d5072365a1 100644 (file)
@@ -272,18 +272,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
        return 0;
 }
 
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
+
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &imx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = imx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_PLAYBACK);
index 46c5b4fdfc5298e61008032fe903c0191f886d31..ca1be1d9dcf0349b608f77367f5330ca7c2c6308 100644 (file)
@@ -62,7 +62,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
        struct device_node *ssi_np, *codec_np;
        struct platform_device *ssi_pdev;
        struct i2c_client *codec_dev;
-       struct imx_sgtl5000_data *data;
+       struct imx_sgtl5000_data *data = NULL;
        int int_port, ext_port;
        int ret;
 
@@ -128,7 +128,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
                goto fail;
        }
 
-       data->codec_clk = devm_clk_get(&codec_dev->dev, NULL);
+       data->codec_clk = clk_get(&codec_dev->dev, NULL);
        if (IS_ERR(data->codec_clk)) {
                ret = PTR_ERR(data->codec_clk);
                goto fail;
@@ -172,6 +172,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
        return 0;
 
 fail:
+       if (data && !IS_ERR(data->codec_clk))
+               clk_put(data->codec_clk);
        if (ssi_np)
                of_node_put(ssi_np);
        if (codec_np)
@@ -185,6 +187,7 @@ static int imx_sgtl5000_remove(struct platform_device *pdev)
        struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
 
        snd_soc_unregister_card(&data->card);
+       clk_put(data->codec_clk);
 
        return 0;
 }
index f58bcd85c07fbd8b302c0c3bcd97fd3cf6ed4d0c..57d6941676ffabc509754317337bf109e16376a3 100644 (file)
@@ -600,19 +600,17 @@ static int imx_ssi_probe(struct platform_device *pdev)
        ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
        ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
-       ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
-       if (ret)
-               goto failed_pcm_fiq;
+       ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+       ssi->dma_init = imx_pcm_dma_init(pdev);
 
-       ret = imx_pcm_dma_init(pdev);
-       if (ret)
-               goto failed_pcm_dma;
+       if (ssi->fiq_init && ssi->dma_init) {
+               ret = ssi->fiq_init;
+               goto failed_pcm;
+       }
 
        return 0;
 
-failed_pcm_dma:
-       imx_pcm_fiq_exit(pdev);
-failed_pcm_fiq:
+failed_pcm:
        snd_soc_unregister_component(&pdev->dev);
 failed_register:
        release_mem_region(res->start, resource_size(res));
@@ -628,8 +626,11 @@ static int imx_ssi_remove(struct platform_device *pdev)
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct imx_ssi *ssi = platform_get_drvdata(pdev);
 
-       imx_pcm_dma_exit(pdev);
-       imx_pcm_fiq_exit(pdev);
+       if (!ssi->dma_init)
+               imx_pcm_dma_exit(pdev);
+
+       if (!ssi->fiq_init)
+               imx_pcm_fiq_exit(pdev);
 
        snd_soc_unregister_component(&pdev->dev);
 
index fb1616ba8c5967e1892b4ff6c7e180b2b9447047..560c40fc9ebbb50241e3732f3a76ece064178bf2 100644 (file)
@@ -211,6 +211,8 @@ struct imx_ssi {
        struct imx_dma_data filter_data_rx;
        struct imx_pcm_fiq_params fiq_params;
 
+       int fiq_init;
+       int dma_init;
        int enabled;
 };
 
index 722afe69169e904798974115292159a89a08ba18..cd106aa39984f32152281a80ea7812837ea789d0 100644 (file)
@@ -215,7 +215,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
                goto fail;
        }
        codec_dev = of_find_i2c_device_by_node(codec_np);
-       if (!codec_dev || !codec_dev->driver) {
+       if (!codec_dev || !codec_dev->dev.driver) {
                dev_err(&pdev->dev, "failed to find codec platform device\n");
                ret = -EINVAL;
                goto fail;
index 2a847ca494b5b4dfe00019d1b10c5ae8eccc817b..8fcf2241674054a8d687ae94598a0b1beed26e8a 100644 (file)
@@ -299,7 +299,6 @@ static struct snd_pcm_ops psc_dma_ops = {
        .hw_params      = psc_dma_hw_params,
 };
 
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
 static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
@@ -307,15 +306,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
        struct snd_pcm *pcm = rtd->pcm;
        struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
        size_t size = psc_dma_hardware.buffer_bytes_max;
-       int rc = 0;
+       int rc;
 
        dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
                card, dai, pcm);
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &psc_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
index 710059292318878886d00383396413c3ec87c8d1..1d7ef28585e1a4125bb70bc17edd203a7195b422 100644 (file)
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
        }
 }
 
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
-
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &jz4740_pcm_dmamask;
+       int ret;
 
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = jz4740_pcm_preallocate_dma_buffer(pcm,
index b238434f92b099db8bf3ba613fc596b7006ec96d..3814bb0374857b266747323fd973f37fa71dd4ab 100644 (file)
@@ -59,8 +59,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
        .fifo_size              = 0,
 };
 
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
 static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 {
        struct kirkwood_dma_data *priv = dev_id;
@@ -292,10 +290,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &kirkwood_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = kirkwood_dma_preallocate_dma_buffer(pcm,
index c894ff0f25809c997197496e810cc52d0299d40a..f588ee45b4fdd610d21c316f469b81b687da40a9 100644 (file)
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
        snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
 static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &nuc900_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                card->dev, 4 * 1024, (4 * 1024) - 1);
index daa78a0095facf5ff35d1eb0b6ca61836b7c9210..4a07f7179690d36e3526cb8bcf9c4fc20e2ca509 100644 (file)
@@ -1,6 +1,6 @@
 config SND_OMAP_SOC
        tristate "SoC Audio for the Texas Instruments OMAP chips"
-       depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST)
+       depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
        select SND_DMAENGINE_PCM
 
 config SND_OMAP_SOC_DMIC
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
 
 config SND_OMAP_SOC_RX51
        tristate "SoC Audio support for Nokia RX-51"
-       depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
+       depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
        select SND_OMAP_SOC_MCBSP
        select SND_SOC_TLV320AIC3X
        select SND_SOC_TPA6130A2
index a11405de86e82bda8801e837ec48183093bd99db..b8fa9862e54c4a6c6d99c8d3b88e8204d3cd9d63 100644 (file)
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
        .mmap           = omap_pcm_mmap,
 };
 
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
 static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
        int stream)
 {
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &omap_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = omap_pcm_preallocate_dma_buffer(pcm,
index 806da27b8b671ed8596484856ec59f8092c75e8d..d58b09f4f7a426dcba4e7a82c26434161dbe124e 100644 (file)
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
        .mmap           = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
index d0740a762963d867bf75cce29d20c15c0a4b5097..283620a97fe7b597d2670bd8c985610f5d1161ba 100644 (file)
@@ -444,8 +444,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
        snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
 {
        struct snd_card *card = runtime->card->snd_card;
@@ -456,10 +454,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
        params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
                        pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &s6000_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (res)
+               return res;
 
        if (params->dma_in) {
                s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
index 9338d11e92168c222eae4630228f711bd9cf803c..fe2748b494d4cd38c6db6ae4409d38f56819ff3f 100644 (file)
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
        }
 }
 
-static u64 dma_mask = DMA_BIT_MASK(32);
-
 static int dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
        pr_debug("Entered %s\n", __func__);
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &dma_mask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = preallocate_dma_buffer(pcm,
index ce1e1e16f250affafbc333d165c2843e043bfa23..e4f318fc2f82bb048f5047a6773f1a2c8be40902 100644 (file)
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
        return 0;
 }
 
-static u64 idma_mask = DMA_BIT_MASK(32);
-
 static int idma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &idma_mask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = preallocate_idma_buffer(pcm,
index 9cc6986a8cfb347465a9aa442b3b4e79a4f8d9f6..5dd87f4c919e50650e61e8084856138d47f6f119 100644 (file)
@@ -220,8 +220,8 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
 void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
                               struct rsnd_mod *mod,
                               enum rsnd_reg reg);
-#define rsnd_is_gen1(s)                ((s)->info->flags & RSND_GEN1)
-#define rsnd_is_gen2(s)                ((s)->info->flags & RSND_GEN2)
+#define rsnd_is_gen1(s)                (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
+#define rsnd_is_gen2(s)                (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
 
 /*
  *     R-Car ADG
index 4d0561312f3be0a9f383928b3d74f80a44cdd3b3..1a38be0d0ca8fbf0724bbec4087569ee418d39a2 100644 (file)
@@ -1380,7 +1380,6 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
                                return -ENODEV;
 
                        list_add(&cpu_dai->dapm.list, &card->dapm_list);
-                       snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
                }
 
                if (cpu_dai->driver->probe) {
index d0323a693ba20f4719731369f85324e9ba582096..999550bbad40e66f259d9ed2b044ed478e377d72 100644 (file)
@@ -262,7 +262,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
        }
 
        area->vm_ops = &usb_stream_hwdep_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       area->vm_flags |= VM_DONTDUMP;
+       if (!read)
+               area->vm_flags |= VM_DONTEXPAND;
        area->vm_private_data = us122l;
        atomic_inc(&us122l->mmap_count);
 out:
index 63fb5219f0f8aad7a844eb1942b5cd728be678dc..6234a51625b1b6a556f252c2fea3a150db1bdbc1 100644 (file)
@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
        usX2Y_clients_stop(usX2Y);
 }
 
-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
-                                struct snd_usX2Y_substream *subs, struct urb *urb)
-{
-       snd_printk(KERN_ERR
-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
-"Most probably some urb of usb-frame %i is still missing.\n"
-"Cause could be too long delays in usb-hcd interrupt handling.\n",
-                  usb_get_current_frame_number(usX2Y->dev),
-                  subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
-                  usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
-       usX2Y_clients_stop(usX2Y);
-}
-
 static void i_usX2Y_urb_complete(struct urb *urb)
 {
        struct snd_usX2Y_substream *subs = urb->context;
@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
                usX2Y_error_urb_status(usX2Y, subs, urb);
                return;
        }
-       if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
-               subs->completed_urb = urb;
-       else {
-               usX2Y_error_sequence(usX2Y, subs, urb);
-               return;
-       }
+
+       subs->completed_urb = urb;
+
        {
                struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
                        *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
index f2a1acdc4d839f22eb7fa83d63b5f35ce367c5cd..814d0e887c62e5c451c3ff7cc4b8f448c3a45007 100644 (file)
@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
                usX2Y_error_urb_status(usX2Y, subs, urb);
                return;
        }
-       if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
-               subs->completed_urb = urb;
-       else {
-               usX2Y_error_sequence(usX2Y, subs, urb);
-               return;
-       }
 
+       subs->completed_urb = urb;
        capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
        capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
        playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
index 099e7cd022e46d47260103b226b3410beac27133..7c43479623537af4f0d4179f4cce195cbea3e9b1 100644 (file)
@@ -5,7 +5,6 @@
 #include <stdbool.h>
 #include <sys/vfs.h>
 #include <sys/mount.h>
-#include <linux/magic.h>
 #include <linux/kernel.h>
 
 #include "debugfs.h"
index 3a0ff7fb71b633df77cfd1302ce35e2bf9506142..64c043b7a43848f17a023dcf9479dbd77f96d7be 100644 (file)
@@ -770,6 +770,7 @@ check: $(OUTPUT)common-cmds.h
 install-bin: all
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
        $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
+       $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
        $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 ifndef NO_LIBPERL
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
index 15130b50dfe3be2fea6a975a102283f6d33e4887..fe9b61e322a557b063ecc78eccb9a87c8de73dda 100644 (file)
@@ -2,3 +2,6 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+ifndef NO_LIBUNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+endif
diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h
new file mode 100644 (file)
index 0000000..2a1cfde
--- /dev/null
@@ -0,0 +1,54 @@
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include "../../util/types.h"
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_ARM_MAX) - 1)
+#define PERF_REG_IP    PERF_REG_ARM_PC
+#define PERF_REG_SP    PERF_REG_ARM_SP
+
+static inline const char *perf_reg_name(int id)
+{
+       switch (id) {
+       case PERF_REG_ARM_R0:
+               return "r0";
+       case PERF_REG_ARM_R1:
+               return "r1";
+       case PERF_REG_ARM_R2:
+               return "r2";
+       case PERF_REG_ARM_R3:
+               return "r3";
+       case PERF_REG_ARM_R4:
+               return "r4";
+       case PERF_REG_ARM_R5:
+               return "r5";
+       case PERF_REG_ARM_R6:
+               return "r6";
+       case PERF_REG_ARM_R7:
+               return "r7";
+       case PERF_REG_ARM_R8:
+               return "r8";
+       case PERF_REG_ARM_R9:
+               return "r9";
+       case PERF_REG_ARM_R10:
+               return "r10";
+       case PERF_REG_ARM_FP:
+               return "fp";
+       case PERF_REG_ARM_IP:
+               return "ip";
+       case PERF_REG_ARM_SP:
+               return "sp";
+       case PERF_REG_ARM_LR:
+               return "lr";
+       case PERF_REG_ARM_PC:
+               return "pc";
+       default:
+               return NULL;
+       }
+
+       return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm/util/unwind.c b/tools/perf/arch/arm/util/unwind.c
new file mode 100644 (file)
index 0000000..da3dc95
--- /dev/null
@@ -0,0 +1,48 @@
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+
+int unwind__arch_reg_id(int regnum)
+{
+       switch (regnum) {
+       case UNW_ARM_R0:
+               return PERF_REG_ARM_R0;
+       case UNW_ARM_R1:
+               return PERF_REG_ARM_R1;
+       case UNW_ARM_R2:
+               return PERF_REG_ARM_R2;
+       case UNW_ARM_R3:
+               return PERF_REG_ARM_R3;
+       case UNW_ARM_R4:
+               return PERF_REG_ARM_R4;
+       case UNW_ARM_R5:
+               return PERF_REG_ARM_R5;
+       case UNW_ARM_R6:
+               return PERF_REG_ARM_R6;
+       case UNW_ARM_R7:
+               return PERF_REG_ARM_R7;
+       case UNW_ARM_R8:
+               return PERF_REG_ARM_R8;
+       case UNW_ARM_R9:
+               return PERF_REG_ARM_R9;
+       case UNW_ARM_R10:
+               return PERF_REG_ARM_R10;
+       case UNW_ARM_R11:
+               return PERF_REG_ARM_FP;
+       case UNW_ARM_R12:
+               return PERF_REG_ARM_IP;
+       case UNW_ARM_R13:
+               return PERF_REG_ARM_SP;
+       case UNW_ARM_R14:
+               return PERF_REG_ARM_LR;
+       case UNW_ARM_R15:
+               return PERF_REG_ARM_PC;
+       default:
+               pr_err("unwind: invalid reg id %d\n", regnum);
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
index 9570c2b0f83c580454e2610cb3c07a4d7443ee19..b2519e49424f4c42bb389de846d21c8337773036 100644 (file)
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                             struct perf_tsc_conversion *tc)
 {
-       bool cap_usr_time_zero;
+       bool cap_user_time_zero;
        u32 seq;
        int i = 0;
 
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                tc->time_mult = pc->time_mult;
                tc->time_shift = pc->time_shift;
                tc->time_zero = pc->time_zero;
-               cap_usr_time_zero = pc->cap_usr_time_zero;
+               cap_user_time_zero = pc->cap_user_time_zero;
                rmb();
                if (pc->lock == seq && !(seq & 1))
                        break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                }
        }
 
-       if (!cap_usr_time_zero)
+       if (!cap_user_time_zero)
                return -EOPNOTSUPP;
 
        return 0;
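The rename above (cap_usr_time_zero to cap_user_time_zero) sits inside the retry loop visible in the surrounding context: the fields are re-read until pc->lock comes back unchanged and even. A toy single-threaded sketch of that read side, with an invented struct shared standing in for the perf mmap page; the rmb() barriers the kernel hunk relies on are deliberately omitted here:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for the fields read in the loop above. */
struct shared {
	uint32_t lock;		/* odd while the writer is mid-update */
	uint64_t time_zero;
	int cap_user_time_zero;
};

/* Retry until a whole, consistent snapshot has been observed. */
static uint64_t read_time_zero(const struct shared *s, int *cap)
{
	uint32_t seq;
	uint64_t tz;

	do {
		seq = s->lock;
		tz = s->time_zero;
		*cap = s->cap_user_time_zero;
	} while (s->lock != seq || (seq & 1));

	return tz;
}

int main(void)
{
	struct shared s = { .lock = 2, .time_zero = 12345,
			    .cap_user_time_zero = 1 };
	int cap;

	printf("time_zero=%llu cap=%d\n",
	       (unsigned long long)read_time_zero(&s, &cap), cap);
	return 0;
}
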
index 423875c999b21208a5a6274d0fd47a2fb359ecd8..afe377b2884f740b0c469a367ab3938d99f7df0e 100644 (file)
@@ -321,8 +321,6 @@ found:
        return perf_event__repipe(tool, event_sw, &sample_sw, machine);
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
index c2dff9cb1f2ce1ac150401d7b859229beb53998e..9b5f077fee5b1b65a4e51b48ef1af0727b8c134b 100644 (file)
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
 
        dir1 = opendir(PATH_SYS_NODE);
        if (!dir1)
-               return -1;
+               return 0;
 
        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR ||
index 8e50d8d77419c7ca3e8ce72209b113d72665c09e..72eae7498c09419c8f1f77a7a005c6dcbbb5eb4b 100644 (file)
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
        return 0;
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
                }
        }
 
+       if (session_done())
+               return 0;
+
        if (nr_samples == 0) {
                ui__error("The %s file has no samples!\n", session->filename);
                return 0;
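Several of the perf hunks in this section follow the same shape: sig_handler() sets a shared session_done flag, the per-file extern declarations are dropped in favour of the shared one, and long-running loops (here and in hists__collapse_resort further down) bail out early once the flag is set. A compact self-contained sketch of that pattern, with the work loop reduced to a placeholder:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Shared "stop now" flag, set from the signal handler. */
static volatile sig_atomic_t session_done;

static void sig_handler(int sig)
{
	(void)sig;
	session_done = 1;
}

int main(void)
{
	signal(SIGINT, sig_handler);

	/* Long-running work: check the flag between units of work. */
	for (int i = 0; i < 1000000 && !session_done; i++) {
		/* ... process one sample ... */
		usleep(10);
	}

	printf(session_done ? "interrupted\n" : "finished\n");
	return 0;
}
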
index 7f31a3ded1b6dc59730a0162d5af279e038d1afc..9c333ff3dfeb3de716eac13d48d7364e998bb50d 100644 (file)
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
        .ordering_requires_timestamps = true,
 };
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
index f686d5ff594e6b93c6e6f732e7a1c97aa18c4e49..5098f144b92defd53e94f9f24184e3ade0672928 100644 (file)
@@ -457,6 +457,7 @@ static int __run_perf_stat(int argc, const char **argv)
                        perror("failed to prepare workload");
                        return -1;
                }
+               child_pid = evsel_list->workload.pid;
        }
 
        if (group)
index f5aa6375e3e9add4eea3cdd81de685fa37d87c4d..71aa3e35406bd064e87044d2ae71597a5f117092 100644 (file)
 #include <sys/mman.h>
 #include <linux/futex.h>
 
+/* For older distros: */
+#ifndef MAP_STACK
+# define MAP_STACK             0x20000
+#endif
+
+#ifndef MADV_HWPOISON
+# define MADV_HWPOISON         100
+#endif
+
+#ifndef MADV_MERGEABLE
+# define MADV_MERGEABLE                12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+# define MADV_UNMERGEABLE      13
+#endif
+
 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
                                         unsigned long arg,
                                         u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
 
        trace->tool.sample        = trace__process_sample;
        trace->tool.mmap          = perf_event__process_mmap;
+       trace->tool.mmap2         = perf_event__process_mmap2;
        trace->tool.comm          = perf_event__process_comm;
        trace->tool.exit          = perf_event__process_exit;
        trace->tool.fork          = perf_event__process_fork;
index 214e17e97e5c7ba5aa25545b465b8994ac666afc..75b93d7f786010000368e299d214717131c67fa1 100644 (file)
@@ -29,6 +29,10 @@ ifeq ($(ARCH),x86_64)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
 endif
+ifeq ($(ARCH),arm)
+  NO_PERF_REGS := 0
+  LIBUNWIND_LIBS = -lunwind -lunwind-arm
+endif
 
 ifeq ($(NO_PERF_REGS),0)
   CFLAGS += -DHAVE_PERF_REGS
@@ -87,7 +91,7 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
-EXTLIBS = -lelf -lpthread -lrt -lm
+EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
   CFLAGS += -fstack-protector-all
@@ -180,6 +184,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
 ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
   CFLAGS += -DLIBELF_MMAP
 endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+  CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif
 
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
@@ -205,8 +212,7 @@ ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
 endif # try-cc
 endif # NO_LIBELF
 
-# There's only x86 (both 32 and 64) support for CFI unwind so far
-ifneq ($(ARCH),x86)
+ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 endif
 
@@ -220,9 +226,13 @@ endif
 
 FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(CFLAGS) $(LIBUNWIND_LDFLAGS) $(LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
 ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND),libunwind),y)
-  msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
+  msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
   NO_LIBUNWIND := 1
 endif # Libunwind support
+ifneq ($(call try-cc,$(SOURCE_LIBUNWIND_DEBUG_FRAME),$(FLAGS_UNWIND),libunwind debug_frame),y)
+  msg := $(warning No debug_frame support found in libunwind);
+CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+endif # debug_frame support in libunwind
 endif # NO_LIBUNWIND
 
 ifndef NO_LIBUNWIND
index 708fb8e9822a3ed43bd192470c5da6f01c236c38..028fe997d5ebcfecce4d013fa43e9c21c7f989a2 100644 (file)
@@ -61,6 +61,15 @@ int main(void)
 }
 endef
 
+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+       size_t dst;
+       return elf_getphdrnum(0, &dst);
+}
+endef
+
 ifndef NO_SLANG
 define SOURCE_SLANG
 #include <slang.h>
@@ -176,7 +185,6 @@ extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
                                       unw_proc_info_t *pi,
                                       int need_unwind_info, void *arg);
 
-
 #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
 
 int main(void)
@@ -188,6 +196,26 @@ int main(void)
        return 0;
 }
 endef
+
+define SOURCE_LIBUNWIND_DEBUG_FRAME
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+                                unw_word_t ip, unw_word_t segbase,
+                                const char *obj_name, unw_word_t start,
+                                unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+       dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+       return 0;
+}
+endef
+
 endif
 
 ifndef NO_BACKTRACE
@@ -210,6 +238,7 @@ define SOURCE_LIBAUDIT
 
 int main(void)
 {
+       printf(\"error message: %s\", audit_errno_to_name(0));
        return audit_open();
 }
 endef
index bfc5a27597d60e1f09b7f95ef691e37608f8c694..7eae5488ecea47344cac10677104cb9e6cb2cc44 100644 (file)
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
                    end = map__rip_2objdump(map, sym->end);
 
                offset = line_ip - start;
-               if (offset < 0 || (u64)line_ip > end)
+               if ((u64)line_ip < start || (u64)line_ip > end)
                        offset = -1;
                else
                        parsed_line = tmp2 + 1;
index 3e5f5430a28aa929741e2bd3ccbcf554ae62a074..7defd77105d005f9820c7ca69621e814a3c45c76 100644 (file)
@@ -262,6 +262,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
                ret == DW_ATE_signed_fixed);
 }
 
+/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+       Dwarf_Attribute attr;
+
+       return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+               dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
 /**
  * die_get_data_member_location - Get the data-member offset
  * @mb_die: a DIE of a member of a data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
 {
        struct __addr_die_search_param *ad = data;
 
+       /*
+        * Since a declaration entry doesn't have a given pc, this always returns
+        * a function definition entry.
+        */
        if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
            dwarf_haspc(fn_die, ad->addr)) {
                memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
@@ -407,7 +426,7 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
  * @die_mem: a buffer for result DIE
  *
  * Search a non-inlined function DIE which includes @addr. Stores the
- * DIE to @die_mem and returns it if found. Returns NULl if failed.
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
  */
 Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
                                    Dwarf_Die *die_mem)
@@ -434,16 +453,33 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
        return DIE_FIND_CB_CONTINUE;
 }
 
+/**
+ * die_find_top_inlinefunc - Search the top inlined function at given address
+ * @sp_die: a subprogram DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ * Even if several inlined functions are expanded recursively, this
+ * doesn't trace it down, and returns the topmost one.
+ */
+Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+                                  Dwarf_Die *die_mem)
+{
+       return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
+}
+
 /**
  * die_find_inlinefunc - Search an inlined function at given address
- * @cu_die: a CU DIE which including @addr
+ * @sp_die: a subprogram DIE which includes @addr
  * @addr: target address
  * @die_mem: a buffer for result DIE
  *
  * Search an inlined function DIE which includes @addr. Stores the
- * DIE to @die_mem and returns it if found. Returns NULl if failed.
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
  * If several inlined functions are expanded recursively, this traces
 - * it and returns deepest one.
 + * it down and returns the deepest one.
  */
 Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
                               Dwarf_Die *die_mem)
index 6ce1717784b7ab42a14d58431c3026df749f0620..b4fe90c6cb2d4413c3ab96f91a346f3f89549932 100644 (file)
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
 extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
                        int (*callback)(Dwarf_Die *, void *), void *data);
 
+/* Ensure that this DIE is a subprogram and definition (not declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
 /* Compare diename and tname */
 extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
 
@@ -76,7 +79,11 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
 extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
                                    Dwarf_Die *die_mem);
 
-/* Search an inlined function including given address */
+/* Search the top inlined function including given address */
+extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+                                         Dwarf_Die *die_mem);
+
+/* Search the deepest inlined function including given address */
 extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
                                      Dwarf_Die *die_mem);
 
index 26441d0e571bfce2bcd469dfdfe428175facb5ee..c3e5a3b817ab714497dc7f6f39520945dc886b1a 100644 (file)
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
        return write_padded(fd, name, name_len + 1, len);
 }
 
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
-                               u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head,
+                                      struct machine *machine,
+                                      pid_t pid, u16 misc, int fd)
 {
+       char nm[PATH_MAX];
        struct dso *pos;
 
        dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
                if (is_vdso_map(pos->short_name)) {
                        name = (char *) VDSO__MAP_NAME;
                        name_len = sizeof(VDSO__MAP_NAME) + 1;
+               } else if (dso__is_kcore(pos)) {
+                       machine__mmap_name(machine, nm, sizeof(nm));
+                       name = nm;
+                       name_len = strlen(nm) + 1;
                } else {
                        name = pos->long_name;
                        name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
                umisc = PERF_RECORD_MISC_GUEST_USER;
        }
 
-       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
-                                         kmisc, fd);
+       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+                                         machine->pid, kmisc, fd);
        if (err == 0)
-               err = __dsos__write_buildid_table(&machine->user_dsos,
+               err = __dsos__write_buildid_table(&machine->user_dsos, machine,
                                                  machine->pid, umisc, fd);
        return err;
 }
@@ -375,23 +381,31 @@ out_free:
        return err;
 }
 
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+                              const char *debugdir)
 {
        bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
        bool is_vdso = is_vdso_map(dso->short_name);
+       char *name = dso->long_name;
+       char nm[PATH_MAX];
 
-       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
-                                    dso->long_name, debugdir,
-                                    is_kallsyms, is_vdso);
+       if (dso__is_kcore(dso)) {
+               is_kallsyms = true;
+               machine__mmap_name(machine, nm, sizeof(nm));
+               name = nm;
+       }
+       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+                                    debugdir, is_kallsyms, is_vdso);
 }
 
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+                                  struct machine *machine, const char *debugdir)
 {
        struct dso *pos;
        int err = 0;
 
        dsos__for_each_with_build_id(pos, head)
-               if (dso__cache_build_id(pos, debugdir))
+               if (dso__cache_build_id(pos, machine, debugdir))
                        err = -1;
 
        return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
 
 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
 {
-       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
-       ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+                                         debugdir);
+       ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
        return ret;
 }
 
@@ -2753,6 +2768,18 @@ int perf_session__read_header(struct perf_session *session)
        if (perf_file_header__read(&f_header, header, fd) < 0)
                return -EINVAL;
 
+       /*
+        * Sanity check that perf.data was written cleanly; data size is
+        * initialized to 0 and updated only if the on_exit function is run.
+        * If data size is still 0 then the file contains only partial
+        * information.  Just warn the user and process as much as possible.
+        */
+       if (f_header.data.size == 0) {
+               pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
+                          "Was the 'perf record' command properly terminated?\n",
+                          session->filename);
+       }
+
        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);
 
index 46a0d35a05e1f21aae097e7a67cea89bfe9ecddf..9ff6cf3e9a99f69596b37372f60203c11bec88a3 100644 (file)
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
        next = rb_first(root);
 
        while (next) {
+               if (session_done())
+                       break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);
 
index 933d14f287ca92645152f7714651a10b818e87bc..6188d2876a7128aaa68e426c3334dfcae099dc41 100644 (file)
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
                modules = path;
        }
 
-       if (symbol__restricted_filename(path, "/proc/modules"))
+       if (symbol__restricted_filename(modules, "/proc/modules"))
                return -1;
 
        file = fopen(modules, "r");
index be0329394d5639f77644d8a9079dd6ceb4f27a73..c09e0a9fdf4cda118be077fbee1fb382dcc3d5ee 100644 (file)
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
 static int debuginfo__init_offline_dwarf(struct debuginfo *self,
                                         const char *path)
 {
-       Dwfl_Module *mod;
        int fd;
 
        fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
        if (!self->dwfl)
                goto error;
 
-       mod = dwfl_report_offline(self->dwfl, "", "", fd);
-       if (!mod)
+       self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
+       if (!self->mod)
                goto error;
 
-       self->dbg = dwfl_module_getdwarf(mod, &self->bias);
+       self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
        if (!self->dbg)
                goto error;
 
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
 }
 
 /* Convert subprogram DIE to trace point */
-static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
-                                 bool retprobe, struct probe_trace_point *tp)
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
+                                 Dwarf_Addr paddr, bool retprobe,
+                                 struct probe_trace_point *tp)
 {
        Dwarf_Addr eaddr, highaddr;
-       const char *name;
-
-       /* Copy the name of probe point */
-       name = dwarf_diename(sp_die);
-       if (name) {
-               if (dwarf_entrypc(sp_die, &eaddr) != 0) {
-                       pr_warning("Failed to get entry address of %s\n",
-                                  dwarf_diename(sp_die));
-                       return -ENOENT;
-               }
-               if (dwarf_highpc(sp_die, &highaddr) != 0) {
-                       pr_warning("Failed to get end address of %s\n",
-                                  dwarf_diename(sp_die));
-                       return -ENOENT;
-               }
-               if (paddr > highaddr) {
-                       pr_warning("Offset specified is greater than size of %s\n",
-                                  dwarf_diename(sp_die));
-                       return -EINVAL;
-               }
-               tp->symbol = strdup(name);
-               if (tp->symbol == NULL)
-                       return -ENOMEM;
-               tp->offset = (unsigned long)(paddr - eaddr);
-       } else
-               /* This function has no name. */
-               tp->offset = (unsigned long)paddr;
+       GElf_Sym sym;
+       const char *symbol;
+
+       /* Verify the address is correct */
+       if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+               pr_warning("Failed to get entry address of %s\n",
+                          dwarf_diename(sp_die));
+               return -ENOENT;
+       }
+       if (dwarf_highpc(sp_die, &highaddr) != 0) {
+               pr_warning("Failed to get end address of %s\n",
+                          dwarf_diename(sp_die));
+               return -ENOENT;
+       }
+       if (paddr > highaddr) {
+               pr_warning("Offset specified is greater than size of %s\n",
+                          dwarf_diename(sp_die));
+               return -EINVAL;
+       }
+
+       /* Get an appropriate symbol from symtab */
+       symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+       if (!symbol) {
+               pr_warning("Failed to find symbol at 0x%lx\n",
+                          (unsigned long)paddr);
+               return -ENOENT;
+       }
+       tp->offset = (unsigned long)(paddr - sym.st_value);
+       tp->symbol = strdup(symbol);
+       if (!tp->symbol)
+               return -ENOMEM;
 
        /* Return probe must be on the head of a subprogram */
        if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
        }
 
        /* If not a real subprogram, find a real one */
-       if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+       if (!die_is_func_def(sc_die)) {
                if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
                        pr_warning("Failed to find probe point in any "
                                   "functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
        struct dwarf_callback_param *param = data;
        struct probe_finder *pf = param->data;
        struct perf_probe_point *pp = &pf->pev->point;
-       Dwarf_Attribute attr;
 
        /* Check tag and diename */
-       if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-           !die_compare_name(sp_die, pp->function) ||
-           dwarf_attr(sp_die, DW_AT_declaration, &attr))
+       if (!die_is_func_def(sp_die) ||
+           !die_compare_name(sp_die, pp->function))
                return DWARF_CB_OK;
 
        /* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
        tev = &tf->tevs[tf->ntevs++];
 
        /* Trace point should be converted from subprogram DIE */
-       ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+       ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
                                     pf->pev->point.retprobe, &tev->point);
        if (ret < 0)
                return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
 {
        struct trace_event_finder tf = {
                        .pf = {.pev = pev, .callback = add_probe_trace_event},
-                       .max_tevs = max_tevs};
+                       .mod = self->mod, .max_tevs = max_tevs};
        int ret;
 
        /* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
        vl = &af->vls[af->nvls++];
 
        /* Trace point should be converted from subprogram DIE */
-       ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+       ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
                                     pf->pev->point.retprobe, &vl->point);
        if (ret < 0)
                return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
 {
        struct available_var_finder af = {
                        .pf = {.pev = pev, .callback = add_available_vars},
+                       .mod = self->mod,
                        .max_vls = max_vls, .externs = externs};
        int ret;
 
@@ -1324,8 +1327,8 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
                                struct perf_probe_point *ppt)
 {
        Dwarf_Die cudie, spdie, indie;
-       Dwarf_Addr _addr, baseaddr;
-       const char *fname = NULL, *func = NULL, *tmp;
+       Dwarf_Addr _addr = 0, baseaddr = 0;
+       const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
        int baseline = 0, lineno = 0, ret = 0;
 
        /* Adjust address with bias */
@@ -1346,27 +1349,36 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
        /* Find a corresponding function (name, baseline and baseaddr) */
        if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
                /* Get function entry information */
-               tmp = dwarf_diename(&spdie);
-               if (!tmp ||
+               func = basefunc = dwarf_diename(&spdie);
+               if (!func ||
                    dwarf_entrypc(&spdie, &baseaddr) != 0 ||
-                   dwarf_decl_line(&spdie, &baseline) != 0)
+                   dwarf_decl_line(&spdie, &baseline) != 0) {
+                       lineno = 0;
                        goto post;
-               func = tmp;
+               }
 
-               if (addr == (unsigned long)baseaddr)
+               if (addr == (unsigned long)baseaddr) {
                        /* Function entry - Relative line number is 0 */
                        lineno = baseline;
-               else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
-                                            &indie)) {
+                       fname = dwarf_decl_file(&spdie);
+                       goto post;
+               }
+
+               /* Track down the inline functions step by step */
+               while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
+                                               &indie)) {
+                       /* There is an inline function */
                        if (dwarf_entrypc(&indie, &_addr) == 0 &&
-                           _addr == addr)
+                           _addr == addr) {
                                /*
                                 * addr is at an inline function entry.
                                 * In this case, lineno should be the call-site
-                                * line number.
+                                * line number. (overwrite lineinfo)
                                 */
                                lineno = die_get_call_lineno(&indie);
-                       else {
+                               fname = die_get_call_file(&indie);
+                               break;
+                       } else {
                                /*
                                 * addr is in an inline function body.
                                 * Since lineno points one of the lines
@@ -1374,19 +1386,27 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
                                 * be the entry line of the inline function.
                                 */
                                tmp = dwarf_diename(&indie);
-                               if (tmp &&
-                                   dwarf_decl_line(&spdie, &baseline) == 0)
-                                       func = tmp;
+                               if (!tmp ||
+                                   dwarf_decl_line(&indie, &baseline) != 0)
+                                       break;
+                               func = tmp;
+                               spdie = indie;
                        }
                }
+               /* Verify that lineno and baseline are in the same file */
+               tmp = dwarf_decl_file(&spdie);
+               if (!tmp || strcmp(tmp, fname) != 0)
+                       lineno = 0;
        }
 
 post:
        /* Make a relative line number or an offset */
        if (lineno)
                ppt->line = lineno - baseline;
-       else if (func)
+       else if (basefunc) {
                ppt->offset = addr - (unsigned long)baseaddr;
+               func = basefunc;
+       }
 
        /* Duplicate strings */
        if (func) {
@@ -1474,7 +1494,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
        return 0;
 }
 
-/* Search function from function name */
+/* Search function definition from function name */
 static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 {
        struct dwarf_callback_param *param = data;
@@ -1485,7 +1505,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
                return DWARF_CB_OK;
 
-       if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+       if (die_is_func_def(sp_die) &&
            die_compare_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
                dwarf_decl_line(sp_die, &lr->offset);
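
The convert_to_trace_point() rework earlier in this file stops relying on dwarf_diename() alone and instead asks dwfl_module_addrsym() for the symtab symbol covering the probe address, recording the offset from that symbol. A standalone sketch of the same lookup follows; the offline callback set mirrors the one used in this file, the command-line handling is illustrative, and bias handling for relocatable objects is omitted (a linked executable such as vmlinux can be queried with absolute addresses).

#include <elfutils/libdwfl.h>
#include <gelf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

static char *debuginfo_path;

static const Dwfl_Callbacks offline_callbacks = {
	.find_debuginfo  = dwfl_standard_find_debuginfo,
	.debuginfo_path  = &debuginfo_path,
	.section_address = dwfl_offline_section_address,
};

int main(int argc, char **argv)
{
	Dwfl *dwfl;
	Dwfl_Module *mod;
	GElf_Sym sym;
	GElf_Addr addr;
	const char *name;
	int fd;

	if (argc < 3) {
		fprintf(stderr, "usage: %s <object-with-symbols> <address>\n", argv[0]);
		return 1;
	}
	addr = strtoull(argv[2], NULL, 0);

	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	dwfl = dwfl_begin(&offline_callbacks);
	if (!dwfl)
		return 1;
	mod = dwfl_report_offline(dwfl, "", "", fd);	/* same call the patch keeps in self->mod */
	dwfl_report_end(dwfl, NULL, NULL);
	if (!mod) {
		fprintf(stderr, "failed to report %s\n", argv[1]);
		return 1;
	}

	/* Nearest symtab symbol covering addr; its st_value gives the offset. */
	name = dwfl_module_addrsym(mod, addr, &sym, NULL);
	if (name)
		printf("%s+0x%llx\n", name, (unsigned long long)(addr - sym.st_value));
	else
		printf("no symbol covers 0x%llx\n", (unsigned long long)addr);

	dwfl_end(dwfl);
	return 0;
}

Build with -ldw (elfutils). The file descriptor's lifetime is left to process exit in this sketch.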
index 17e94d0c36f981dbf8dbc245410b9a4aa511d16c..3b7d63018960d7ff6a6808ce81eff2d34b40bd09 100644 (file)
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
 /* debug information structure */
 struct debuginfo {
        Dwarf           *dbg;
+       Dwfl_Module     *mod;
        Dwfl            *dwfl;
        Dwarf_Addr      bias;
 };
@@ -77,6 +78,7 @@ struct probe_finder {
 
 struct trace_event_finder {
        struct probe_finder     pf;
+       Dwfl_Module             *mod;           /* For solving symbols */
        struct probe_trace_event *tevs;         /* Found trace events */
        int                     ntevs;          /* Number of trace events */
        int                     max_tevs;       /* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
 
 struct available_var_finder {
        struct probe_finder     pf;
+       Dwfl_Module             *mod;           /* For solving symbols */
        struct variable_list    *vls;           /* Found variable lists */
        int                     nvls;           /* Number of variable lists */
        int                     max_vls;        /* Max no. of variable lists */
index 51f5edf2a6d0d140dd897c58f9d606c9fb25be9d..568b750c01f60b3d81cda29966ed40534a7bc8e1 100644 (file)
@@ -256,6 +256,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
+       if (tool->mmap2 == NULL)
+               tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
@@ -531,6 +533,9 @@ static int flush_sample_queue(struct perf_session *s,
                return 0;
 
        list_for_each_entry_safe(iter, tmp, head, list) {
+               if (session_done())
+                       return 0;
+
                if (iter->timestamp > limit)
                        break;
 
@@ -1160,7 +1165,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
        }
 }
 
-#define session_done() (*(volatile int *)(&session_done))
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1308,7 +1312,7 @@ int __perf_session__process_events(struct perf_session *session,
        file_offset = page_offset;
        head = data_offset - page_offset;
 
-       if (data_offset + data_size < file_size)
+       if (data_size && (data_offset + data_size < file_size))
                file_size = data_offset + data_size;
 
        progress_next = file_size / 16;
@@ -1372,10 +1376,13 @@ more:
                                    "Processing events...");
        }
 
+       err = 0;
+       if (session_done())
+               goto out_err;
+
        if (file_pos < file_size)
                goto more;
 
-       err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        err = flush_sample_queue(session, tool);
index 3aa75fb2225f7129daa12f7e86219f30928e5e42..04bf7373a7e5fb04222b1a9cdb5c295045c0626f 100644 (file)
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 #define perf_session__set_tracepoints_handlers(session, array) \
        __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done() (*(volatile int *)(&session_done))
 #endif /* __PERF_SESSION_H */
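
session.h now exports the session_done flag and a session_done() accessor that reads it through a volatile cast, so long-running loops elsewhere (the flush_sample_queue() and hists__collapse_resort() checks added above) can stop early once a signal has been seen. A minimal sketch of that pattern follows; the loop body and the sleep are stand-ins, not perf's actual event processing.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

volatile int session_done;
#define session_done() (*(volatile int *)(&session_done))

/* The signal handler only sets the flag; the work loop polls it. */
static void sig_handler(int sig)
{
	(void)sig;
	session_done = 1;
}

int main(void)
{
	int i;

	signal(SIGINT, sig_handler);

	for (i = 0; i < 100; i++) {
		if (session_done())	/* bail out cleanly on Ctrl-C */
			break;
		usleep(100000);		/* stand-in for processing one event */
	}
	printf("done (interrupted=%d)\n", session_done);
	return 0;
}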
index a7b9ab55738086c24d12d7e9ee8cb6143ffd47aa..a9c829be52169eac5b9f00d9382fd9281152b9e6 100644 (file)
@@ -8,6 +8,22 @@
 #include "symbol.h"
 #include "debug.h"
 
+#ifndef HAVE_ELF_GETPHDRNUM
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+       GElf_Ehdr gehdr;
+       GElf_Ehdr *ehdr;
+
+       ehdr = gelf_getehdr(elf, &gehdr);
+       if (!ehdr)
+               return -1;
+
+       *dst = ehdr->e_phnum;
+
+       return 0;
+}
+#endif
+
 #ifndef NT_GNU_BUILD_ID
 #define NT_GNU_BUILD_ID 3
 #endif
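
The HAVE_ELF_GETPHDRNUM guard above supplies a fallback for libelf releases that predate elf_getphdrnum(), reading e_phnum straight from the ELF header (unlike the real function this does not cover the PN_XNUM overflow case). A small caller using the same API might look like the sketch below; the default path is only an example.

#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/bin/true";	/* illustrative default */
	Elf *elf = NULL;
	size_t phnum, i;
	int fd;

	elf_version(EV_CURRENT);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf || elf_getphdrnum(elf, &phnum) != 0)
		goto out;

	/* Walk the program headers counted above. */
	for (i = 0; i < phnum; i++) {
		GElf_Phdr phdr;

		if (gelf_getphdr(elf, (int)i, &phdr))
			printf("phdr %zu: type=%u offset=0x%llx\n",
			       i, phdr.p_type,
			       (unsigned long long)phdr.p_offset);
	}
out:
	if (elf)
		elf_end(elf);
	close(fd);
	return 0;
}

Link with -lelf.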
index fe7a27d67d2b7af23a596683509769a4f1bd6e38..e9e1c03f927d27bce1e041a9cc61ca47dae2b987 100644 (file)
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
        char *next = NULL;
        char *addr_str;
        char *mod;
-       char *fmt;
+       char *fmt = NULL;
 
        line = strtok_r(file, "\n", &next);
        while (line) {
index 2f891f7e70bf9251c849780ec008ded14a2776c9..5390d0b8862a680e147cf52dd8bed22cfbba333f 100644 (file)
@@ -39,6 +39,15 @@ UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
 
 #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
 
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+                                unw_word_t ip,
+                                unw_word_t segbase,
+                                const char *obj_name, unw_word_t start,
+                                unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
 #define DW_EH_PE_FORMAT_MASK   0x0f    /* format of the encoded value */
 #define DW_EH_PE_APPL_MASK     0x70    /* how the value is to be applied */
 
@@ -245,8 +254,9 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
        return 0;
 }
 
-static int read_unwind_spec(struct dso *dso, struct machine *machine,
-                           u64 *table_data, u64 *segbase, u64 *fde_count)
+static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+                                    u64 *table_data, u64 *segbase,
+                                    u64 *fde_count)
 {
        int ret = -EINVAL, fd;
        u64 offset;
@@ -255,6 +265,7 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
        if (fd < 0)
                return -EINVAL;
 
+       /* Check the .eh_frame section for unwinding info */
        offset = elf_section_offset(fd, ".eh_frame_hdr");
        close(fd);
 
@@ -263,10 +274,29 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
                                          table_data, segbase,
                                          fde_count);
 
-       /* TODO .debug_frame check if eh_frame_hdr fails */
        return ret;
 }
 
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int read_unwind_spec_debug_frame(struct dso *dso,
+                                       struct machine *machine, u64 *offset)
+{
+       int fd = dso__data_fd(dso, machine);
+
+       if (fd < 0)
+               return -EINVAL;
+
+       /* Check the .debug_frame section for unwinding info */
+       *offset = elf_section_offset(fd, ".debug_frame");
+       close(fd);
+
+       if (*offset)
+               return 0;
+
+       return -EINVAL;
+}
+#endif
+
 static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
 {
        struct addr_location al;
@@ -291,20 +321,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
 
        pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
 
-       if (read_unwind_spec(map->dso, ui->machine,
-                            &table_data, &segbase, &fde_count))
-               return -EINVAL;
+       /* Check the .eh_frame section for unwinding info */
+       if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+                                      &table_data, &segbase, &fde_count)) {
+               memset(&di, 0, sizeof(di));
+               di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
+               di.start_ip = map->start;
+               di.end_ip   = map->end;
+               di.u.rti.segbase    = map->start + segbase;
+               di.u.rti.table_data = map->start + table_data;
+               di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
+                                     / sizeof(unw_word_t);
+               return dwarf_search_unwind_table(as, ip, &di, pi,
+                                                need_unwind_info, arg);
+       }
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+       /* Check the .debug_frame section for unwinding info */
+       if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+               memset(&di, 0, sizeof(di));
+               dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+                                      map->start, map->end);
+               return dwarf_search_unwind_table(as, ip, &di, pi,
+                                                need_unwind_info, arg);
+       }
+#endif
 
-       memset(&di, 0, sizeof(di));
-       di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
-       di.start_ip = map->start;
-       di.end_ip   = map->end;
-       di.u.rti.segbase    = map->start + segbase;
-       di.u.rti.table_data = map->start + table_data;
-       di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
-                             / sizeof(unw_word_t);
-       return dwarf_search_unwind_table(as, ip, &di, pi,
-                                        need_unwind_info, arg);
+       return -EINVAL;
 }
 
 static int access_fpreg(unw_addr_space_t __maybe_unused as,
index fe702076ca46cc2d3d02bab818446c9d15f8c392..2bb8bf506681b4d982546dd9d14b005df815a5fc 100644 (file)
@@ -2,7 +2,7 @@
  * turbostat -- show CPU frequency and C-state residency
  * on modern Intel turbo-capable processors.
  *
- * Copyright (c) 2012 Intel Corporation.
+ * Copyright (c) 2013 Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
 unsigned int do_c8_c9_c10;
+unsigned int do_slm_cstates;
+unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
 unsigned int units = 1000000000;       /* Ghz etc */
@@ -81,6 +83,8 @@ double rapl_joule_counter_range;
 #define RAPL_DRAM      (1 << 3)
 #define RAPL_PKG_PERF_STATUS   (1 << 4)
 #define RAPL_DRAM_PERF_STATUS  (1 << 5)
+#define RAPL_PKG_POWER_INFO    (1 << 6)
+#define RAPL_CORE_POLICY       (1 << 7)
 #define        TJMAX_DEFAULT   100
 
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -96,7 +100,7 @@ struct thread_data {
        unsigned long long tsc;
        unsigned long long aperf;
        unsigned long long mperf;
-       unsigned long long c1;  /* derived */
+       unsigned long long c1;
        unsigned long long extra_msr64;
        unsigned long long extra_delta64;
        unsigned long long extra_msr32;
@@ -266,7 +270,7 @@ void print_header(void)
                outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
        if (do_nhm_cstates)
                outp += sprintf(outp, "    %%c1");
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, "    %%c3");
        if (do_nhm_cstates)
                outp += sprintf(outp, "    %%c6");
@@ -280,9 +284,9 @@ void print_header(void)
 
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc2");
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, "   %%pc3");
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, "   %%pc6");
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc7");
@@ -480,7 +484,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
@@ -499,9 +503,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
 
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
@@ -648,17 +652,24 @@ delta_thread(struct thread_data *new, struct thread_data *old,
        }
 
 
-       /*
-        * As counter collection is not atomic,
-        * it is possible for mperf's non-halted cycles + idle states
-        * to exceed TSC's all cycles: show c1 = 0% in that case.
-        */
-       if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
-               old->c1 = 0;
-       else {
-               /* normal case, derive c1 */
-               old->c1 = old->tsc - old->mperf - core_delta->c3
+       if (use_c1_residency_msr) {
+               /*
+                * Some models have a dedicated C1 residency MSR,
+                * which should be more accurate than the derivation below.
+                */
+       } else {
+               /*
+                * As counter collection is not atomic,
+                * it is possible for mperf's non-halted cycles + idle states
+                * to exceed the TSC's total cycles: show c1 = 0% in that case.
+                */
+               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+                       old->c1 = 0;
+               else {
+                       /* normal case, derive c1 */
+                       old->c1 = old->tsc - old->mperf - core_delta->c3
                                - core_delta->c6 - core_delta->c7;
+               }
        }
 
        if (old->mperf == 0) {
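
On models without a dedicated C1 residency MSR, delta_thread() keeps deriving C1 as the TSC interval minus the non-halted cycles (mperf) and the deeper core C-state residencies, clamping to 0 when non-atomic sampling makes the parts exceed the whole. A compact sketch of just that arithmetic, using made-up counter deltas:

#include <stdio.h>

/* Derive C1 residency from counter deltas, as in delta_thread() above. */
static unsigned long long derive_c1(unsigned long long tsc,
				    unsigned long long mperf,
				    unsigned long long c3,
				    unsigned long long c6,
				    unsigned long long c7)
{
	/* Non-atomic sampling can make the parts exceed the whole. */
	if (mperf + c3 + c6 + c7 > tsc)
		return 0;
	return tsc - mperf - c3 - c6 - c7;
}

int main(void)
{
	/* Illustrative deltas over one measurement interval. */
	unsigned long long tsc = 1000000, mperf = 400000;
	unsigned long long c3 = 100000, c6 = 300000, c7 = 0;
	unsigned long long c1 = derive_c1(tsc, mperf, c3, c6, c7);

	printf("%%c1 = %.2f\n", 100.0 * c1 / tsc);	/* 20.00 here */
	return 0;
}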
@@ -872,13 +883,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
                        return -5;
 
+       if (use_c1_residency_msr) {
+               if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
+                       return -6;
+       }
+
        /* collect core counters only for 1st thread in core */
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                return 0;
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
+       }
+
+       if (do_nhm_cstates) {
                if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
        }
@@ -898,7 +917,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                return 0;
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates) {
                if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
                        return -9;
                if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
@@ -1046,25 +1065,28 @@ print_nhm_turbo_ratio_limits:
 
        switch(msr & 0x7) {
        case 0:
-               fprintf(stderr, "pc0");
+               fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
                break;
        case 1:
-               fprintf(stderr, do_snb_cstates ? "pc2" : "pc0");
+               fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
                break;
        case 2:
-               fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3");
+               fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
                break;
        case 3:
-               fprintf(stderr, "pc6");
+               fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
                break;
        case 4:
-               fprintf(stderr, "pc7");
+               fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
                break;
        case 5:
-               fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid");
+               fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
+               break;
+       case 6:
+               fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
                break;
        case 7:
-               fprintf(stderr, "unlimited");
+               fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
                break;
        default:
                fprintf(stderr, "invalid");
@@ -1460,6 +1482,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
        case 0x46:      /* HSW */
+       case 0x4D:      /* AVN */
                return 1;
        case 0x2E:      /* Nehalem-EX Xeon - Beckton */
        case 0x2F:      /* Westmere-EX Xeon - Eagleton */
@@ -1555,11 +1578,14 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
        case 0x46:      /* HSW */
-               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
                break;
        case 0x2D:
        case 0x3E:
-               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
+               break;
+       case 0x4D:      /* AVN */
+               do_rapl = RAPL_PKG | RAPL_CORES;
                break;
        default:
                return;
@@ -1573,17 +1599,18 @@ void rapl_probe(unsigned int family, unsigned int model)
        rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
        rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
 
-       /* get TDP to determine energy counter range */
-       if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
-               return;
+       if (do_rapl & RAPL_PKG_POWER_INFO) {
+               /* get TDP to determine energy counter range */
+               if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
+                       return;
 
-       tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+               tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
 
-       rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
-
-       if (verbose)
-               fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+               rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
 
+               if (verbose)
+                       fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+       }
        return;
 }
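
rapl_probe() converts MSR_RAPL_POWER_UNIT into scaling factors: the power, energy and time units are stored as exponents in separate bit fields of that register. The sketch below decodes a raw value the same way; 0x000A1003 is used purely as an illustrative reading (roughly what Sandy Bridge class parts report), not a value taken from this patch.

#include <stdio.h>

int main(void)
{
	/* Illustrative raw MSR_RAPL_POWER_UNIT value (assumption, not read from hardware). */
	unsigned long long msr = 0x000A1003ULL;

	double power_units  = 1.0 / (1 << (msr & 0xF));		/* watts   */
	double energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));	/* joules  */
	double time_units   = 1.0 / (1 << (msr >> 16 & 0xF));	/* seconds */

	printf("power unit  = %.6f W\n", power_units);	/* 1/8     = 0.125      */
	printf("energy unit = %.9f J\n", energy_units);	/* 1/65536 ~ 15.26 uJ   */
	printf("time unit   = %.6f s\n", time_units);	/* 1/1024  ~ 0.000977 s */
	return 0;
}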
 
@@ -1702,7 +1729,8 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
                        local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units);
        }
-       if (do_rapl & RAPL_PKG) {
+       if (do_rapl & RAPL_PKG_POWER_INFO) {
+
                if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
                        return -5;
 
@@ -1714,6 +1742,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
                        ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
 
+       }
+       if (do_rapl & RAPL_PKG) {
+
                if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
                        return -9;
 
@@ -1749,12 +1780,16 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 
                print_power_limit_msr(cpu, msr, "DRAM Limit");
        }
-       if (do_rapl & RAPL_CORES) {
+       if (do_rapl & RAPL_CORE_POLICY) {
                if (verbose) {
                        if (get_msr(cpu, MSR_PP0_POLICY, &msr))
                                return -7;
 
                        fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
+               }
+       }
+       if (do_rapl & RAPL_CORES) {
+               if (verbose) {
 
                        if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
                                return -9;
@@ -1813,10 +1848,47 @@ int has_c8_c9_c10(unsigned int family, unsigned int model)
 }
 
 
+int is_slm(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+       switch (model) {
+       case 0x4D:      /* AVN */
+               return 1;
+       }
+       return 0;
+}
+
+#define SLM_BCLK_FREQS 5
+double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
+
+double slm_bclk(void)
+{
+       unsigned long long msr = 3;
+       unsigned int i;
+       double freq;
+
+       if (get_msr(0, MSR_FSB_FREQ, &msr))
+               fprintf(stderr, "SLM BCLK: unknown\n");
+
+       i = msr & 0xf;
+       if (i >= SLM_BCLK_FREQS) {
+               fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
+               msr = 3;
+               i = 3;
+       freq = slm_freq_table[i];
+
+       fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);
+
+       return freq;
+}
+
 double discover_bclk(unsigned int family, unsigned int model)
 {
        if (is_snb(family, model))
                return 100.00;
+       else if (is_slm(family, model))
+               return slm_bclk();
        else
                return 133.33;
 }
@@ -1873,7 +1945,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
                fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
                        cpu, msr, target_c_local);
 
-       if (target_c_local < 85 || target_c_local > 120)
+       if (target_c_local < 85 || target_c_local > 127)
                goto guess;
 
        tcc_activation_temp = target_c_local;
@@ -1970,6 +2042,7 @@ void check_cpuid()
        do_smi = do_nhm_cstates;
        do_snb_cstates = is_snb(family, model);
        do_c8_c9_c10 = has_c8_c9_c10(family, model);
+       do_slm_cstates = is_slm(family, model);
        bclk = discover_bclk(family, model);
 
        do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2331,7 +2404,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.4 April 17, 2013"
+               fprintf(stderr, "turbostat v3.5 April 26, 2013"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();
index 46736604c26cda69fb7dc179dbce884c9cf849cc..a1203148dfa18990b1dc30af5db3c09e1e0b7012 100644 (file)
@@ -133,12 +133,6 @@ CROSS = frv-linux
 ARCH = frv
 GCC_VER = 4.5.1
 
-# h8300 - failed make defconfig??
-TEST_START IF ${RUN} == h8300 || ${DO_FAILED}
-CROSS = h8300-elf
-ARCH = h8300
-GCC_VER = 4.5.1
-
 # m68k fails with error?
 TEST_START IF ${RUN} == m68k || ${DO_DEFAULT}
 CROSS = m68k-linux
index 4fa655d68a81c8a1b8dd8b60f885721edc7cfea1..41bd85559d4b01aa8ee0e89615ece47c4115b0d0 100644 (file)
@@ -151,7 +151,7 @@ static int check_timer_create(int which)
        fflush(stdout);
 
        done = 0;
-       timer_create(which, NULL, &id);
+       err = timer_create(which, NULL, &id);
        if (err < 0) {
                perror("Can't create timer\n");
                return -1;
index 979bff485fb0b343cb5e1fd4328d7cf046dafef7..a9dd682cf5e3f5117de017156396337a8352914f 100644 (file)
@@ -1064,10 +1064,12 @@ EXPORT_SYMBOL_GPL(gfn_to_hva);
 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 {
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
-       if (writable)
+       unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
+
+       if (!kvm_is_error_hva(hva) && writable)
                *writable = !memslot_is_readonly(slot);
 
-       return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
+       return hva;
 }
 
 static int kvm_read_hva(void *data, void __user *hva, int len)
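
The kvm_main.c fix above resolves the hva once, fills the optional writable out-parameter only when that lookup did not return an error value, and drops the second gfn_to_memslot() call the old code made. A generic sketch of that "fill the out-parameter only on success" shape, with all names hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define ERR_ADDR (~0ULL)

/* Hypothetical lookup mirroring the gfn_to_hva_prot() fix above: resolve the
 * address first, and fill the optional out-parameter only when it is valid. */
static unsigned long long lookup(unsigned long long key, bool *writable)
{
	unsigned long long addr = key < 16 ? 0x7f0000000000ULL + key * 4096 : ERR_ADDR;

	if (addr != ERR_ADDR && writable)
		*writable = (key % 2) == 0;	/* stand-in for !memslot_is_readonly() */
	return addr;
}

int main(void)
{
	bool w = false;
	unsigned long long a = lookup(3, &w);

	printf("addr=%#llx writable=%d\n", a, w);
	return 0;
}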